# pandas-dev/pandas :: pandas/io/sql.py
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import annotations
from abc import (
ABC,
abstractmethod,
)
from contextlib import contextmanager
from datetime import (
date,
datetime,
time,
)
from functools import partial
import re
from typing import (
TYPE_CHECKING,
Any,
Iterator,
Literal,
cast,
overload,
)
import warnings
import numpy as np
from pandas._libs import lib
from pandas._typing import (
DateTimeErrorChoices,
DtypeArg,
IndexLabel,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
DatabaseError,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_dict_like,
is_integer,
is_list_like,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas import get_option
from pandas.core.api import (
DataFrame,
Series,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.tools.datetimes import to_datetime
if TYPE_CHECKING:
from sqlalchemy import Table
# -----------------------------------------------------------------------------
# -- Helper functions
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(
col, utc: bool = False, format: str | dict[str, Any] | None = None
):
if isinstance(format, dict):
# GH35185 Allow custom error values in parse_dates argument of
# read_sql like functions.
# Format can take on custom to_datetime argument values such as
# {"errors": "coerce"} or {"dayfirst": True}
error: DateTimeErrorChoices = format.pop("errors", None) or "ignore"
return to_datetime(col, errors=error, **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be parsed as datetime columns.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(
data,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
if dtype:
frame = frame.astype(dtype)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
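Examples
--------
A minimal sketch, assuming an in-memory SQLite connection:
>>> from sqlite3 import connect  # doctest:+SKIP
>>> execute("SELECT 1", connect(":memory:")).fetchall()  # doctest:+SKIP
[(1,)]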
"""
with pandasSQL_builder(con, need_transaction=True) as pandas_sql:
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=...,
index_col: str | list[str] | None = ...,
coerce_float=...,
parse_dates: list[str] | dict[str, str] | None = ...,
columns: list[str] | None = ...,
chunksize: None = ...,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=...,
index_col: str | list[str] | None = ...,
coerce_float=...,
parse_dates: list[str] | dict[str, str] | None = ...,
columns: list[str] | None = ...,
chunksize: int = ...,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name: str,
con,
schema: str | None = None,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates: list[str] | dict[str, str] | None = None,
columns: list[str] | None = None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
with pandasSQL_builder(con, schema=schema) as pandas_sql:
if not pandas_sql.has_table(table_name):
raise ValueError(f"Table {table_name} not found")
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
@overload
def read_sql_query(
sql,
con,
index_col: str | list[str] | None = ...,
coerce_float=...,
params: list[str] | dict[str, str] | None = ...,
parse_dates: list[str] | dict[str, str] | None = ...,
chunksize: None = ...,
dtype: DtypeArg | None = ...,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col: str | list[str] | None = ...,
coerce_float=...,
params: list[str] | dict[str, str] | None = ...,
parse_dates: list[str] | dict[str, str] | None = ...,
chunksize: int = ...,
dtype: DtypeArg | None = ...,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
params: list[str] | dict[str, str] | None = None,
parse_dates: list[str] | dict[str, str] | None = None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, use ``%(name)s``, so pass ``params={'name': 'value'}``.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}.
.. versionadded:: 1.3.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
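Examples
--------
A minimal sketch, assuming an open sqlite3 connection ``conn`` holding a
table ``test_data`` (see :func:`~pandas.read_sql` for a setup example):
>>> pd.read_sql_query("SELECT int_column FROM test_data", conn)  # doctest:+SKIP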
"""
with pandasSQL_builder(con) as pandas_sql:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype=dtype,
)
@overload
def read_sql(
sql,
con,
index_col: str | list[str] | None = ...,
coerce_float=...,
params=...,
parse_dates=...,
columns: list[str] = ...,
chunksize: None = ...,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col: str | list[str] | None = ...,
coerce_float=...,
params=...,
parse_dates=...,
columns: list[str] = ...,
chunksize: int = ...,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
params=None,
parse_dates=None,
columns: list[str] | None = None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable; str
connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, use ``%(name)s``, so pass ``params={'name': 'value'}``.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
Read data from SQL via either a SQL query or a SQL tablename.
When using a SQLite database only SQL queries are accepted;
providing only the SQL tablename will result in an error.
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
>>> df.to_sql('test_data', conn)
2
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
>>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
Apply date parsing to columns through the ``parse_dates`` argument
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates=["date_column"])
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
1. Ignore errors while parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"errors": "ignore"}})
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
2. Apply a dayfirst date parsing order on the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"dayfirst": True}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
3. Apply custom formatting when date parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"format": "%d/%m/%y"}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
"""
with pandasSQL_builder(con) as pandas_sql:
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
def to_sql(
frame,
name: str,
con,
schema: str | None = None,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: bool = True,
index_label: IndexLabel = None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: str | None = None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : bool, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
engine : {'auto', 'sqlalchemy'}, default 'auto'
SQL engine library to use. If 'auto', then the option
``io.sql.engine`` is used. The default ``io.sql.engine``
behavior is ``'sqlalchemy'``.
.. versionadded:: 1.3.0
**engine_kwargs
Any additional kwargs are passed to the engine.
Returns
-------
None or int
Number of rows affected by to_sql. None is returned if the callable
passed into ``method`` does not return an integer number of rows.
.. versionadded:: 1.4.0
Notes
-----
The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor``
or SQLAlchemy connectable. The returned value may not reflect the exact number of written
rows as stipulated in the
`sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
`SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__ documentation.
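Examples
--------
A minimal sketch using an in-memory SQLite connection; the frame and
table name are illustrative assumptions:
>>> from sqlite3 import connect  # doctest:+SKIP
>>> conn = connect(":memory:")  # doctest:+SKIP
>>> to_sql(DataFrame({"A": [1, 2, 3]}), "my_table", conn, index=False)  # doctest:+SKIP
3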
""" # noqa:E501
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql:
return pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
engine=engine,
**engine_kwargs,
)
def has_table(table_name: str, con, schema: str | None = None) -> bool:
"""
Check if the database has the named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
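Examples
--------
A minimal sketch against an empty in-memory SQLite database:
>>> from sqlite3 import connect  # doctest:+SKIP
>>> has_table("my_table", connect(":memory:"))  # doctest:+SKIP
False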
"""
with pandasSQL_builder(con, schema=schema) as pandas_sql:
return pandas_sql.has_table(table_name)
table_exists = has_table
@contextmanager
def pandasSQL_builder(
con,
schema: str | None = None,
need_transaction: bool = False,
) -> Iterator[PandasSQL]:
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters. Also creates a sqlalchemy connection and transaction
if necessary.
"""
import sqlite3
if isinstance(con, sqlite3.Connection) or con is None:
yield SQLiteDatabase(con)
else:
sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore")
if sqlalchemy is not None and isinstance(
con, (str, sqlalchemy.engine.Connectable)
):
with _sqlalchemy_con(con, need_transaction) as con:
yield SQLDatabase(con, schema=schema)
elif isinstance(con, str) and sqlalchemy is None:
raise ImportError("Using URI string without sqlalchemy installed.")
else:
warnings.warn(
"pandas only supports SQLAlchemy connectable (engine/connection) or "
"database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 "
"objects are not tested. Please consider using SQLAlchemy.",
UserWarning,
stacklevel=find_stack_level() + 2,
)
yield SQLiteDatabase(con)
@contextmanager
def _sqlalchemy_con(connectable, need_transaction: bool):
"""Create a sqlalchemy connection and a transaction if necessary."""
sqlalchemy = import_optional_dependency("sqlalchemy", errors="raise")
if isinstance(connectable, str):
connectable = sqlalchemy.create_engine(connectable)
if isinstance(connectable, sqlalchemy.engine.Engine):
with connectable.connect() as con:
if need_transaction:
with con.begin():
yield con
else:
yield con
else:
yield connectable
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses the fact that the table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name: str,
pandas_sql_engine,
frame=None,
index: bool | str | list[str] | None = True,
if_exists: Literal["fail", "replace", "append"] = "fail",
prefix: str = "pandas",
index_label=None,
schema=None,
keys=None,
dtype: DtypeArg | None = None,
) -> None:
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self) -> str:
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.con))
def _execute_create(self) -> None:
# Inserting table into database, add to MetaData object
self.table = self.table.to_metadata(self.pd_sql.meta)
self.table.create(bind=self.pd_sql.con)
def create(self) -> None:
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
if self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys: list[str], data_iter) -> int:
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
result = conn.execute(self.table.insert(), data)
return result.rowcount
def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int:
"""
Alternative to _execute_insert for DBs that support multivalue INSERT.
Note: multi-value insert is usually faster for analytics DBs
and tables containing a few columns,
but performance degrades quickly as the number of columns increases.
"""
from sqlalchemy import insert
data = [dict(zip(keys, row)) for row in data_iter]
stmt = insert(self.table).values(data)
result = conn.execute(stmt)
return result.rowcount
def insert_data(self) -> tuple[list[str], list[np.ndarray]]:
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
# this just pre-allocates the list: None's will be replaced with ndarrays
# error: List item 0 has incompatible type "None"; expected "ndarray"
data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item]
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(
self, chunksize: int | None = None, method: str | None = None
) -> int | None:
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return 0
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = (nrows // chunksize) + 1
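# e.g. (illustrative) nrows=5, chunksize=2 -> chunks=3, giving the
# slices [0:2], [2:4], [4:5]; the start_i >= end_i check below stops
# any empty trailing chunk.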
total_inserted = None
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list))
num_inserted = exec_insert(conn, keys, chunk_iter)
# GH 46891
if is_integer(num_inserted):
if total_inserted is None:
total_inserted = num_inserted
else:
total_inserted += num_inserted
return total_inserted
def _query_iterator(
self,
result,
chunksize: int | None,
columns,
coerce_float: bool = True,
parse_dates=None,
):
"""Return generator through chunked result set."""
has_read_data = False
while True:
data = result.fetchmany(chunksize)
if not data:
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
has_read_data = True
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(
self,
coerce_float: bool = True,
parse_dates=None,
columns=None,
chunksize=None,
) -> DataFrame | Iterator[DataFrame]:
from sqlalchemy import select
if columns is not None and len(columns) > 0:
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(*cols)
else:
sql_select = select(self.table)
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return com.fill_missing_names(self.frame.index.names)
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import (
Column,
PrimaryKeyConstraint,
Table,
)
from sqlalchemy.schema import MetaData
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
meta = MetaData()
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None) -> None:
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine; ints must be converted to floats if there are NULL values.
Booleans are hard because converting a bool column with None replaces
all Nones with False. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
TIMESTAMP,
BigInteger,
Boolean,
Date,
DateTime,
Float,
Integer,
SmallInteger,
Text,
Time,
)
if col_type in ("datetime64", "datetime"):
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=find_stack_level(),
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
# GH35076 Map pandas integer to optimal SQLAlchemy integer type
if col.dtype.name.lower() in ("int8", "uint8", "int16"):
return SmallInteger
elif col.dtype.name.lower() in ("uint16", "int32"):
return Integer
elif col.dtype.name.lower() == "uint64":
raise ValueError("Unsigned 64 bit integer datatype is not supported")
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (
TIMESTAMP,
Boolean,
Date,
DateTime,
Float,
Integer,
)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject, ABC):
"""
Subclasses should define read_query and to_sql.
"""
def read_table(
self,
table_name: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: str | None = None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
raise NotImplementedError
@abstractmethod
def read_query(
self,
sql: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
pass
@abstractmethod
def to_sql(
self,
frame,
name,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: bool = True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
pass
@abstractmethod
def execute(self, *args, **kwargs):
pass
@abstractmethod
def has_table(self, name: str, schema: str | None = None) -> bool:
pass
@abstractmethod
def _create_sql_schema(
self,
frame: DataFrame,
table_name: str,
keys: list[str] | None = None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
pass
class BaseEngine:
def insert_records(
self,
table: SQLTable,
con,
frame,
name,
index: bool | str | list[str] | None = True,
schema=None,
chunksize=None,
method=None,
**engine_kwargs,
) -> int | None:
"""
Inserts data into already-prepared table
"""
raise AbstractMethodError(self)
class SQLAlchemyEngine(BaseEngine):
def __init__(self) -> None:
import_optional_dependency(
"sqlalchemy", extra="sqlalchemy is required for SQL support."
)
def insert_records(
self,
table: SQLTable,
con,
frame,
name,
index: bool | str | list[str] | None = True,
schema=None,
chunksize=None,
method=None,
**engine_kwargs,
) -> int | None:
from sqlalchemy import exc
try:
return table.insert(chunksize=chunksize, method=method)
except exc.SQLAlchemyError as err:
# GH34431
# https://stackoverflow.com/a/67358288/6067848
msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?#
)|inf can not be used with MySQL"""
err_text = str(err.orig)
if re.search(msg, err_text):
raise ValueError("inf cannot be used with MySQL") from err
raise err
def get_engine(engine: str) -> BaseEngine:
"""return our implementation"""
if engine == "auto":
engine = get_option("io.sql.engine")
if engine == "auto":
# try engines in this order
engine_classes = [SQLAlchemyEngine]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'sqlalchemy'.\n"
"A suitable version of "
"sqlalchemy is required for sql I/O "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "sqlalchemy":
return SQLAlchemyEngine()
raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle database abstraction.
Parameters
----------
con : SQLAlchemy Connection
Connection to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
"""
def __init__(self, con, schema: str | None = None) -> None:
from sqlalchemy.schema import MetaData
self.con = con
self.meta = MetaData(schema=schema)
@contextmanager
def run_transaction(self):
yield self.con
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.con.execute(*args, **kwargs)
def read_table(
self,
table_name: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: str | None = None,
chunksize: int | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
self.meta.reflect(bind=self.con, only=[table_name])
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Return generator through chunked result set"""
has_read_data = False
while True:
data = result.fetchmany(chunksize)
if not data:
if not has_read_data:
yield _wrap_result(
[],
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
break
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : str
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : bool, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
E.g. for psycopg2, use ``%(name)s``, so pass ``params={'name': 'value'}``.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}
.. versionadded:: 1.3.0
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
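Examples
--------
A minimal sketch; ``db`` is an assumed ``SQLDatabase`` instance:
>>> db.read_query("SELECT * FROM my_table")  # doctest:+SKIP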
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
read_sql = read_query
def prep_table(
self,
frame,
name,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: bool | str | list[str] | None = True,
index_label=None,
schema=None,
dtype: DtypeArg | None = None,
) -> SQLTable:
"""
Prepares table in the database for data insertion. Creates it if needed, etc.
"""
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
else:
dtype = cast(dict, dtype)
from sqlalchemy.types import (
TypeEngine,
to_instance,
)
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
return table
def check_case_sensitive(
self,
name: str,
schema: str | None,
) -> None:
"""
Checks table name for issues with case-sensitivity.
Method is called after data is inserted.
"""
if not name.isdigit() and not name.islower():
# check for potential case-sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
from sqlalchemy import inspect as sqlalchemy_inspect
insp = sqlalchemy_inspect(self.con)
table_names = insp.get_table_names(schema=schema or self.meta.schema)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(
msg,
UserWarning,
stacklevel=find_stack_level(),
)
def to_sql(
self,
frame,
name: str,
if_exists: Literal["fail", "replace", "append"] = "fail",
index: bool = True,
index_label=None,
schema: str | None = None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
engine : {'auto', 'sqlalchemy'}, default 'auto'
SQL engine library to use. If 'auto', then the option
``io.sql.engine`` is used. The default ``io.sql.engine``
behavior is ``'sqlalchemy'``.
.. versionadded:: 1.3.0
**engine_kwargs
Any additional kwargs are passed to the engine.
"""
sql_engine = get_engine(engine)
table = self.prep_table(
frame=frame,
name=name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
dtype=dtype,
)
total_inserted = sql_engine.insert_records(
table=table,
con=self.con,
frame=frame,
name=name,
index=index,
schema=schema,
chunksize=chunksize,
method=method,
**engine_kwargs,
)
self.check_case_sensitive(name=name, schema=schema)
return total_inserted
@property
def tables(self):
return self.meta.tables
def has_table(self, name: str, schema: str | None = None) -> bool:
from sqlalchemy import inspect as sqlalchemy_inspect
insp = sqlalchemy_inspect(self.con)
return insp.has_table(name, schema or self.meta.schema)
def get_table(self, table_name: str, schema: str | None = None) -> Table:
from sqlalchemy import (
Numeric,
Table,
)
schema = schema or self.meta.schema
tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema)
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name: str, schema: str | None = None) -> None:
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(bind=self.con, only=[table_name], schema=schema)
self.get_table(table_name, schema).drop(bind=self.con)
self.meta.clear()
def _create_sql_schema(
self,
frame: DataFrame,
table_name: str,
keys: list[str] | None = None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
table = SQLTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
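# e.g. (illustrative): the name 'it"s' becomes '"it""s"'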
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable, just use the CREATE TABLE statement.
"""
def __init__(self, *args, **kwargs) -> None:
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
def _adapt_time(t) -> str:
# This is faster than strftime
return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}"
sqlite3.register_adapter(time, _adapt_time)
super().__init__(*args, **kwargs)
def sql_schema(self) -> str:
return str(";\n".join(self.table))
def _execute_create(self) -> None:
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows: int) -> str:
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)])
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
def _execute_insert(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
return conn.rowcount
def _execute_insert_multi(self, conn, keys, data_iter) -> int:
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
return conn.rowcount
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
if self.schema:
schema_name = self.schema + "."
else:
schema_name = ""
create_stmts = [
"CREATE TABLE "
+ schema_name
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=find_stack_level(),
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con) -> None:
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
):
"""Return generator through chunked result set"""
has_read_data = False
while True:
data = cursor.fetchmany(chunksize)
if type(data) is tuple:
data = list(data)
if not data:
cursor.close()
if not has_read_data:
yield DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
break
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
) -> DataFrame | Iterator[DataFrame]:
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists: str = "fail",
index: bool = True,
index_label=None,
schema=None,
chunksize=None,
dtype: DtypeArg | None = None,
method=None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : bool, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
            If not None, rows will be written in batches of this size.
            If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
            be a string. If all columns are of the same type, a single value
            can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
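            A minimal sketch of such a callable (illustrative only; the
            ``insert_ignore`` name is hypothetical, and ``conn`` is assumed
            to expose the DBAPI ``executemany`` method)::
                def insert_ignore(pd_table, conn, keys, data_iter):
                    # skip rows that would violate a constraint (SQLite syntax)
                    cols = ", ".join(keys)
                    wld = ", ".join(["?"] * len(keys))
                    sql = (
                        f"INSERT OR IGNORE INTO {pd_table.name} "
                        f"({cols}) VALUES ({wld})"
                    )
                    conn.executemany(sql, list(data_iter))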
"""
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = {col_name: dtype for col_name in frame} # type: ignore[misc]
else:
dtype = cast(dict, dtype)
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
return table.insert(chunksize, method)
def has_table(self, name: str, schema: str | None = None) -> bool:
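        # sqlite3 uses the "qmark" DBAPI parameter style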
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name: str, schema: str | None = None) -> None:
return None # not supported in fallback mode
def drop_table(self, name: str, schema: str | None = None) -> None:
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(
self,
frame,
table_name: str,
keys=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
):
table = SQLiteTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
def get_schema(
frame,
name: str,
keys=None,
con=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
) -> str:
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : str
name of SQL table
    keys : string or sequence, default None
        columns to use as a primary key
    con : an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library; default None. If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should be a
        SQLAlchemy type, or a string for a sqlite3 fallback connection.
    schema : str, default None
        Optionally specify the schema to be used in creating the table.
.. versionadded:: 1.2.0
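    Examples
    --------
    A minimal sketch (assumes an existing DataFrame ``df`` and an open
    sqlite3 connection ``conn``; the exact DDL emitted may differ)::
        >>> get_schema(df, "my_table", keys="id", con=conn)  # doctest: +SKIP
        'CREATE TABLE "my_table" (...)'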
"""
with pandasSQL_builder(con=con) as pandas_sql:
return pandas_sql._create_sql_schema(
frame, name, keys=keys, dtype=dtype, schema=schema
)
| bsd-3-clause | 472d70b0eb3b1c9fe6a807457577cdda | 32.662625 | 120 | 0.566689 | 4.320087 | false | false | false | false |
pandas-dev/pandas | pandas/tests/arrays/categorical/test_indexing.py | 1 | 12790 | import math
import numpy as np
import pytest
from pandas import (
NA,
Categorical,
CategoricalIndex,
Index,
Interval,
IntervalIndex,
NaT,
PeriodIndex,
Series,
Timedelta,
Timestamp,
)
import pandas._testing as tm
import pandas.core.common as com
class TestCategoricalIndexingWithFactor:
def test_getitem(self, factor):
assert factor[0] == "a"
assert factor[-1] == "c"
subf = factor[[0, 1, 2]]
tm.assert_numpy_array_equal(subf._codes, np.array([0, 1, 1], dtype=np.int8))
subf = factor[np.asarray(factor) == "c"]
tm.assert_numpy_array_equal(subf._codes, np.array([2, 2, 2], dtype=np.int8))
def test_setitem(self, factor):
# int/positional
c = factor.copy()
c[0] = "b"
assert c[0] == "b"
c[-1] = "a"
assert c[-1] == "a"
# boolean
c = factor.copy()
indexer = np.zeros(len(c), dtype="bool")
indexer[0] = True
indexer[-1] = True
c[indexer] = "c"
expected = Categorical(["c", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
tm.assert_categorical_equal(c, expected)
@pytest.mark.parametrize(
"other",
[Categorical(["b", "a"]), Categorical(["b", "a"], categories=["b", "a"])],
)
def test_setitem_same_but_unordered(self, other):
# GH-24142
target = Categorical(["a", "b"], categories=["a", "b"])
mask = np.array([True, False])
target[mask] = other[mask]
expected = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_categorical_equal(target, expected)
@pytest.mark.parametrize(
"other",
[
Categorical(["b", "a"], categories=["b", "a", "c"]),
Categorical(["b", "a"], categories=["a", "b", "c"]),
Categorical(["a", "a"], categories=["a"]),
Categorical(["b", "b"], categories=["b"]),
],
)
def test_setitem_different_unordered_raises(self, other):
# GH-24142
target = Categorical(["a", "b"], categories=["a", "b"])
mask = np.array([True, False])
msg = "Cannot set a Categorical with another, without identical categories"
with pytest.raises(TypeError, match=msg):
target[mask] = other[mask]
@pytest.mark.parametrize(
"other",
[
Categorical(["b", "a"]),
Categorical(["b", "a"], categories=["b", "a"], ordered=True),
Categorical(["b", "a"], categories=["a", "b", "c"], ordered=True),
],
)
def test_setitem_same_ordered_raises(self, other):
        # GH-24142
target = Categorical(["a", "b"], categories=["a", "b"], ordered=True)
mask = np.array([True, False])
msg = "Cannot set a Categorical with another, without identical categories"
with pytest.raises(TypeError, match=msg):
target[mask] = other[mask]
def test_setitem_tuple(self):
# GH#20439
cat = Categorical([(0, 1), (0, 2), (0, 1)])
# This should not raise
cat[1] = cat[0]
assert cat[1] == (0, 1)
def test_setitem_listlike(self):
# GH#9469
# properly coerce the input indexers
np.random.seed(1)
cat = Categorical(
np.random.randint(0, 5, size=150000).astype(np.int8)
).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
cat[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = cat.codes[np.array([100000]).astype(np.int64)]
tm.assert_numpy_array_equal(result, np.array([5], dtype="int8"))
class TestCategoricalIndexing:
def test_getitem_slice(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
assert sliced == "d"
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(sliced, expected)
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
tm.assert_numpy_array_equal(result, expected)
def test_periodindex(self):
idx1 = PeriodIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M"
)
cat1 = Categorical(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
tm.assert_numpy_array_equal(cat1._codes, exp_arr)
tm.assert_index_equal(cat1.categories, exp_idx)
idx2 = PeriodIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M"
)
cat2 = Categorical(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
exp_idx2 = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M")
tm.assert_numpy_array_equal(cat2._codes, exp_arr)
tm.assert_index_equal(cat2.categories, exp_idx2)
idx3 = PeriodIndex(
[
"2013-12",
"2013-11",
"2013-10",
"2013-09",
"2013-08",
"2013-07",
"2013-05",
],
freq="M",
)
cat3 = Categorical(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
exp_idx = PeriodIndex(
[
"2013-05",
"2013-07",
"2013-08",
"2013-09",
"2013-10",
"2013-11",
"2013-12",
],
freq="M",
)
tm.assert_numpy_array_equal(cat3._codes, exp_arr)
tm.assert_index_equal(cat3.categories, exp_idx)
@pytest.mark.parametrize(
"null_val",
[None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"],
)
def test_periodindex_on_null_types(self, null_val):
# GH 46673
result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D")
expected = PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]")
assert result[2] is NaT
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_categories_assignments_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items "
"as the old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
# Combinations of sorted/unique:
@pytest.mark.parametrize(
"idx_values", [[1, 2, 3, 4], [1, 3, 2, 4], [1, 3, 3, 4], [1, 2, 2, 4]]
)
# Combinations of missing/unique
@pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]])
@pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex])
@pytest.mark.parametrize("dtype", [None, "category", "key"])
def test_get_indexer_non_unique(self, idx_values, key_values, key_class, dtype):
# GH 21448
key = key_class(key_values, categories=range(1, 5))
if dtype == "key":
dtype = key.dtype
# Test for flat index and CategoricalIndex with same/different cats:
idx = Index(idx_values, dtype=dtype)
expected, exp_miss = idx.get_indexer_non_unique(key_values)
result, res_miss = idx.get_indexer_non_unique(key)
tm.assert_numpy_array_equal(expected, result)
tm.assert_numpy_array_equal(exp_miss, res_miss)
exp_unique = idx.unique().get_indexer(key_values)
res_unique = idx.unique().get_indexer(key)
tm.assert_numpy_array_equal(res_unique, exp_unique)
def test_where_unobserved_nan(self):
ser = Series(Categorical(["a", "b"]))
result = ser.where([True, False])
expected = Series(Categorical(["a", None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
# all NA
ser = Series(Categorical(["a", "b"]))
result = ser.where([False, False])
expected = Series(Categorical([None, None], categories=["a", "b"]))
tm.assert_series_equal(result, expected)
def test_where_unobserved_categories(self):
ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
result = ser.where([True, True, False], other="b")
expected = Series(Categorical(["a", "b", "b"], categories=ser.cat.categories))
tm.assert_series_equal(result, expected)
def test_where_other_categorical(self):
ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"]))
other = Categorical(["b", "c", "a"], categories=["a", "c", "b", "d"])
result = ser.where([True, False, True], other)
expected = Series(Categorical(["a", "c", "c"], dtype=ser.dtype))
tm.assert_series_equal(result, expected)
def test_where_new_category_raises(self):
ser = Series(Categorical(["a", "b", "c"]))
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
ser.where([True, False, True], "d")
    def test_where_ordered_differs_raises(self):
ser = Series(
Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"], ordered=True)
)
other = Categorical(
["b", "c", "a"], categories=["a", "c", "b", "d"], ordered=True
)
with pytest.raises(TypeError, match="without identical categories"):
ser.where([True, False, True], other)
class TestContains:
def test_contains(self):
# GH#21508
cat = Categorical(list("aabbca"), categories=list("cab"))
assert "b" in cat
assert "z" not in cat
assert np.nan not in cat
with pytest.raises(TypeError, match="unhashable type: 'list'"):
assert [1] in cat
# assert codes NOT in index
assert 0 not in cat
assert 1 not in cat
cat = Categorical(list("aabbca") + [np.nan], categories=list("cab"))
assert np.nan in cat
@pytest.mark.parametrize(
"item, expected",
[
(Interval(0, 1), True),
(1.5, True),
(Interval(0.5, 1.5), False),
("a", False),
(Timestamp(1), False),
(Timedelta(1), False),
],
ids=str,
)
def test_contains_interval(self, item, expected):
# GH#23705
cat = Categorical(IntervalIndex.from_breaks(range(3)))
result = item in cat
assert result is expected
def test_contains_list(self):
# GH#21729
cat = Categorical([1, 2, 3])
assert "a" not in cat
with pytest.raises(TypeError, match="unhashable type"):
["a"] in cat
with pytest.raises(TypeError, match="unhashable type"):
["a", "b"] in cat
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean(index):
ser = Series(range(3))
idx = Categorical([True, False, True])
if index:
idx = CategoricalIndex(idx)
assert com.is_bool_indexer(idx)
result = ser[idx]
expected = ser[idx.astype("object")]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_mask_with_boolean_na_treated_as_false(index):
# https://github.com/pandas-dev/pandas/issues/31503
ser = Series(range(3))
idx = Categorical([True, False, None])
if index:
idx = CategoricalIndex(idx)
result = ser[idx]
expected = ser[idx.fillna(False)]
tm.assert_series_equal(result, expected)
@pytest.fixture
def non_coercible_categorical(monkeypatch):
"""
Monkeypatch Categorical.__array__ to ensure no implicit conversion.
Raises
------
ValueError
When Categorical.__array__ is called.
"""
# TODO(Categorical): identify other places where this may be
# useful and move to a conftest.py
def array(self, dtype=None):
raise ValueError("I cannot be converted.")
with monkeypatch.context() as m:
m.setattr(Categorical, "__array__", array)
yield
def test_series_at():
arr = Categorical(["a", "b", "c"])
ser = Series(arr)
result = ser.at[0]
assert result == "a"
| bsd-3-clause | b2d42ddb0b7e65abd00be8a58d99cd28 | 32.307292 | 88 | 0.550586 | 3.517602 | false | true | false | false |
pandas-dev/pandas | pandas/tests/frame/methods/test_first_and_last.py | 2 | 2819 | """
Note: includes tests for `last`
"""
import pytest
from pandas import (
DataFrame,
bdate_range,
)
import pandas._testing as tm
class TestFirst:
def test_first_subset(self, frame_or_series):
ts = tm.makeTimeDataFrame(freq="12h")
ts = tm.get_obj(ts, frame_or_series)
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
ts = tm.get_obj(ts, frame_or_series)
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_equal(result, ts[:0])
def test_first_last_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
obj = tm.get_obj(obj, frame_or_series)
msg = "'first' only supports a DatetimeIndex index"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.first("1D")
msg = "'last' only supports a DatetimeIndex index"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.last("1D")
def test_last_subset(self, frame_or_series):
ts = tm.makeTimeDataFrame(freq="12h")
ts = tm.get_obj(ts, frame_or_series)
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
ts = tm.get_obj(ts, frame_or_series)
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_equal(result, ts[:0])
@pytest.mark.parametrize("start, periods", [("2010-03-31", 1), ("2010-03-30", 2)])
def test_first_with_first_day_last_of_month(self, frame_or_series, start, periods):
# GH#29623
x = frame_or_series([1] * 100, index=bdate_range(start, periods=100))
result = x.first("1M")
expected = frame_or_series(
[1] * periods, index=bdate_range(start, periods=periods)
)
tm.assert_equal(result, expected)
    def test_first_with_first_day_end_of_freq_n_greater_one(self, frame_or_series):
# GH#29623
x = frame_or_series([1] * 100, index=bdate_range("2010-03-31", periods=100))
result = x.first("2M")
expected = frame_or_series(
[1] * 23, index=bdate_range("2010-03-31", "2010-04-30")
)
tm.assert_equal(result, expected)
| bsd-3-clause | a8aa6f703d03d9ad731e5e8ce8d36389 | 31.034091 | 87 | 0.572189 | 3.262731 | false | true | false | false |
pandas-dev/pandas | pandas/tests/indexes/numeric/test_join.py | 1 | 15039 | import numpy as np
import pytest
import pandas._testing as tm
from pandas.core.indexes.api import Index
class TestJoinInt64Index:
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
tm.assert_index_equal(joined, exp_joined)
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_inner(self):
index = Index(range(0, 20, 2), dtype=np.int64)
other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64)
other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64)
# not monotonic
res, lidx, ridx = index.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Index([2, 12], dtype=np.int64)
elidx = np.array([1, 6], dtype=np.intp)
eridx = np.array([4, 1], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True)
res2 = index.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 6], dtype=np.intp)
eridx = np.array([1, 4], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
index = Index(range(0, 20, 2), dtype=np.int64)
other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64)
other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64)
# not monotonic
res, lidx, ridx = index.join(other, how="left", return_indexers=True)
eres = index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True)
        eres = Index([1, 1, 2, 5, 7, 9])  # 1 appears twice in idx, so it is duplicated
eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self):
index = Index(range(0, 20, 2), dtype=np.int64)
other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64)
other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64)
# not monotonic
res, lidx, ridx = index.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp)
assert isinstance(other, Index) and other.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
# monotonic
res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp)
assert isinstance(other, Index) and other.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
assert ridx is None
# non-unique
idx = Index([1, 1, 2, 5])
idx2 = Index([1, 2, 5, 7, 9])
res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True)
        eres = Index([1, 1, 2, 5, 7, 9])  # 1 appears twice in idx, so it is duplicated
elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self):
index = Index(range(0, 20, 2), dtype=np.int64)
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = index.join(other, how="outer")
outer2 = other.join(index, how="outer")
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18])
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = index.join(other, how="inner")
inner2 = other.join(index, how="inner")
expected = Index([6, 8, 10])
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = index.join(other, how="left")
tm.assert_index_equal(left, index.astype(object))
left2 = other.join(index, how="left")
tm.assert_index_equal(left2, other)
right = index.join(other, how="right")
tm.assert_index_equal(right, other)
right2 = other.join(index, how="right")
tm.assert_index_equal(right2, index.astype(object))
def test_join_outer(self):
index = Index(range(0, 20, 2), dtype=np.int64)
other = Index([7, 12, 25, 1, 2, 5], dtype=np.int64)
other_mono = Index([1, 2, 5, 7, 12, 25], dtype=np.int64)
        # inputs are not monotonic; an outer join still guarantees a sorted result
res, lidx, ridx = index.join(other, how="outer", return_indexers=True)
noidx_res = index.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25], dtype=np.int64)
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp)
eridx = np.array(
[-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp
)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True)
noidx_res = index.join(other_mono, how="outer")
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp)
eridx = np.array(
[-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp
)
assert isinstance(res, Index) and res.dtype == np.int64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
class TestJoinUInt64Index:
@pytest.fixture
def index_large(self):
# large values used in TestUInt64Index where no compat needed with int64/float64
large = [2**63, 2**63 + 10, 2**63 + 15, 2**63 + 20, 2**63 + 25]
return Index(large, dtype=np.uint64)
def test_join_inner(self, index_large):
other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64"))
# not monotonic
res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Index(2**63 + np.array([10, 25], dtype="uint64"))
elidx = np.array([1, 4], dtype=np.intp)
eridx = np.array([5, 2], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index_large.join(
other_mono, how="inner", return_indexers=True
)
res2 = index_large.intersection(other_mono)
tm.assert_index_equal(res, res2)
elidx = np.array([1, 4], dtype=np.intp)
eridx = np.array([3, 5], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self, index_large):
other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64"))
# not monotonic
res, lidx, ridx = index_large.join(other, how="left", return_indexers=True)
eres = index_large
eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True)
eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
assert lidx is None
tm.assert_numpy_array_equal(ridx, eridx)
# non-unique
idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64"))
idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True)
        # 1 appears twice in idx, so it is duplicated in the result
eres = Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_right(self, index_large):
other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64"))
# not monotonic
res, lidx, ridx = index_large.join(other, how="right", return_indexers=True)
eres = other
elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(lidx, elidx)
assert isinstance(other, Index) and other.dtype == np.uint64
tm.assert_index_equal(res, eres)
assert ridx is None
# monotonic
res, lidx, ridx = index_large.join(
other_mono, how="right", return_indexers=True
)
eres = other_mono
elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp)
assert isinstance(other, Index) and other.dtype == np.uint64
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_index_equal(res, eres)
assert ridx is None
# non-unique
idx = Index(2**63 + np.array([1, 1, 2, 5], dtype="uint64"))
idx2 = Index(2**63 + np.array([1, 2, 5, 7, 9], dtype="uint64"))
res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True)
        # 1 appears twice in idx, so it is duplicated in the result
eres = Index(2**63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64"))
elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
def test_join_non_int_index(self, index_large):
other = Index(
2**63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object
)
outer = index_large.join(other, how="outer")
outer2 = other.join(index_large, how="outer")
expected = Index(
2**63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64")
)
tm.assert_index_equal(outer, outer2)
tm.assert_index_equal(outer, expected)
inner = index_large.join(other, how="inner")
inner2 = other.join(index_large, how="inner")
expected = Index(2**63 + np.array([10, 20], dtype="uint64"))
tm.assert_index_equal(inner, inner2)
tm.assert_index_equal(inner, expected)
left = index_large.join(other, how="left")
tm.assert_index_equal(left, index_large.astype(object))
left2 = other.join(index_large, how="left")
tm.assert_index_equal(left2, other)
right = index_large.join(other, how="right")
tm.assert_index_equal(right, other)
right2 = other.join(index_large, how="right")
tm.assert_index_equal(right2, index_large.astype(object))
def test_join_outer(self, index_large):
other = Index(2**63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64"))
other_mono = Index(2**63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64"))
        # inputs are not monotonic; an outer join still guarantees a sorted result
res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True)
noidx_res = index_large.join(other, how="outer")
tm.assert_index_equal(res, noidx_res)
eres = Index(
2**63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64")
)
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = index_large.join(
other_mono, how="outer", return_indexers=True
)
noidx_res = index_large.join(other_mono, how="outer")
tm.assert_index_equal(res, noidx_res)
elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp)
assert isinstance(res, Index) and res.dtype == np.uint64
tm.assert_index_equal(res, eres)
tm.assert_numpy_array_equal(lidx, elidx)
tm.assert_numpy_array_equal(ridx, eridx)
| bsd-3-clause | 8b969033526054d000e925f1635ce1a0 | 38.576316 | 88 | 0.568588 | 2.911713 | false | false | false | false |
pandas-dev/pandas | pandas/core/computation/align.py | 1 | 6154 | """
Core eval alignment algorithms.
"""
from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Callable,
Sequence,
)
import warnings
import numpy as np
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.computation.common import result_type_many
if TYPE_CHECKING:
from pandas._typing import F
from pandas.core.generic import NDFrame
from pandas.core.indexes.api import Index
def _align_core_single_unary_op(
term,
) -> tuple[partial | type[NDFrame], dict[str, Index] | None]:
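    """Return the reconstruction type and axes for a single unary operand."""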
typ: partial | type[NDFrame]
axes: dict[str, Index] | None = None
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
if hasattr(term.value, "axes"):
axes = _zip_axes_from_type(typ, term.value.axes)
return typ, axes
def _zip_axes_from_type(
typ: type[NDFrame], new_axes: Sequence[Index]
) -> dict[str, Index]:
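    """Map a pandas type's axis names (e.g. 'index', 'columns') to the new axes."""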
return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
def _any_pandas_objects(terms) -> bool:
"""
Check a sequence of terms for instances of PandasObject.
"""
return any(isinstance(term.value, PandasObject) for term in terms)
def _filter_special_cases(f) -> Callable[[F], F]:
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
term_dims = [terms[i].value.ndim for i in term_index]
from pandas import Series
ndims = Series(dict(zip(term_index, term_dims)))
    # initial axes are taken from the term with the most dimensions
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, ABCSeries)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how="outer")
for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, "reindex"):
transpose = isinstance(ti, ABCSeries) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
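                # order of magnitude of the size change reindexing would introduce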
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = (
f"Alignment difference on axis {axis} is larger "
f"than an order of magnitude on term {repr(terms[i].name)}, "
f"by more than {ordm:.4g}; performance may suffer."
)
warnings.warn(
w, category=PerformanceWarning, stacklevel=find_stack_level()
)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def align_terms(terms):
"""
Align a set of terms.
"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def reconstruct_object(typ, obj, axes, dtype):
"""
Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if not isinstance(typ, partial) and issubclass(typ, PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
    # The condition distinguishes a 0-dim array (returned for a scalar)
    # from a 1-element array, e.g. np.array(0) vs np.array([0])
if (
len(obj.shape) == 1
and len(obj) == 1
and not isinstance(ret_value, np.ndarray)
):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
| bsd-3-clause | 23e49271fac0e6ce80ba094be5129253 | 27.757009 | 85 | 0.59701 | 3.738761 | false | false | false | false |
pandas-dev/pandas | scripts/no_bool_in_generic.py | 6 | 2801 | """
Check that pandas/core/generic.py doesn't use bool as a type annotation.
There is already the method `bool`, so the alias `bool_t` should be used instead.
This is meant to be run as a pre-commit hook - to run it manually, you can do:
pre-commit run no-bool-in-core-generic --all-files
The function `visit` is adapted from a function by the same name in pyupgrade:
https://github.com/asottile/pyupgrade/blob/5495a248f2165941c5d3b82ac3226ba7ad1fa59d/pyupgrade/_data.py#L70-L113
"""
from __future__ import annotations
import argparse
import ast
import collections
from typing import Sequence
def visit(tree: ast.Module) -> dict[int, list[int]]:
"Step through tree, recording when nodes are in annotations."
in_annotation = False
nodes: list[tuple[bool, ast.AST]] = [(in_annotation, tree)]
to_replace = collections.defaultdict(list)
while nodes:
in_annotation, node = nodes.pop()
if isinstance(node, ast.Name) and in_annotation and node.id == "bool":
to_replace[node.lineno].append(node.col_offset)
for name in reversed(node._fields):
value = getattr(node, name)
if name in {"annotation", "returns"}:
next_in_annotation = True
else:
next_in_annotation = in_annotation
if isinstance(value, ast.AST):
nodes.append((next_in_annotation, value))
elif isinstance(value, list):
for value in reversed(value):
if isinstance(value, ast.AST):
nodes.append((next_in_annotation, value))
return to_replace
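# Illustrative example (not executed): for the source ``x: bool = True``,
# ``visit`` records line 1 with the column offset of ``bool``, while a call
# such as ``bool(x)`` is left alone because it is not an annotation.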
def replace_bool_with_bool_t(to_replace, content: str) -> str:
new_lines = []
for n, line in enumerate(content.splitlines(), start=1):
if n in to_replace:
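            # replace right-to-left so earlier column offsets stay valid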
for col_offset in reversed(to_replace[n]):
line = line[:col_offset] + "bool_t" + line[col_offset + 4 :]
new_lines.append(line)
return "\n".join(new_lines)
def check_for_bool_in_generic(content: str) -> tuple[bool, str]:
tree = ast.parse(content)
to_replace = visit(tree)
if not to_replace:
mutated = False
return mutated, content
mutated = True
return mutated, replace_bool_with_bool_t(to_replace, content)
def main(argv: Sequence[str] | None = None) -> None:
parser = argparse.ArgumentParser()
parser.add_argument("paths", nargs="*")
args = parser.parse_args(argv)
for path in args.paths:
with open(path, encoding="utf-8") as fd:
content = fd.read()
mutated, new_content = check_for_bool_in_generic(content)
if mutated:
with open(path, "w", encoding="utf-8") as fd:
fd.write(new_content)
if __name__ == "__main__":
main()
| bsd-3-clause | 85c1ba7b373194e035c81c0e3b4be8b6 | 31.195402 | 111 | 0.623706 | 3.647135 | false | false | false | false |
pandas-dev/pandas | ci/fix_wheels.py | 1 | 1917 | import os
import shutil
import sys
import zipfile
if len(sys.argv) != 3:
    raise ValueError(
        "User must pass the path to the wheel and the destination directory."
    )
wheel_path = sys.argv[1]
dest_dir = sys.argv[2]
# Figure out whether we are building on 32 or 64 bit python
is_32 = sys.maxsize <= 2**32
PYTHON_ARCH = "x86" if is_32 else "x64"
if not os.path.isdir(dest_dir):
    os.mkdir(dest_dir)
    print(f"Created directory {dest_dir}")
shutil.copy(wheel_path, dest_dir) # Remember to delete if process fails
wheel_name = os.path.basename(wheel_path)
success = True
exception = None
repaired_wheel_path = os.path.join(dest_dir, wheel_name)
# Wheels are zip files, so the redistributable DLLs can be appended in place
with zipfile.ZipFile(repaired_wheel_path, "a") as zipf:
try:
# TODO: figure out how licensing works for the redistributables
base_redist_dir = (
f"C:/Program Files (x86)/Microsoft Visual Studio/2019/"
f"Enterprise/VC/Redist/MSVC/14.29.30133/{PYTHON_ARCH}/"
f"Microsoft.VC142.CRT/"
)
zipf.write(
os.path.join(base_redist_dir, "msvcp140.dll"),
"pandas/_libs/window/msvcp140.dll",
)
zipf.write(
os.path.join(base_redist_dir, "concrt140.dll"),
"pandas/_libs/window/concrt140.dll",
)
if not is_32:
zipf.write(
os.path.join(base_redist_dir, "vcruntime140_1.dll"),
"pandas/_libs/window/vcruntime140_1.dll",
)
except Exception as e:
success = False
exception = e
if not success:
os.remove(repaired_wheel_path)
raise exception
print(f"Successfully repaired wheel was written to {repaired_wheel_path}")
| bsd-3-clause | 2cc607d9f3e96fcd600aa96e30075dcc | 32.051724 | 81 | 0.622848 | 3.454054 | false | false | false | false |
astropy/astropy | astropy/modeling/tests/test_models_quantities.py | 3 | 32205 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.functional_models import (
AiryDisk2D,
ArcCosine1D,
ArcSine1D,
ArcTangent1D,
Box1D,
Box2D,
Const1D,
Const2D,
Cosine1D,
Disk2D,
Ellipse2D,
Exponential1D,
Gaussian1D,
Gaussian2D,
KingProjectedAnalytic1D,
Linear1D,
Logarithmic1D,
Lorentz1D,
Moffat1D,
Moffat2D,
Multiply,
Planar2D,
RickerWavelet1D,
RickerWavelet2D,
Ring2D,
Scale,
Sersic1D,
Sersic2D,
Sine1D,
Tangent1D,
Trapezoid1D,
TrapezoidDisk2D,
Voigt1D,
)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
Schechter1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
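# Each entry below pairs a model class with unit-bearing parameters, a list of
# (input(s), expected output) evaluation tuples, and the expected bounding box
# (False when the model defines none).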
FUNC_MODELS_1D = [
{
"class": Gaussian1D,
"parameters": {"amplitude": 3 * u.Jy, "mean": 2 * u.m, "stddev": 30 * u.cm},
"evaluation": [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
"bounding_box": [0.35, 3.65] * u.m,
},
{
"class": Sersic1D,
"parameters": {"amplitude": 3 * u.MJy / u.sr, "r_eff": 2 * u.arcsec, "n": 4},
"evaluation": [(3 * u.arcsec, 1.3237148119468918 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Sine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Cosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Tangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": [-4, 0] / u.Hz,
},
{
"class": ArcSine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcCosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -1 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcTangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": False,
},
{
"class": Linear1D,
"parameters": {"slope": 3 * u.km / u.s, "intercept": 5000 * u.m},
"evaluation": [(6000 * u.ms, 23 * u.km)],
"bounding_box": False,
},
{
"class": Lorentz1D,
"parameters": {"amplitude": 2 * u.Jy, "x_0": 505 * u.nm, "fwhm": 100 * u.AA},
"evaluation": [(0.51 * u.micron, 1 * u.Jy)],
"bounding_box": [255, 755] * u.nm,
},
{
"class": Voigt1D,
"parameters": {
"amplitude_L": 2 * u.Jy,
"x_0": 505 * u.nm,
"fwhm_L": 100 * u.AA,
"fwhm_G": 50 * u.AA,
},
"evaluation": [(0.51 * u.micron, 1.0621795524 * u.Jy)],
"bounding_box": False,
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Trapezoid1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"width": 1 * u.um,
"slope": 5 * u.Jy / u.um,
},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.3, 5.5] * u.um,
},
{
"class": RickerWavelet1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "sigma": 1e-3 * u.mm},
"evaluation": [(1000 * u.nm, -0.09785050 * u.Jy)],
"bounding_box": [-5.6, 14.4] * u.um,
},
{
"class": Moffat1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 0.238853503 * u.Jy)],
"bounding_box": False,
},
{
"class": KingProjectedAnalytic1D,
"parameters": {
"amplitude": 1.0 * u.Msun / u.pc**2,
"r_core": 1.0 * u.pc,
"r_tide": 2.0 * u.pc,
},
"evaluation": [(0.5 * u.pc, 0.2 * u.Msun / u.pc**2)],
"bounding_box": [0.0 * u.pc, 2.0 * u.pc],
},
{
"class": Logarithmic1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 3.4657359027997265 * u.m)],
"bounding_box": False,
},
{
"class": Exponential1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 36.945280494653254 * u.m)],
"bounding_box": False,
},
]
SCALE_MODELS = [
{
"class": Scale,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m, 2 * u.m)],
"bounding_box": False,
},
{
"class": Multiply,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m / u.m, 2 * u.m)],
"bounding_box": False,
},
]
PHYS_MODELS_1D = [
{
"class": Plummer1D,
"parameters": {"mass": 3 * u.kg, "r_plum": 0.5 * u.m},
"evaluation": [(1 * u.m, 0.10249381 * u.kg / (u.m**3))],
"bounding_box": False,
},
{
"class": Drude1D,
"parameters": {
"amplitude": 1.0 * u.m,
"x_0": 2175.0 * u.AA,
"fwhm": 400.0 * u.AA,
},
"evaluation": [(2000 * u.AA, 0.5452317018423869 * u.m)],
"bounding_box": [-17825, 22175] * u.AA,
},
]
FUNC_MODELS_2D = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))
],
"bounding_box": [[-13.02230366, 15.02230366], [-12.02230366, 16.02230366]]
* u.m,
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [
[-0.5495097567963922, 4.549509756796392],
[0.4504902432036073, 5.549509756796393],
]
* u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
"bounding_box": [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]],
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Planar2D,
"parameters": {"slope_x": 2 * u.m, "slope_y": 3 * u.m, "intercept": 4 * u.m},
"evaluation": [(5 * u.m / u.m, 6 * u.m / u.m, 32 * u.m)],
"bounding_box": False,
},
]
POWERLAW_MODELS = [
{
"class": PowerLaw1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1},
"evaluation": [(1 * u.m, 500 * u.g)],
"bounding_box": False,
},
{
"class": BrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
},
"evaluation": [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
"bounding_box": False,
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
"bounding_box": False,
},
{
"class": ExponentialCutoffPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_0": 10 * u.cm,
"alpha": 1,
"x_cutoff": 1 * u.m,
},
"evaluation": [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
"bounding_box": False,
},
{
"class": LogParabola1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1, "beta": 2},
"evaluation": [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
"bounding_box": False,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
"bounding_box": False,
},
]
POLY_MODELS = [
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.one, "c1": 2 / u.m, "c2": 3 / u.m**2},
"evaluation": [(3 * u.m, 36 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {
"degree": 2,
"c0": 3 * u.kg,
"c1": 2 * u.kg / u.m,
"c2": 3 * u.kg / u.m**2,
},
"evaluation": [(3 * u.m, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.kg, "c1": 2 * u.kg, "c2": 3 * u.kg},
"evaluation": [(3 * u.one, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.one,
"c1_0": 2 / u.m,
"c2_0": 3 / u.m**2,
"c0_1": 3 / u.s,
"c0_2": -2 / u.s**2,
"c1_1": 5 / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg / u.m,
"c2_0": 3 * u.kg / u.m**2,
"c0_1": 3 * u.kg / u.s,
"c0_2": -2 * u.kg / u.s**2,
"c1_1": 5 * u.kg / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg,
"c2_0": 3 * u.kg,
"c0_1": 3 * u.kg,
"c0_2": -2 * u.kg,
"c1_1": 5 * u.kg,
},
"evaluation": [(3 * u.one, 2 * u.one, 64 * u.kg)],
"bounding_box": False,
},
]
MODELS = (
FUNC_MODELS_1D
+ SCALE_MODELS
+ FUNC_MODELS_2D
+ POWERLAW_MODELS
+ PHYS_MODELS_1D
+ POLY_MODELS
)
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
# These models will fail the fitting test because the built-in fitting data
# will produce non-finite values
NON_FINITE_LevMar_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
LogParabola1D,
Schechter1D,
]
# These models will fail the TRFLSQFitter fitting test due to non-finite values
NON_FINITE_TRF_MODELS = [
ArcSine1D,
ArcCosine1D,
Sersic1D,
Sersic2D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the LMLSQFitter fitting test due to non-finite values
NON_FINITE_LM_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
LogParabola1D,
Schechter1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the DogBoxLSQFitter fitting test due to non-finite values
NON_FINITE_DogBox_MODELS = [
Sersic1D,
Sersic2D,
ArcSine1D,
ArcCosine1D,
SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
]
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model["parameters"].items():
if value is None or key == "degree":
params[key] = value
else:
params[key] = np.repeat(value, 2)
params["n_models"] = 2
m = model["class"](**params)
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model["class"] == Drude1D:
params["x_0"][-1] = 0 * u.AA
MESSAGE = r"0 is not an allowed value for x_0"
with pytest.raises(InputParameterError, match=MESSAGE):
model["class"](**params)
@pytest.mark.parametrize("model", MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model["bounding_box"] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model["bounding_box"])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model["bounding_box"][i])
@pytest.mark.parametrize("model", MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model["class"](**model["parameters"])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()["x1"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x0": 1})
assert fixed_input_model.inputs_map()["x1"][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("fitter", fitters)
def test_models_fitting(model, fitter):
fitter = fitter()
if (
(
isinstance(fitter, LevMarLSQFitter)
and model["class"] in NON_FINITE_LevMar_MODELS
)
or (
isinstance(fitter, TRFLSQFitter) and model["class"] in NON_FINITE_TRF_MODELS
)
or (isinstance(fitter, LMLSQFitter) and model["class"] in NON_FINITE_LM_MODELS)
or (
isinstance(fitter, DogBoxLSQFitter)
and model["class"] in NON_FINITE_DogBox_MODELS
)
):
return
m = model["class"](**model["parameters"])
if len(model["evaluation"][0]) == 2:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.exp(-x.value**2) * model["evaluation"][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.linspace(1, 3, 100) * model["evaluation"][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model["evaluation"][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, it may come back
            # unitless or with, for example, a radian unit, so allow either
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
unit_mismatch_models = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5)),
],
"bounding_box": [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]]
* u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.K, 3 * u.Jy), (4 * u.K, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [
(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy),
],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [
(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy),
],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [
(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy),
],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy),
],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy),
],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [
(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy),
],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [
(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy / u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr),
],
"bounding_box": False,
},
]
@pytest.mark.parametrize("model", unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
MESSAGE = "Units of 'x' and 'y' inputs should match"
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
with pytest.raises(u.UnitsError, match=MESSAGE):
m.without_units_for_data(**kwargs)
mag_models = [
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.ABmag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.mag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.ABmag, 3 * u.mag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.mag, 3 * u.mag)],
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.ABmag)],
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.ABmag)],
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.ABmag)],
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.ABmag)],
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.ABmag)],
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.ABmag,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.ABmag), (1 * u.m, 15.125 * u.ABmag)],
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.ABmag, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.ABmag), (1 * u.m, 0 * u.ABmag)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.mag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.mag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
]
@pytest.mark.parametrize("model", mag_models)
def test_models_evaluate_magunits(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
def test_Schechter1D_errors():
# Non magnitude units are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.km, alpha=-1.9
)
MESSAGE = r"The units of magnitude and m_star must be a magnitude"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.km)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
MESSAGE = (
r".*: Units of input 'x', .*, could not be converted to required input units"
r" of .*"
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.STmag)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.mag)
| bsd-3-clause | fea9d8145c4cdbc1aeccb1825755134f | 28.600184 | 88 | 0.465952 | 2.917648 | false | false | false | false |
astropy/astropy | astropy/coordinates/builtin_frames/ecliptic.py | 3 | 9356 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.coordinates import representation as r
from astropy.coordinates.attributes import QuantityAttribute, TimeAttribute
from astropy.coordinates.baseframe import BaseCoordinateFrame, base_doc
from astropy.utils.decorators import format_doc
from .utils import DEFAULT_OBSTIME, EQUINOX_J2000
__all__ = [
"GeocentricMeanEcliptic",
"BarycentricMeanEcliptic",
"HeliocentricMeanEcliptic",
"BaseEclipticFrame",
"GeocentricTrueEcliptic",
"BarycentricTrueEcliptic",
"HeliocentricTrueEcliptic",
"HeliocentricEclipticIAU76",
"CustomBarycentricEcliptic",
]
doc_components_ecl = """
lon : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat : `~astropy.coordinates.Angle`, optional, keyword-only
The ecliptic latitude for this object (``lon`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The distance for this object from the {0}.
(``representation`` must be None).
    pm_lon_coslat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic longitude (including the ``cos(lat)``
factor) for this object (``pm_lat`` must also be given).
pm_lat : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in the ecliptic latitude for this object
(``pm_lon_coslat`` must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("specified location"), footer=""
)
class BaseEclipticFrame(BaseCoordinateFrame):
"""
A base class for frames that have names and conventions like that of
ecliptic frames.
.. warning::
        In the current version of astropy, the ecliptic frames do not yet have
        stringent accuracy tests. We recommend you test against "known-good"
        cases to ensure these frames are what you are looking for (and then
        ideally you would contribute these tests to Astropy!).
"""
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
doc_footer_geo = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth (necessary for transformation to
non-geocentric systems). Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Earth. Defaults to J2000.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("geocenter"), footer=doc_footer_geo
)
class GeocentricMeanEcliptic(BaseEclipticFrame):
"""
    Geocentric mean ecliptic coordinates. The origin of the coordinates is the
    geocenter (Earth), with the x axis pointing to the *mean* (not true) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(
base_doc, components=doc_components_ecl.format("geocenter"), footer=doc_footer_geo
)
class GeocentricTrueEcliptic(BaseEclipticFrame):
"""
    Geocentric true ecliptic coordinates. The origin of the coordinates is the
    geocenter (Earth), with the x axis pointing to the *true* (not mean) equinox
at the time specified by the ``equinox`` attribute, and the xy-plane in the
plane of the ecliptic for that date.
Be aware that the definition of "geocentric" here means that this frame
*includes* light deflection from the sun, aberration, etc when transforming
to/from e.g. ICRS.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
doc_footer_bary = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
"""
@format_doc(
base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary
)
class BarycentricMeanEcliptic(BaseEclipticFrame):
"""
    Barycentric mean ecliptic coordinates. The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *mean* (not true) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
@format_doc(
base_doc, components=doc_components_ecl.format("barycenter"), footer=doc_footer_bary
)
class BarycentricTrueEcliptic(BaseEclipticFrame):
"""
    Barycentric true ecliptic coordinates. The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *true* (not mean) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
doc_footer_helio = """
Other parameters
----------------
equinox : `~astropy.time.Time`, optional
The date to assume for this frame. Determines the location of the
x-axis and the location of the Earth and Sun.
Defaults to the 'J2000' equinox.
obstime : `~astropy.time.Time`, optional
The time at which the observation is taken. Used for determining the
position of the Sun. Defaults to J2000.
"""
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
class HeliocentricMeanEcliptic(BaseEclipticFrame):
"""
    Heliocentric mean ecliptic coordinates. The origin of the coordinates is the
    center of the sun, with the x axis pointing in the direction of
    the *mean* (not true) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
{params}
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(
base_doc,
components=doc_components_ecl.format("sun's center"),
footer=doc_footer_helio,
)
class HeliocentricTrueEcliptic(BaseEclipticFrame):
"""
    Heliocentric true ecliptic coordinates. The origin of the coordinates is the
    center of the sun, with the x axis pointing in the direction of
    the *true* (not mean) equinox at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
{params}
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer="")
class HeliocentricEclipticIAU76(BaseEclipticFrame):
"""
    Heliocentric mean (IAU 1976) ecliptic coordinates. The origin of the
    coordinates is the center of the sun, with the x axis pointing in the direction of
the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
ecliptic of J2000 (according to the IAU 1976/1980 obliquity model).
It has, therefore, a fixed equinox and an older obliquity value
than the rest of the frames.
The frame attributes are listed under **Other Parameters**.
{params}
"""
obstime = TimeAttribute(default=DEFAULT_OBSTIME)
@format_doc(base_doc, components=doc_components_ecl.format("barycenter"), footer="")
class CustomBarycentricEcliptic(BaseEclipticFrame):
"""
Barycentric ecliptic coordinates with custom obliquity.
    The origin of the coordinates is the
    barycenter of the solar system, with the x axis pointing in the direction of
    the *mean* (not true) equinox of J2000, and the xy-plane in the plane of the
    ecliptic tilted by a custom obliquity angle.
The frame attributes are listed under **Other Parameters**.
"""
obliquity = QuantityAttribute(default=84381.448 * u.arcsec, unit=u.arcsec)
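# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# a minimal use of one of the frames defined above via the public astropy
# API. The coordinate values are arbitrary.
if __name__ == "__main__":
    from astropy.coordinates import SkyCoord
    # An ICRS position with a distance, so that barycentric frames are
    # well defined.
    icrs = SkyCoord(10.68 * u.deg, 41.27 * u.deg, distance=710 * u.kpc)
    # Transform to the barycentric mean ecliptic frame at the J2000 equinox.
    ecl = icrs.transform_to(BarycentricMeanEcliptic(equinox="J2000"))
    print(ecl.lon, ecl.lat, ecl.distance)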
| bsd-3-clause | c20c996dba5390dbd483afd2cc1d44f6 | 35.404669 | 95 | 0.707674 | 3.721559 | false | false | false | false |
astropy/astropy | astropy/units/format/vounit.py | 3 | 8586 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "VOUnit" unit format.
"""
import copy
import keyword
import operator
import re
import warnings
from . import core, generic, utils
class VOUnit(generic.Generic):
"""
The IVOA standard for units used by the VO.
This is an implementation of `Units in the VO 1.0
<http://www.ivoa.net/documents/VOUnits/>`_.
"""
_explicit_custom_unit_regex = re.compile(r"^[YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+'$")
_custom_unit_regex = re.compile(r"^((?!\d)\w)+$")
_custom_units = {}
@staticmethod
def _generate_unit_names():
from astropy import units as u
from astropy.units import required_by_vounit as uvo
names = {}
deprecated_names = set()
bases = [
"A", "C", "D", "F", "G", "H", "Hz", "J", "Jy", "K", "N",
"Ohm", "Pa", "R", "Ry", "S", "T", "V", "W", "Wb", "a",
"adu", "arcmin", "arcsec", "barn", "beam", "bin", "cd",
"chan", "count", "ct", "d", "deg", "eV", "erg", "g", "h",
"lm", "lx", "lyr", "m", "mag", "min", "mol", "pc", "ph",
"photon", "pix", "pixel", "rad", "rad", "s", "solLum",
"solMass", "solRad", "sr", "u", "voxel", "yr",
] # fmt: skip
binary_bases = ["bit", "byte", "B"]
simple_units = ["Angstrom", "angstrom", "AU", "au", "Ba", "dB", "mas"]
si_prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y"
] # fmt: skip
binary_prefixes = ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei"]
deprecated_units = {
"a", "angstrom", "Angstrom", "au", "Ba", "barn", "ct",
"erg", "G", "ph", "pix",
} # fmt: skip
def do_defines(bases, prefixes, skips=[]):
for base in bases:
for prefix in prefixes:
key = prefix + base
if key in skips:
continue
if keyword.iskeyword(key):
continue
names[key] = getattr(u if hasattr(u, key) else uvo, key)
if base in deprecated_units:
deprecated_names.add(key)
do_defines(bases, si_prefixes, ["pct", "pcount", "yd"])
do_defines(binary_bases, si_prefixes + binary_prefixes, ["dB", "dbyte"])
do_defines(simple_units, [""])
return names, deprecated_names, []
@classmethod
def parse(cls, s, debug=False):
if s in ("unknown", "UNKNOWN"):
return None
if s == "":
return core.dimensionless_unscaled
# Check for excess solidi, but exclude fractional exponents (allowed)
if s.count("/") > 1 and s.count("/") - len(re.findall(r"\(\d+/\d+\)", s)) > 1:
raise core.UnitsError(
f"'{s}' contains multiple slashes, which is "
"disallowed by the VOUnit standard."
)
result = cls._do_parse(s, debug=debug)
if hasattr(result, "function_unit"):
raise ValueError("Function units are not yet supported in VOUnit.")
return result
@classmethod
def _get_unit(cls, t):
try:
return super()._get_unit(t)
except ValueError:
if cls._explicit_custom_unit_regex.match(t.value):
return cls._def_custom_unit(t.value)
if cls._custom_unit_regex.match(t.value):
warnings.warn(
f"Unit {t.value!r} not supported by the VOUnit standard. "
+ utils.did_you_mean_units(
t.value,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
core.UnitsWarning,
)
return cls._def_custom_unit(t.value)
raise
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "VOUnit", cls._to_decomposed_alternative
)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
        # The da- and d- prefixes are discouraged. This has the
        # effect of adding a scale to the value in the result.
if isinstance(unit, core.PrefixUnit):
if unit._represents.scale == 10.0:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'da' "
"(deka) prefix"
)
elif unit._represents.scale == 0.1:
raise ValueError(
f"In '{unit}': VOUnit can not represent units with the 'd' "
"(deci) prefix"
)
name = unit.get_format_name("vounit")
if unit in cls._custom_units.values():
return name
if name not in cls._units:
raise ValueError(f"Unit {name!r} is not part of the VOUnit standard")
if name in cls._deprecated_units:
utils.unit_deprecation_warning(
name, unit, "VOUnit", cls._to_decomposed_alternative
)
return name
@classmethod
def _def_custom_unit(cls, unit):
def def_base(name):
if name in cls._custom_units:
return cls._custom_units[name]
if name.startswith("'"):
return core.def_unit(
[name[1:-1], name],
format={"vounit": name},
namespace=cls._custom_units,
)
else:
return core.def_unit(name, namespace=cls._custom_units)
if unit in cls._custom_units:
return cls._custom_units[unit]
for short, full, factor in core.si_prefixes:
for prefix in short:
if unit.startswith(prefix):
base_name = unit[len(prefix) :]
base_unit = def_base(base_name)
return core.PrefixUnit(
[prefix + x for x in base_unit.names],
core.CompositeUnit(
factor, [base_unit], [1], _error_check=False
),
format={"vounit": prefix + base_unit.names[-1]},
namespace=cls._custom_units,
)
return def_base(unit)
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if "/" in power or "." in power:
out.append(f"{cls._get_unit_name(base)}({power})")
else:
out.append(f"{cls._get_unit_name(base)}**{power}")
return ".".join(out)
@classmethod
def to_string(cls, unit):
from astropy.units import core
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
if unit.physical_type == "dimensionless" and unit.scale != 1:
raise core.UnitScaleError(
"The VOUnit format is not able to "
"represent scale for dimensionless units. "
f"Multiply your data by {unit.scale:e}."
)
s = ""
if unit.scale != 1:
s += f"{unit.scale:.8g}"
pairs = list(zip(unit.bases, unit.powers))
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
from astropy.units import core
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f"{cls.to_string(unit)} (with data multiplied by {scale})"
return s
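# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# round-tripping a unit through the VOUnit formatter via the public units
# API. The unit chosen is arbitrary.
if __name__ == "__main__":
    from astropy import units as u
    vounit_string = (u.km / u.s).to_string(format="vounit")
    print(vounit_string)  # km.s**-1
    print(u.Unit(vounit_string, format="vounit"))  # parses back to km / s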
| bsd-3-clause | 34a6e2e56d90825eeb90792c23a96f7a | 33.761134 | 86 | 0.490333 | 3.902727 | false | false | false | false |
astropy/astropy | astropy/io/misc/asdf/tags/transform/tabular.py | 3 | 3539 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy import units as u
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from astropy.modeling.bounding_box import ModelBoundingBox
__all__ = ["TabularType"]
class TabularType(TransformType):
name = "transform/tabular"
version = "1.2.0"
types = [modeling.models.Tabular2D, modeling.models.Tabular1D]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node["points"][0][:],)
model = modeling.models.Tabular1D(
points=points,
lookup_table=lookup_table,
method=node["method"],
bounds_error=node["bounds_error"],
fill_value=fill_value,
)
elif dim == 2:
points = tuple(p[:] for p in node["points"])
model = modeling.models.Tabular2D(
points=points,
lookup_table=lookup_table,
method=node["method"],
bounds_error=node["bounds_error"],
fill_value=fill_value,
)
else:
tabular_class = modeling.models.tabular_model(dim)
points = tuple(p[:] for p in node["points"])
model = tabular_class(
points=points,
lookup_table=lookup_table,
method=node["method"],
bounds_error=node["bounds_error"],
fill_value=fill_value,
)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if model.fill_value is not None:
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
return node
@classmethod
def assert_equal(cls, a, b):
if isinstance(a.lookup_table, u.Quantity):
assert u.allclose(a.lookup_table, b.lookup_table)
assert u.allclose(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
for i in range(len(a_box)):
assert u.allclose(a_box[i], b_box[i])
else:
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
assert_array_equal(a_box, b_box)
assert a.method == b.method
if a.fill_value is None:
assert b.fill_value is None
elif np.isnan(a.fill_value):
assert np.isnan(b.fill_value)
else:
assert a.fill_value == b.fill_value
assert a.bounds_error == b.bounds_error
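# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# the kind of model this tag (de)serializes, built with the public modeling
# API. The table values are arbitrary; evaluation requires scipy.
if __name__ == "__main__":
    points = np.arange(5)
    lookup_table = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
    t1d = modeling.models.Tabular1D(points=points, lookup_table=lookup_table)
    print(t1d(2.5))  # linear interpolation between 4.0 and 9.0 -> 6.5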
| bsd-3-clause | 6330b3f59c1a2949ee9043352e9e918e | 35.484536 | 71 | 0.55722 | 3.880482 | false | false | false | false |
astropy/astropy | astropy/samp/hub_script.py | 3 | 6077 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import argparse
import copy
import sys
import time
from astropy import __version__, log
from .hub import SAMPHubServer
__all__ = ["hub_script"]
def hub_script(timeout=0):
"""
This main function is executed by the ``samp_hub`` command line tool.
"""
parser = argparse.ArgumentParser(prog="samp_hub " + __version__)
parser.add_argument(
"-k", "--secret", dest="secret", metavar="CODE", help="custom secret code."
)
parser.add_argument(
"-d", "--addr", dest="addr", metavar="ADDR", help="listening address (or IP)."
)
parser.add_argument(
"-p",
"--port",
dest="port",
metavar="PORT",
type=int,
help="listening port number.",
)
parser.add_argument(
"-f", "--lockfile", dest="lockfile", metavar="FILE", help="custom lockfile."
)
parser.add_argument(
"-w",
"--no-web-profile",
dest="web_profile",
action="store_false",
help="run the Hub disabling the Web Profile.",
default=True,
)
parser.add_argument(
"-P",
"--pool-size",
dest="pool_size",
metavar="SIZE",
type=int,
help="the socket connections pool size.",
default=20,
)
timeout_group = parser.add_argument_group(
"Timeout group",
"Special options to setup hub and client timeouts."
"It contains a set of special options that allows to set up the Hub and "
"clients inactivity timeouts, that is the Hub or client inactivity time "
"interval after which the Hub shuts down or unregisters the client. "
"Notification of samp.hub.disconnect MType is sent to the clients "
"forcibly unregistered for timeout expiration.",
)
timeout_group.add_argument(
"-t",
"--timeout",
dest="timeout",
metavar="SECONDS",
help=(
"set the Hub inactivity timeout in SECONDS. By default it "
"is set to 0, that is the Hub never expires."
),
type=int,
default=0,
)
timeout_group.add_argument(
"-c",
"--client-timeout",
dest="client_timeout",
metavar="SECONDS",
help=(
"set the client inactivity timeout in SECONDS. By default it "
"is set to 0, that is the client never expires."
),
type=int,
default=0,
)
parser.add_argument_group(timeout_group)
log_group = parser.add_argument_group(
"Logging options",
"Additional options which allow to customize the logging output. By "
"default the SAMP Hub uses the standard output and standard error "
"devices to print out INFO level logging messages. Using the options "
"here below it is possible to modify the logging level and also "
"specify the output files where redirect the logging messages.",
)
log_group.add_argument(
"-L",
"--log-level",
dest="loglevel",
metavar="LEVEL",
help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
type=str,
choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"],
default="INFO",
)
log_group.add_argument(
"-O",
"--log-output",
dest="logout",
metavar="FILE",
help="set the output file for the log messages.",
default="",
)
parser.add_argument_group(log_group)
adv_group = parser.add_argument_group(
"Advanced group",
"Advanced options addressed to facilitate administrative tasks and "
"allow new non-standard Hub behaviors. In particular the --label "
"options is used to assign a value to hub.label token and is used to "
"assign a name to the Hub instance. "
"The very special --multi option allows to start a Hub in multi-instance mode. "
"Multi-instance mode is a non-standard Hub behavior that enables "
"multiple contemporaneous running Hubs. Multi-instance hubs place "
"their non-standard lock-files within the <home directory>/.samp-1 "
"directory naming them making use of the format: "
"samp-hub-<PID>-<ID>, where PID is the Hub process ID while ID is an "
"internal ID (integer).",
)
adv_group.add_argument(
"-l",
"--label",
dest="label",
metavar="LABEL",
help="assign a LABEL to the Hub.",
default="",
)
adv_group.add_argument(
"-m",
"--multi",
dest="mode",
help=(
"run the Hub in multi-instance mode generating a custom "
"lockfile with a random name."
),
action="store_const",
const="multiple",
default="single",
)
parser.add_argument_group(adv_group)
options = parser.parse_args()
try:
if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
log.setLevel(options.loglevel)
if options.logout != "":
context = log.log_to_file(options.logout)
else:
class dummy_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
context = dummy_context()
with context:
args = copy.deepcopy(options.__dict__)
del args["loglevel"]
del args["logout"]
hub = SAMPHubServer(**args)
hub.start(False)
if not timeout:
while hub.is_running:
time.sleep(0.01)
else:
time.sleep(timeout)
hub.stop()
except KeyboardInterrupt:
try:
hub.stop()
except NameError:
pass
except OSError as e:
print(f"[SAMP] Error: I/O error({e.errno}): {e.strerror}")
sys.exit(1)
except SystemExit:
pass
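# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# ``hub_script`` backs the ``samp_hub`` console entry point, e.g.
#     samp_hub --port 12345 --log-level DEBUG --label my-hub
# (the flag values above are arbitrary). Programmatically, a short-lived hub
# can be exercised through the ``timeout`` argument:
if __name__ == "__main__":
    hub_script(timeout=1)  # start a hub, keep it alive for ~1 s, then stop it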
| bsd-3-clause | 34a3c8c17ed6d1abfd54feca0f5ed798 | 27.397196 | 88 | 0.558664 | 4.214286 | false | false | false | false |
astropy/astropy | astropy/visualization/transform.py | 3 | 1083 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BaseTransform", "CompositeTransform"]
class BaseTransform:
"""
A transformation object.
This is used to construct transformations such as scaling, stretching, and
so on.
"""
def __add__(self, other):
return CompositeTransform(other, self)
class CompositeTransform(BaseTransform):
"""
A combination of two transforms.
Parameters
----------
transform_1 : :class:`astropy.visualization.BaseTransform`
The first transform to apply.
transform_2 : :class:`astropy.visualization.BaseTransform`
The second transform to apply.
"""
def __init__(self, transform_1, transform_2):
super().__init__()
self.transform_1 = transform_1
self.transform_2 = transform_2
def __call__(self, values, clip=True):
return self.transform_2(self.transform_1(values, clip=clip), clip=clip)
@property
def inverse(self):
return self.__class__(self.transform_2.inverse, self.transform_1.inverse)
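# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# composing two toy transforms with ``+``. ``Scale`` is a hypothetical
# subclass defined only for this demo.
if __name__ == "__main__":
    class Scale(BaseTransform):
        def __init__(self, factor):
            self.factor = factor
        def __call__(self, values, clip=True):
            # ``clip`` is accepted only to match the transform call signature.
            return values * self.factor
        @property
        def inverse(self):
            return Scale(1 / self.factor)
    double = Scale(2.0)
    quadruple = double + double  # a CompositeTransform applying ``double`` twice
    print(quadruple(3.0))  # 12.0
    print(quadruple.inverse(12.0))  # 3.0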
| bsd-3-clause | a514b120bf3ee71ba6acda22cf97e0da | 25.414634 | 81 | 0.648199 | 4.102273 | false | false | false | false |
astropy/astropy | astropy/io/ascii/qdp.py | 3 | 20227 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import copy
import re
import warnings
from collections.abc import Iterable
import numpy as np
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from . import basic, core
def _line_type(line, delimiter=None):
"""Interpret a QDP file line
Parameters
----------
    line : str
        a single line of the file
    delimiter : str, optional
        separator between the values on a data line (defaults to whitespace)
    Returns
    -------
    type : str
        Line type: "comment", "command", "new", or "data,N", where N is the
        number of columns on a data line
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
_command_re = r"READ [TS]ERR(\s+[0-9]+)+"
sep = delimiter
if delimiter is None:
sep = r"\s+"
_new_re = rf"NO({sep}NO)+"
_data_re = rf"({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)"
_type_re = rf"^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$"
_line_type_re = re.compile(_type_re)
line = line.strip()
if not line:
return "comment"
match = _line_type_re.match(line)
if match is None:
raise ValueError(f"Unrecognized QDP line: {line}")
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == "data":
return f"data,{len(val.split(sep=delimiter))}"
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
"""Read through the list of QDP file lines and label each line by type
Parameters
----------
lines : list
List containing one file line in each entry
Returns
-------
contents : list
List containing the type for each line (see `line_type_and_data`)
ncol : int
The number of columns in the data lines. Must be the same throughout
the file
Examples
--------
>>> line0 = "! A comment"
>>> line1 = "543 12 456.0"
>>> lines = [line0, line1]
>>> types, ncol = _get_type_from_list_of_lines(lines)
>>> types[0]
'comment'
>>> types[1]
'data,3'
>>> ncol
3
>>> lines.append("23")
>>> _get_type_from_list_of_lines(lines)
Traceback (most recent call last):
...
ValueError: Inconsistent number of columns
"""
types = [_line_type(line, delimiter=delimiter) for line in lines]
current_ncol = None
for type_ in types:
if type_.startswith("data,"):
ncol = int(type_[5:])
if current_ncol is None:
current_ncol = ncol
elif ncol != current_ncol:
raise ValueError("Inconsistent number of columns")
return types, current_ncol
def _get_lines_from_file(qdp_file):
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
else:
raise ValueError("invalid value of qdb_file")
return lines
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
"""Get all tables from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
input_colnames : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
delimiter : str
Delimiter for the values in the table.
Returns
-------
list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
lines = _get_lines_from_file(qdp_file)
contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
table_list = []
err_specs = {}
colnames = None
comment_text = ""
initial_comments = ""
command_lines = ""
current_rows = None
for line, datatype in zip(lines, contents):
line = line.strip().lstrip("!")
# Is this a comment?
if datatype == "comment":
comment_text += line + "\n"
continue
if datatype == "command":
# The first time I find commands, I save whatever comments into
            # the initial comments.
if command_lines == "":
initial_comments = comment_text
comment_text = ""
if err_specs != {}:
warnings.warn(
"This file contains multiple command blocks. Please verify",
AstropyUserWarning,
)
command_lines += line + "\n"
continue
if datatype.startswith("data"):
# The first time I find data, I define err_specs
if err_specs == {} and command_lines != "":
for cline in command_lines.strip().split("\n"):
command = cline.strip().split()
# This should never happen, but just in case.
if len(command) < 3:
continue
err_specs[command[1].lower()] = [int(c) for c in command[2:]]
if colnames is None:
colnames = _interpret_err_lines(err_specs, ncol, names=input_colnames)
if current_rows is None:
current_rows = []
values = []
for v in line.split(delimiter):
if v == "NO":
values.append(np.ma.masked)
else:
# Understand if number is int or float
try:
values.append(int(v))
except ValueError:
values.append(float(v))
current_rows.append(values)
continue
if datatype == "new":
# Save table to table_list and reset
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split(
"\n"
)
new_table.meta["comments"] = comment_text.strip().split("\n")
# Reset comments
comment_text = ""
table_list.append(new_table)
current_rows = None
continue
# At the very end, if there is still a table being written, let's save
# it to the table_list
if current_rows is not None:
new_table = Table(names=colnames, rows=current_rows)
new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
new_table.meta["comments"] = comment_text.strip().split("\n")
table_list.append(new_table)
return table_list
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith("_nerr"):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith("_perr"):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
tables : list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
if table_id is None:
warnings.warn(
"table_id not specified. Reading the first available table",
AstropyUserWarning,
)
table_id = 0
tables = _get_tables_from_qdp_file(
qdp_file, input_colnames=names, delimiter=delimiter
)
return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other Parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
"""
import io
fobj = io.StringIO()
if "initial_comments" in table.meta and table.meta["initial_comments"] != []:
for line in table.meta["initial_comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if "comments" in table.meta and table.meta["comments"] != []:
for line in table.meta["comments"]:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, "w") as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
class QDPSplitter(core.DefaultSplitter):
"""
Split on space for QDP tables
"""
delimiter = " "
class QDPHeader(basic.CommentedHeaderHeader):
"""
    Header that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
comment = "!"
write_comment = "!"
class QDPData(basic.BasicData):
"""
    Data that uses the :class:`astropy.io.ascii.qdp.QDPSplitter`
"""
splitter_class = QDPSplitter
fill_values = [(core.masked, "NO")]
comment = "!"
write_comment = None
class QDP(basic.Basic):
"""Quick and Dandy Plot table.
Example::
! Initial comment line 1
! Initial comment line 2
READ TERR 1
READ SERR 3
! Table 0 comment
!a a(pos) a(neg) b be c d
53000.5 0.25 -0.5 1 1.5 3.5 2
54000.5 1.25 -1.5 2 2.5 4.5 3
NO NO NO NO NO
! Table 1 comment
!a a(pos) a(neg) b be c d
54000.5 2.25 -2.5 NO 3.5 5.5 5
55000.5 3.25 -3.5 4 4.5 6.5 nan
The input table above contains some initial comments, the error commands,
then two tables.
This file format can contain multiple tables, separated by a line full
of ``NO``s. Comments are exclamation marks, and missing values are single
``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
The QDP format differentiates between data and error columns. The table
above has commands::
READ TERR 1
READ SERR 3
which mean that after data column 1 there will be two error columns
    containing its positive and negative error bars, then data column 2 without
error bars, then column 3, then a column with the symmetric error of column
3, then the remaining data columns.
As explained below, table headers are highly inconsistent. Possible
comments containing column names will be ignored and columns will be called
``col1``, ``col2``, etc. unless the user specifies their names with the
    ``names=`` keyword argument.
When passing column names, pass **only the names of the data columns, not
the error columns.**
Error information will be encoded in the names of the table columns.
(e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
column ``a``, ``b_err`` the symmetric error of column ``b``.)
    When writing tables to this format, users can pass an ``err_specs`` keyword
    set to a dictionary such as ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
    columns 1 and 2 will have two additional columns each with their positive
and negative errors, and data column 3 will have an additional column with
a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
above)
Headers are just comments, and tables distributed by various missions
can differ greatly in their use of conventions. For example, light curves
distributed by the Swift-Gehrels mission have an extra space in one header
entry that makes the number of labels inconsistent with the number of cols.
For this reason, we ignore the comments that might encode the column names
and leave the name specification to the user.
Example::
> Extra space
> |
> v
>! MJD Err (pos) Err(neg) Rate Error
>53000.123456 2.378e-05 -2.378472e-05 NO 0.212439
These readers and writer classes will strive to understand which of the
comments belong to all the tables, and which ones to each single table.
General comments will be stored in the ``initial_comments`` meta of each
table. The comments of each table will be stored in the ``comments`` meta.
Example::
t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])
reads the second table (``table_id=1``) in file ``example.qdp`` containing
the table above. There are four column names but seven data columns, why?
Because the ``READ SERR`` and ``READ TERR`` commands say that there are
three error columns.
``t.meta['initial_comments']`` will contain the initial two comment lines
in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
The table can be written to another file, preserving the same information,
as::
t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
Note how the ``terr`` and ``serr`` commands are passed to the writer.
"""
_format_name = "qdp"
_io_registry_can_write = True
_io_registry_suffix = ".qdp"
_description = "Quick and Dandy Plotter"
header_class = QDPHeader
data_class = QDPData
def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
super().__init__()
self.table_id = table_id
self.names = names
self.err_specs = err_specs
self.delimiter = sep
def read(self, table):
self.lines = self.inputter.get_lines(table, newline="\n")
return _read_table_qdp(
self.lines,
table_id=self.table_id,
names=self.names,
delimiter=self.delimiter,
)
def write(self, table):
self._check_multidim_table(table)
lines = _write_table_qdp(table, err_specs=self.err_specs)
return lines
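# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream module):
# a round trip through the module-level helpers. The column names and values
# are arbitrary; ``err_specs={"serr": [1]}`` marks data column 1 as having a
# symmetric error column.
if __name__ == "__main__":
    demo = Table({"a": [1.0, 2.0], "a_err": [0.1, 0.2], "b": [3.0, 4.0]})
    qdp_lines = _write_table_qdp(demo, err_specs={"serr": [1]})
    roundtrip = _read_table_qdp(qdp_lines, names=["a", "b"], table_id=0)
    print(roundtrip)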
| bsd-3-clause | 50619e24e944f4e070faa06cf7dd72e5 | 30.35969 | 118 | 0.568943 | 3.794222 | false | false | false | false |
astropy/astropy | astropy/utils/tests/test_data_info.py | 3 | 2855 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import QTable
from astropy.table.index import SlicedIndex
from astropy.time import Time
from astropy.utils.data_info import dtype_info_name
STRING_TYPE_NAMES = {(True, "S"): "bytes", (True, "U"): "str"}
DTYPE_TESTS = (
(np.array(b"abcd").dtype, STRING_TYPE_NAMES[(True, "S")] + "4"),
(np.array("abcd").dtype, STRING_TYPE_NAMES[(True, "U")] + "4"),
("S4", STRING_TYPE_NAMES[(True, "S")] + "4"),
("U4", STRING_TYPE_NAMES[(True, "U")] + "4"),
(np.void, "void"),
(np.int32, "int32"),
(bool, "bool"),
(float, "float64"),
("<f4", "float32"),
("u8", "uint64"),
("c16", "complex128"),
("object", "object"),
)
@pytest.mark.parametrize("input,output", DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
def test_info_no_copy_numpy():
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
col = [1, 2]
t = QTable([col], names=["col"])
t.add_index("col")
val = t["col"][0]
# Returns a numpy scalar (e.g. np.float64) with no .info
assert isinstance(val, np.number)
with pytest.raises(AttributeError):
val.info
val = t["col"][:]
assert val.info.indices == []
cols = [[1, 2] * u.m, Time([1, 2], format="cxcsec")]
@pytest.mark.parametrize("col", cols)
def test_info_no_copy_mixin_with_index(col):
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
t = QTable([col], names=["col"])
t.add_index("col")
val = t["col"][0]
assert "info" not in val.__dict__
assert val.info.indices == []
val = t["col"][:]
assert "info" in val.__dict__
assert val.info.indices == []
val = t[:]["col"]
assert "info" in val.__dict__
assert isinstance(val.info.indices[0], SlicedIndex)
def test_info_no_copy_skycoord():
"""Test that getting a single item from Table SkyCoord column object does
not copy info. Cannot create an index on a SkyCoord currently.
"""
col = (SkyCoord([1, 2], [1, 2], unit="deg"),)
t = QTable([col], names=["col"])
val = t["col"][0]
assert "info" not in val.__dict__
assert val.info.indices == []
val = t["col"][:]
assert val.info.indices == []
val = t[:]["col"]
assert val.info.indices == []
| bsd-3-clause | 3fc8dfe0dd8fbd0843e351b901052a8e | 27.55 | 83 | 0.595447 | 3.207865 | false | true | false | false |
astropy/astropy | astropy/io/votable/tests/vo_test.py | 3 | 34597 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import gzip
import io
import pathlib
import sys
from unittest import mock
import numpy as np
# THIRD-PARTY
import pytest
from numpy.testing import assert_array_equal
from astropy.io.votable import tree
from astropy.io.votable.exceptions import W39, VOTableSpecError, VOWarning
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, "float_repr_style"):
legacy_float_repr = sys.float_repr_style == "legacy"
else:
legacy_float_repr = sys.platform.startswith("win")
def assert_validate_schema(filename, version):
if sys.platform.startswith("win"):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, "File did not validate against VOTable schema"
def test_parse_single_table():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table = parse_single_table(get_pkg_data_filename("data/regression.xml"))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table2 = parse_single_table(
get_pkg_data_filename("data/regression.xml"), table_number=1
)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename("data/regression.xml"), table_number=3)
def _test_regression(tmp_path, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename("data/regression.xml"),
_debug_python_based_parser=_python_based,
)
table = votable.get_first_table()
dtypes = [
(("string test", "string_test"), "|O8"),
(("fixed string test", "string_test_2"), "<U10"),
("unicode_test", "|O8"),
(("unicode test", "fixed_unicode_test"), "<U10"),
(("string array test", "string_array_test"), "<U4"),
("unsignedByte", "|u1"),
("short", "<i2"),
("int", "<i4"),
("long", "<i8"),
("double", "<f8"),
("float", "<f4"),
("array", "|O8"),
("bit", "|b1"),
("bitarray", "|b1", (3, 2)),
("bitvararray", "|O8"),
("bitvararray2", "|O8"),
("floatComplex", "<c8"),
("doubleComplex", "<c16"),
("doubleComplexArray", "|O8"),
("doubleComplexArrayFixed", "<c16", (2,)),
("boolean", "|b1"),
("booleanArray", "|b1", (4,)),
("nulls", "<i4"),
("nulls_array", "<i4", (2, 2)),
("precision1", "<f8"),
("precision2", "<f8"),
("doublearray", "|O8"),
("bitarray2", "|b1", (16,)),
]
if sys.byteorder == "big":
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace("<", ">")
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(
str(tmp_path / "regression.tabledata.xml"),
_debug_python_based_parser=_python_based,
)
assert_validate_schema(str(tmp_path / "regression.tabledata.xml"), votable.version)
if binary_mode == 1:
votable.get_first_table().format = "binary"
votable.version = "1.1"
elif binary_mode == 2:
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
votable.version = "1.3"
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmp_path / "regression.binary.xml"), votable.version)
# Also try passing a file handle
with open(str(tmp_path / "regression.binary.xml"), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = "tabledata"
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
assert_validate_schema(
str(tmp_path / "regression.bin.tabledata.xml"), votable.version
)
with open(
get_pkg_data_filename(
f"data/regression.bin.tabledata.truth.{votable.version}.xml"
),
encoding="utf-8",
) as fd:
truth = fd.readlines()
with open(str(tmp_path / "regression.bin.tabledata.xml"), encoding="utf-8") as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmp_path / "regression.bin.tabledata.xml.gz"),
_astropy_version="testing",
_debug_python_based_parser=_python_based,
)
with gzip.GzipFile(str(tmp_path / "regression.bin.tabledata.xml.gz"), "rb") as gzfd:
output = gzfd.readlines()
output = [x.decode("utf-8").rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail("legacy_float_repr")
def test_regression(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_python_based_parser(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, True)
@pytest.mark.xfail("legacy_float_repr")
def test_regression_binary2(tmp_path):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmp_path, False, 2)
class TestFixups:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename("data/regression.xml")
).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array["string_test_2"], self.array["fixed string test"])
class TestReferences:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == "boolean"
assert fieldref.get_ref().datatype == "boolean"
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == "INPUT"
assert paramref.get_ref().datatype == "float"
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
columns = ["string_test", "unsignedByte", "bitarray"]
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
def test_select_columns_by_name():
columns = ["string_test", "unsignedByte", "bitarray"]
table = parse(
get_pkg_data_filename("data/regression.xml"), columns=columns
).get_first_table()
array = table.array
mask = table.array.mask
assert array["string_test"][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask["unicode_test"])
class TestParse:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array["string_test"].dtype.type, np.object_)
assert_array_equal(
self.array["string_test"],
["String & test", "String & test", "XXXX", "", ""],
)
def test_fixed_string_test(self):
assert issubclass(self.array["string_test_2"].dtype.type, np.unicode_)
assert_array_equal(
self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""]
)
def test_unicode_test(self):
assert issubclass(self.array["unicode_test"].dtype.type, np.object_)
assert_array_equal(
self.array["unicode_test"],
["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""],
)
def test_fixed_unicode_test(self):
assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.unicode_)
assert_array_equal(
self.array["fixed_unicode_test"],
["Ceçi n'est", "வணக்கம்", "0123456789", "", ""],
)
def test_unsignedByte(self):
assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8)
assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255])
assert not np.any(self.mask["unsignedByte"])
def test_short(self):
assert issubclass(self.array["short"].dtype.type, np.int16)
assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask["short"])
def test_int(self):
assert issubclass(self.array["int"].dtype.type, np.int32)
assert_array_equal(
self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789]
)
assert_array_equal(self.mask["int"], [False, False, False, False, True])
def test_long(self):
assert issubclass(self.array["long"].dtype.type, np.int64)
assert_array_equal(
self.array["long"],
[
922337203685477,
123456789,
-1152921504606846976,
1152921504606846975,
123456789,
],
)
assert_array_equal(self.mask["long"], [False, True, False, False, True])
def test_double(self):
assert issubclass(self.array["double"].dtype.type, np.float64)
assert_array_equal(
self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf]
)
assert_array_equal(self.mask["double"], [False, False, False, True, False])
def test_float(self):
assert issubclass(self.array["float"].dtype.type, np.float32)
assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask["float"], [False, False, False, False, True])
def test_array(self):
assert issubclass(self.array["array"].dtype.type, np.object_)
match = [
[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]],
]
for a, b in zip(self.array["array"], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data["array"][3].mask[0][0]
assert self.array.data["array"][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array["bit"].dtype.type, np.bool_)
assert_array_equal(self.array["bit"], [True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array["bitarray"].dtype.type, np.bool_)
assert self.array["bitarray"].shape == (5, 3, 2)
assert_array_equal(
self.array["bitarray"],
[
[[True, False], [True, True], [False, True]],
[[False, True], [False, False], [True, True]],
[[True, True], [True, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
],
)
def test_bitarray_mask(self):
assert_array_equal(
self.mask["bitarray"],
[
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[True, True], [True, True], [True, True]],
[[True, True], [True, True], [True, True]],
],
)
def test_bitvararray(self):
assert issubclass(self.array["bitvararray"].dtype.type, np.object_)
match = [
[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[],
[],
]
for a, b in zip(self.array["bitvararray"], match):
assert_array_equal(a, b)
match_mask = [
[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False,
False,
]
for a, b in zip(self.array["bitvararray"], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array["bitvararray2"].dtype.type, np.object_)
match = [
[],
[
[[False, True], [False, False], [True, False]],
[[True, False], [True, False], [True, False]],
],
[[[True, True], [True, True], [True, True]]],
[],
[],
]
for a, b in zip(self.array["bitvararray2"], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array["floatComplex"].dtype.type, np.complex64)
assert_array_equal(
self.array["floatComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j],
)
assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128)
assert_array_equal(
self.array["doubleComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j],
)
assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_)
assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0]
def test_boolean(self):
assert issubclass(self.array["boolean"].dtype.type, np.bool_)
assert_array_equal(self.array["boolean"], [True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask["boolean"], [False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array["booleanArray"].dtype.type, np.bool_)
assert_array_equal(
self.array["booleanArray"],
[
[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False],
],
)
def test_boolean_array_mask(self):
assert_array_equal(
self.mask["booleanArray"],
[
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True],
],
)
def test_nulls(self):
assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9])
assert_array_equal(self.mask["nulls"], [False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(
self.array["nulls_array"],
[
[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]],
],
)
assert_array_equal(
self.mask["nulls_array"],
[
[[True, True], [True, True]],
[[False, False], [False, False]],
[[True, False], [True, False]],
[[False, True], [False, True]],
[[True, True], [True, True]],
],
)
def test_double_array(self):
assert issubclass(self.array["doublearray"].dtype.type, np.object_)
assert len(self.array["doublearray"][0]) == 0
assert_array_equal(
self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1]
)
assert_array_equal(
self.array.data["doublearray"][1].mask,
[False, False, False, False, False, False, True],
)
def test_bit_array2(self):
assert_array_equal(
self.array["bitarray2"][0],
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
True,
True,
False,
False,
False,
False,
],
)
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"][0])
assert np.all(self.mask["bitarray2"][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id("J2000")
assert coosys.system == "eq_FK5"
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id("QUERY_STATUS")
assert info.value == "OK"
if self.votable.version != "1.1":
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..."
def test_repr(self):
assert "3 tables" in repr(self.votable)
assert (
repr(list(self.votable.iter_fields_and_params())[0])
== '<PARAM ID="awesome" arraysize="*" datatype="float" '
'name="INPUT" unit="deg" value="[0.0 0.0]"/>'
)
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == "[</>]"
class TestThroughTableData(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
def test_schema(self, tmp_path):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = tmp_path / "test_through_tabledata.xml"
with open(fn, "wb") as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, "1.1")
class TestThroughBinary(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.get_first_table().format = "binary"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask["bit"])
def test_bitarray_mask(self):
assert not np.any(self.mask["bitarray"])
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"])
class TestThroughBinary2(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
votable.version = "1.3"
votable.get_first_table()._config["version_1_3_or_later"] = True
votable.get_first_table().format = "binary2"
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
# https://github.com/astropy/astropy/issues/13341
@np.errstate(over="ignore")
def test_open_files():
for filename in get_pkg_data_filenames("data", pattern="*.xml"):
if filename.endswith("custom_datatype.xml") or filename.endswith(
"timesys_errors.xml"
):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename("data/too_many_columns.xml.gz"))
def test_build_from_scratch(tmp_path):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
tree.Field(
votable, ID="filename", name="filename", datatype="char", arraysize="1"
),
tree.Field(
votable, ID="matrix", name="matrix", datatype="double", arraysize="2x2"
),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmp_path / "new_votable.xml"))
votable = parse(str(tmp_path / "new_votable.xml"))
table = votable.get_first_table()
assert_array_equal(
table.array.mask,
np.array(
[
(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]]),
],
dtype=[("filename", "?"), ("matrix", "?", (2, 2))],
),
)
def test_validate(test_path_object=False):
"""
    ``test_path_object`` is needed for ``test_validate_path_object`` below,
    so that the file can be passed as a pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename("data/regression.xml")
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
@mock.patch("subprocess.Popen")
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {"communicate.return_value": ("ok", "ko"), "returncode": 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename("data/empty_table.xml"), xmllint=True)
def test_validate_path_object():
"""
Validating when source is passed as path object. (#4412)
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmp_path):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename("data/regression.xml"))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(tmp_path / "regression.compressed.xml", "wb") as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(tmp_path / "regression.compressed.xml", "rb") as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import Field, Resource, Table, VOTableFile
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend(
[
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2"),
]
)
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == "test1.xml"
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename("data/regression.xml")
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == "win32":
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename("data/nonstandard_units.xml"))
assert isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename("data/nonstandard_units.xml"), unit_format="generic"
)
assert not isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = "t2"
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
    for res in vtf2.resources:
        assert len(res.tables) == 2
        assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(
get_pkg_data_filename("data/no_resource.xml"), output, xmllint=False
)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(get_pkg_data_filename("data/no_resource.txt"), encoding="utf-8") as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile="truth", tofile="output")
)
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename("data/custom_datatype.xml"),
datatype_mapping={"bar": "int"},
)
table = votable.get_first_table()
assert table.array.dtype["foo"] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id("time_frame")
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == "TCB"
assert timesys.refposition == "BARYCENTER"
timesys = votable.get_timesys_by_id("mjd_origin")
assert timesys.timeorigin == "MJD-origin"
assert timesys.timescale == "TDB"
assert timesys.refposition == "EMBARYCENTER"
timesys = votable.get_timesys_by_id("jd_origin")
assert timesys.timeorigin == "JD-origin"
assert timesys.timescale == "TT"
assert timesys.refposition == "HELIOCENTER"
timesys = votable.get_timesys_by_id("no_origin")
assert timesys.timeorigin is None
assert timesys.timescale == "UTC"
assert timesys.refposition == "TOPOCENTER"
def test_timesys():
votable = parse(get_pkg_data_filename("data/timesys.xml"))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename("data/timesys.xml"))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename("data/timesys_errors.xml"), output, xmllint=False)
outstr = output.getvalue()
assert "E23: Invalid timeorigin attribute 'bad-origin'" in outstr
assert "E22: ID attribute is required for all TIMESYS elements" in outstr
assert "W48: Unknown attribute 'refposition_mispelled' on TIMESYS" in outstr
| bsd-3-clause | 50e78e01e5aa8d33cec381dbe2b65c8f | 31.548964 | 88 | 0.585298 | 3.440872 | false | true | false | false |
astropy/astropy | astropy/convolution/tests/test_convolve_kernels.py | 3 | 4445 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
from astropy import units as u
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.convolution.kernels import (
Box2DKernel,
Gaussian2DKernel,
Moffat2DKernel,
Tophat2DKernel,
)
SHAPES_ODD = [[15, 15], [31, 31]]
SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]] # FIXME: not used ?!
NOSHAPE = [[None, None]]
WIDTHS = [2, 3, 4, 5]
KERNELS = []
for shape in SHAPES_ODD + NOSHAPE:
for width in WIDTHS:
KERNELS.append(
Gaussian2DKernel(
x_stddev=width,
x_size=shape[0],
y_size=shape[1],
mode="oversample",
factor=10,
)
)
KERNELS.append(
Box2DKernel(
width=width,
x_size=shape[0],
y_size=shape[1],
mode="oversample",
factor=10,
)
)
KERNELS.append(
Tophat2DKernel(
radius=width,
x_size=shape[0],
y_size=shape[1],
mode="oversample",
factor=10,
)
)
KERNELS.append(
Moffat2DKernel(
gamma=width,
alpha=2,
x_size=shape[0],
y_size=shape[1],
mode="oversample",
factor=10,
)
)
class Test2DConvolutions:
@pytest.mark.parametrize("kernel", KERNELS)
def test_centered_makekernel(self, kernel):
"""
Test smoothing of an image with a single positive pixel
"""
shape = kernel.array.shape
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary="fill")
c1 = convolve(x, kernel, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize("kernel", KERNELS)
def test_random_makekernel(self, kernel):
"""
Test smoothing of an image made of random noise
"""
shape = kernel.array.shape
x = np.random.randn(*shape)
c2 = convolve_fft(x, kernel, boundary="fill")
c1 = convolve(x, kernel, boundary="fill")
# not clear why, but these differ by a couple ulps...
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(
("shape", "width"), list(itertools.product(SHAPES_ODD, WIDTHS))
)
def test_uniform_smallkernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Uses a simple, small kernel
"""
        if width % 2 == 0:
            # convolve does not accept even-shaped kernels
            return
kernel = np.ones([width, width])
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary="fill")
c1 = convolve(x, kernel, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(
("shape", "width"), list(itertools.product(SHAPES_ODD, [1, 3, 5]))
)
def test_smallkernel_Box2DKernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Compares a small uniform kernel to the Box2DKernel
"""
kernel1 = np.ones([width, width]) / float(width) ** 2
kernel2 = Box2DKernel(width, mode="oversample", factor=10)
x = np.zeros(shape)
xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
x[xslice] = 1.0
c2 = convolve_fft(x, kernel2, boundary="fill")
c1 = convolve_fft(x, kernel1, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
c2 = convolve(x, kernel2, boundary="fill")
c1 = convolve(x, kernel1, boundary="fill")
assert_almost_equal(c1, c2, decimal=12)
def test_gaussian_2d_kernel_quantity():
# Make sure that the angle can be a quantity
kernel1 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=45 * u.deg)
kernel2 = Gaussian2DKernel(x_stddev=2, y_stddev=4, theta=np.pi / 4)
assert_allclose(kernel1.array, kernel2.array)
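# Editorial sketch (an addition, not part of the astropy test suite): the
# direct and FFT-based convolutions compared in the tests above can be
# exercised standalone. Kernel size and tolerance are illustrative choices.
if __name__ == "__main__":
    img = np.zeros((15, 15))
    img[7, 7] = 1.0  # single positive pixel, as in the tests above
    kern = Gaussian2DKernel(x_stddev=2, x_size=15, y_size=15)
    agree = np.allclose(
        convolve(img, kern, boundary="fill"),
        convolve_fft(img, kern, boundary="fill"),
        atol=1e-12,
    )
    print(f"direct and FFT convolution agree: {agree}")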
| bsd-3-clause | 5a36977f60ffc15c000a0cc16c356df8 | 26.955975 | 74 | 0.554331 | 3.502758 | false | true | false | false |
astropy/astropy | astropy/timeseries/periodograms/lombscargle/implementations/scipy_impl.py | 3 | 2387 | import numpy as np
def lombscargle_scipy(t, y, frequency, normalization="standard", center_data=True):
"""Lomb-Scargle Periodogram
This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
Lomb-Scargle periodogram. This is a relatively fast version of the naive
O[N^2] algorithm, but cannot handle heteroskedastic errors.
Parameters
----------
    t, y : array-like
        times and values of the data points. These should be broadcastable
        to the same shape. Neither should be a `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data.
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
try:
from scipy import signal
except ImportError:
raise ImportError("scipy must be installed to use lombscargle_scipy")
t, y = np.broadcast_arrays(t, y)
# Scipy requires floating-point input
t = np.asarray(t, dtype=float)
y = np.asarray(y, dtype=float)
frequency = np.asarray(frequency, dtype=float)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
if center_data:
y = y - y.mean()
# Note: scipy input accepts angular frequencies
p = signal.lombscargle(t, y, 2 * np.pi * frequency)
if normalization == "psd":
pass
elif normalization == "standard":
p *= 2 / (t.size * np.mean(y**2))
elif normalization == "log":
p = -np.log(1 - 2 * p / (t.size * np.mean(y**2)))
elif normalization == "model":
p /= 0.5 * t.size * np.mean(y**2) - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
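# Editorial usage sketch (an addition, not part of the module): recover an
# injected frequency from irregular sampling. Requires scipy at runtime; the
# signal parameters are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    t = np.sort(rng.uniform(0, 100, 200))
    y = np.sin(2 * np.pi * 0.17 * t) + 0.1 * rng.standard_normal(200)
    frequency = np.linspace(0.01, 0.5, 1000)
    power = lombscargle_scipy(t, y, frequency, normalization="standard")
    print(f"peak frequency: {frequency[np.argmax(power)]:.3f} (expected ~0.17)")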
| bsd-3-clause | 11899f364c2670a9d9e399dc20bea8c1 | 33.1 | 83 | 0.63008 | 3.706522 | false | false | false | false |
astropy/astropy | examples/coordinates/plot_sgr-coordinate-frame.py | 5 | 10531 | r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and
the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example,
we will define a coordinate system defined by the plane of orbit of the
Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003).
The Sgr coordinate system is often referred to in terms of two angular
coordinates, :math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import matplotlib.pyplot as plt
import numpy as np
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
SGR_MATRIX = (
np.diag([1.,1.,-1.])
@ rotation_matrix(SGR_PSI, "z")
@ rotation_matrix(SGR_THETA, "x")
@ rotation_matrix(SGR_PHI, "z")
)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
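##############################################################################
# (Editorial aside, not part of the original example.) Because the two
# registered transformations are a matrix and its transpose, their product
# should be the identity -- a quick check that ``SGR_MATRIX`` is orthogonal:

assert np.allclose(galactic_to_sgr() @ sgr_to_galactic(), np.eye(3))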
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
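##############################################################################
# (Editorial aside, not part of the original example.) A round-trip sanity
# check: transforming the ICRS coordinates back to the Sagittarius frame
# should recover the original latitudes to numerical precision.

roundtrip = icrs.transform_to(Sagittarius)
print(np.allclose(roundtrip.Beta.radian, sgr.Beta.radian, atol=1e-10))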
| bsd-3-clause | d04d27148ce0526dc9c93c7dad4b4b3a | 42.159836 | 90 | 0.660526 | 3.74236 | false | false | false | false |
astropy/astropy | astropy/units/format/fits.py | 3 | 4876 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handles the "FITS" unit format.
"""
import copy
import keyword
import operator
import numpy as np
from . import core, generic, utils
class Fits(generic.Generic):
"""
The FITS standard unit format.
This supports the format defined in the Units section of the `FITS
Standard <https://fits.gsfc.nasa.gov/fits_standard.html>`_.
"""
name = "fits"
@staticmethod
def _generate_unit_names():
from astropy import units as u
names = {}
deprecated_names = set()
bases = [
"m", "g", "s", "rad", "sr", "K", "A", "mol", "cd",
"Hz", "J", "W", "V", "N", "Pa", "C", "Ohm", "S",
"F", "Wb", "T", "H", "lm", "lx", "a", "yr", "eV",
"pc", "Jy", "mag", "R", "bit", "byte", "G", "barn",
] # fmt: skip
deprecated_bases = []
prefixes = [
"y", "z", "a", "f", "p", "n", "u", "m", "c", "d",
"", "da", "h", "k", "M", "G", "T", "P", "E", "Z", "Y",
] # fmt: skip
special_cases = {"dbyte": u.Unit("dbyte", 0.1 * u.byte)}
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
elif key in special_cases:
names[key] = special_cases[key]
else:
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
"deg", "arcmin", "arcsec", "mas", "min", "h", "d", "Ry",
"solMass", "u", "solLum", "solRad", "AU", "lyr", "count",
"ct", "photon", "ph", "pixel", "pix", "D", "Sun", "chan",
"bin", "voxel", "adu", "beam", "erg", "Angstrom", "angstrom",
] # fmt: skip
deprecated_units = []
for unit in simple_units + deprecated_units:
names[unit] = getattr(u, unit)
for unit in deprecated_units:
deprecated_names.add(unit)
return names, deprecated_names, []
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
f"Unit '{unit}' not supported by the FITS standard. "
+ utils.did_you_mean_units(
unit,
cls._units,
cls._deprecated_units,
cls._to_decomposed_alternative,
),
)
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], "FITS", cls._to_decomposed_alternative
)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name("fits")
cls._validate_unit(name)
return name
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
parts = []
if isinstance(unit, core.CompositeUnit):
base = np.log10(unit.scale)
if base % 1.0 != 0.0:
raise core.UnitScaleError(
"The FITS unit format is not able to represent scales "
"that are not powers of 10. Multiply your data by "
f"{unit.scale:e}."
)
elif unit.scale != 1.0:
parts.append(f"10**{int(base)}")
pairs = list(zip(unit.bases, unit.powers))
if len(pairs):
pairs.sort(key=operator.itemgetter(1), reverse=True)
parts.append(cls._format_unit_list(pairs))
s = " ".join(parts)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
@classmethod
def _to_decomposed_alternative(cls, unit):
try:
s = cls.to_string(unit)
except core.UnitScaleError:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return f"{cls.to_string(unit)} (with data multiplied by {scale})"
return s
@classmethod
def parse(cls, s, debug=False):
result = super().parse(s, debug)
if hasattr(result, "function_unit"):
raise ValueError("Function units are not yet supported for FITS units.")
return result
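# Editorial usage sketch (an addition, not part of the module). The exact
# ordering of units in the output string is an implementation detail.
if __name__ == "__main__":
    from astropy import units as u

    print(Fits.to_string(u.erg / u.s / u.cm**2))  # e.g. 'erg s-1 cm-2'
    print(Fits.parse("10**3 Jy"))  # scale factors must be powers of 10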
| bsd-3-clause | 2c99fd7e3c6167fe13d858367d16b22a | 31.078947 | 84 | 0.501231 | 3.818324 | false | false | false | false |
astropy/astropy | astropy/stats/spatial.py | 3 | 12953 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import math
import numpy as np
__all__ = ["RipleysKEstimator"]
class RipleysKEstimator:
"""
Estimators for Ripley's K function for two-dimensional spatial data.
See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
practical aspects of those estimators.
Parameters
----------
area : float
Area of study from which the points where observed.
    x_max, y_max : float, float, optional
        Maximum rectangular coordinates of the area of study.
        Required if ``mode == 'translation'`` or ``mode == 'ohser'``.
    x_min, y_min : float, float, optional
        Minimum rectangular coordinates of the area of study.
        Required if ``mode == 'var-width'`` or ``mode == 'ohser'``.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> from astropy.stats import RipleysKEstimator
>>> z = np.random.uniform(low=5, high=10, size=(100, 2))
>>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
... x_min=5, y_min=5)
>>> r = np.linspace(0, 2.5, 100)
>>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
References
----------
.. [1] Peebles, P.J.E. *The large scale structure of the universe*.
<https://ui.adsabs.harvard.edu/abs/1980lssu.book.....P>
.. [2] Spatial descriptive statistics.
<https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
.. [3] Package spatstat.
<https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
.. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
Wiley, New York.
.. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
Point Fields, Akademie Verlag GmbH, Chichester.
"""
def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
self.area = area
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, (float, int)) and value > 0:
self._area = value
else:
raise ValueError(f"area is expected to be a positive number. Got {value}.")
@property
def y_max(self):
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or isinstance(value, (float, int)):
self._y_max = value
else:
raise ValueError(
f"y_max is expected to be a real number or None. Got {value}."
)
@property
def x_max(self):
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or isinstance(value, (float, int)):
self._x_max = value
else:
raise ValueError(
f"x_max is expected to be a real number or None. Got {value}."
)
@property
def y_min(self):
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or isinstance(value, (float, int)):
self._y_min = value
else:
raise ValueError(f"y_min is expected to be a real number. Got {value}.")
@property
def x_min(self):
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or isinstance(value, (float, int)):
self._x_min = value
else:
raise ValueError(f"x_min is expected to be a real number. Got {value}.")
def __call__(self, data, radii, mode="none"):
return self.evaluate(data=data, radii=radii, mode=mode)
def _pairwise_diffs(self, data):
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
k = 0
for i in range(npts - 1):
size = npts - i - 1
diff[k : k + size] = abs(data[i] - data[i + 1 :])
k += size
return diff
def poisson(self, radii):
"""
Evaluates the Ripley K function for the homogeneous Poisson process,
also known as Complete State of Randomness (CSR).
Parameters
----------
radii : 1D array
Set of distances in which Ripley's K function will be evaluated.
Returns
-------
output : 1D array
Ripley's K function evaluated at ``radii``.
"""
return np.pi * radii * radii
def Lfunction(self, data, radii, mode="none"):
"""
Evaluates the L function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
def Hfunction(self, data, radii, mode="none"):
"""
Evaluates the H function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return self.Lfunction(data, radii, mode=mode) - radii
def evaluate(self, data, radii, mode="none"):
"""
Evaluates the Ripley K estimator for a given set of values ``radii``.
Parameters
----------
data : 2D array
Set of observed points in as a n by 2 array which will be used to
estimate Ripley's K function.
radii : 1D array
Set of distances in which Ripley's K estimator will be evaluated.
Usually, it's common to consider max(radii) < (area/2)**0.5.
mode : str
Keyword which indicates the method for edge effects correction.
Available methods are 'none', 'translation', 'ohser', 'var-width',
and 'ripley'.
* 'none'
this method does not take into account any edge effects
whatsoever.
* 'translation'
computes the intersection of rectangular areas centered at
the given points provided the upper bounds of the
dimensions of the rectangular area of study. It assumes that
all the points lie in a bounded rectangular region satisfying
x_min < x_i < x_max; y_min < y_i < y_max. A detailed
description of this method can be found on ref [4].
* 'ohser'
this method uses the isotropized set covariance function of
the window of study as a weight to correct for
edge-effects. A detailed description of this method can be
found on ref [4].
* 'var-width'
this method considers the distance of each observed point to
the nearest boundary of the study window as a factor to
account for edge-effects. See [3] for a brief description of
this method.
* 'ripley'
this method is known as Ripley's edge-corrected estimator.
The weight for edge-correction is a function of the
proportions of circumferences centered at each data point
which crosses another data point of interest. See [3] for
a detailed description of this method.
Returns
-------
ripley : 1D array
Ripley's K function estimator evaluated at ``radii``.
"""
data = np.asarray(data)
if not data.shape[1] == 2:
raise ValueError(
"data must be an n by 2 array, where n is the "
"number of observed points."
)
npts = len(data)
ripley = np.zeros(len(radii))
if mode == "none":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
for r in range(len(radii)):
ripley[r] = (distances < radii[r]).sum()
ripley = self.area * 2.0 * ripley / (npts * (npts - 1))
# eq. 15.11 Stoyan book page 283
elif mode == "translation":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
intersec_area = ((self.x_max - self.x_min) - diff[:, 0]) * (
(self.y_max - self.y_min) - diff[:, 1]
)
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Stoyan book page 123 and eq 15.13
elif mode == "ohser":
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
a = self.area
b = max(
(self.y_max - self.y_min) / (self.x_max - self.x_min),
(self.x_max - self.x_min) / (self.y_max - self.y_min),
)
x = distances / math.sqrt(a / b)
u = np.sqrt((x * x - 1) * (x > 1))
v = np.sqrt((x * x - b**2) * (x < math.sqrt(b**2 + 1)) * (x > b))
c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
c3 = (
2
* np.arcsin(
((b - u * v) / (x * x)) * (x > b) * (x < math.sqrt(b**2 + 1))
)
+ 2 * u
+ 2 * v / b
- b
- (1 + x * x) / b
)
cov_func = (a / np.pi) * (
c1 * (x >= 0) * (x <= 1)
+ c2 * (x > 1) * (x <= b)
+ c3 * (b < x) * (x < math.sqrt(b**2 + 1))
)
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / cov_func) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Cressie book eq 8.2.20 page 616
elif mode == "var-width":
lt_dist = np.minimum(
np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min),
)
for r in range(len(radii)):
for i in range(npts):
for j in range(npts):
if i != j:
diff = abs(data[i] - data[j])
dist = math.sqrt((diff * diff).sum())
if dist < radii[r] < lt_dist[i]:
ripley[r] = ripley[r] + 1
lt_dist_sum = (lt_dist > radii[r]).sum()
if not lt_dist_sum == 0:
ripley[r] = ripley[r] / lt_dist_sum
ripley = self.area * ripley / npts
# Cressie book eq 8.4.22 page 640
elif mode == "ripley":
hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double)
ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2, dtype=np.double)
for k in range(npts - 1):
min_hor_dist = min(self.x_max - data[k][0], data[k][0] - self.x_min)
min_ver_dist = min(self.y_max - data[k][1], data[k][1] - self.y_min)
start = (k * (2 * (npts - 1) - (k - 1))) // 2
end = ((k + 1) * (2 * (npts - 1) - k)) // 2
hor_dist[start:end] = min_hor_dist * np.ones(npts - 1 - k)
ver_dist[start:end] = min_ver_dist * np.ones(npts - 1 - k)
diff = self._pairwise_diffs(data)
dist = np.hypot(diff[:, 0], diff[:, 1])
dist_ind = dist <= np.hypot(hor_dist, ver_dist)
w1 = (
1
- (
np.arccos(np.minimum(ver_dist, dist) / dist)
+ np.arccos(np.minimum(hor_dist, dist) / dist)
)
/ np.pi
)
w2 = (
3 / 4
- 0.5
* (
np.arccos(ver_dist / dist * ~dist_ind)
+ np.arccos(hor_dist / dist * ~dist_ind)
)
/ np.pi
)
weight = dist_ind * w1 + ~dist_ind * w2
for r in range(len(radii)):
ripley[r] = ((dist < radii[r]) / weight).sum()
ripley = self.area * 2.0 * ripley / (npts * (npts - 1))
else:
raise ValueError(f"mode {mode} is not implemented.")
return ripley
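# Editorial usage sketch (an addition, not part of the module): for points
# drawn from a homogeneous Poisson process, the edge-corrected estimates
# should track the analytic CSR curve from ``poisson``. Sample size and radii
# below are illustrative choices.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.uniform(low=0, high=10, size=(200, 2))
    kest = RipleysKEstimator(area=100, x_min=0, x_max=10, y_min=0, y_max=10)
    radii = np.linspace(0.1, 2.0, 5)
    print(kest(pts, radii, mode="ripley"))
    print(kest.poisson(radii))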
| bsd-3-clause | 6425f744d5ecf84361e082c504e15983 | 35.384831 | 87 | 0.499035 | 3.575214 | false | false | false | false |
astropy/astropy | astropy/units/si.py | 3 | 9129 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the SI units. They are also available in the
`astropy.units` namespace.
"""
import numpy as _numpy
from astropy.constants import si as _si
from .core import Unit, UnitBase, def_unit
_ns = globals()
###########################################################################
# DIMENSIONLESS
def_unit(
["percent", "pct"],
Unit(0.01),
namespace=_ns,
prefixes=False,
doc="percent: one hundredth of unity, factor 0.01",
format={"generic": "%", "console": "%", "cds": "%", "latex": r"\%", "unicode": "%"},
)
###########################################################################
# LENGTH
def_unit(
["m", "meter"],
namespace=_ns,
prefixes=True,
doc="meter: base unit of length in SI",
)
def_unit(
["micron"],
um,
namespace=_ns,
doc="micron: alias for micrometer (um)",
format={"latex": r"\mu m", "unicode": "\N{MICRO SIGN}m"},
)
def_unit(
["Angstrom", "AA", "angstrom"],
0.1 * nm,
namespace=_ns,
doc="ångström: 10 ** -10 m",
prefixes=[(["m", "milli"], ["milli", "m"], 1.0e-3)],
format={"latex": r"\mathring{A}", "unicode": "Å", "vounit": "Angstrom"},
)
###########################################################################
# VOLUMES
def_unit(
(["l", "L"], ["liter"]),
1000 * cm**3.0,
namespace=_ns,
prefixes=True,
format={"latex": r"\mathcal{l}", "unicode": "ℓ"},
doc="liter: metric unit of volume",
)
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(
["rad", "radian"],
namespace=_ns,
prefixes=True,
doc=(
"radian: angular measurement of the ratio between the length "
"on an arc and its radius"
),
)
def_unit(
["deg", "degree"],
_numpy.pi / 180.0 * rad,
namespace=_ns,
prefixes=True,
doc="degree: angular measurement 1/360 of full rotation",
format={"latex": r"{}^{\circ}", "unicode": "°"},
)
def_unit(
["hourangle"],
15.0 * deg,
namespace=_ns,
prefixes=False,
doc="hour angle: angular measurement with 24 in a full circle",
format={"latex": r"{}^{h}", "unicode": "ʰ"},
)
def_unit(
["arcmin", "arcminute"],
1.0 / 60.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc minute: angular measurement",
format={"latex": r"{}^{\prime}", "unicode": "′"},
)
def_unit(
["arcsec", "arcsecond"],
1.0 / 3600.0 * deg,
namespace=_ns,
prefixes=True,
doc="arc second: angular measurement",
)
# These special formats should only be used for the non-prefix versions
arcsec._format = {"latex": r"{}^{\prime\prime}", "unicode": "″"}
def_unit(
["mas"],
0.001 * arcsec,
namespace=_ns,
doc="milli arc second: angular measurement",
)
def_unit(
["uas"],
0.000001 * arcsec,
namespace=_ns,
doc="micro arc second: angular measurement",
format={"latex": r"\mu as", "unicode": "μas"},
)
def_unit(
["sr", "steradian"],
rad**2,
namespace=_ns,
prefixes=True,
doc="steradian: unit of solid angle in SI",
)
###########################################################################
# TIME
def_unit(
["s", "second"],
namespace=_ns,
prefixes=True,
exclude_prefixes=["a"],
doc="second: base unit of time in SI.",
)
def_unit(
["min", "minute"],
60 * s,
prefixes=True,
namespace=_ns,
)
def_unit(
["h", "hour", "hr"],
3600 * s,
namespace=_ns,
prefixes=True,
exclude_prefixes=["p"],
)
def_unit(
["d", "day"],
24 * h,
namespace=_ns,
prefixes=True,
exclude_prefixes=["c", "y"],
)
def_unit(
["sday"],
86164.09053 * s,
namespace=_ns,
doc="Sidereal day (sday) is the time of one rotation of the Earth.",
)
def_unit(
["wk", "week"],
7 * day,
namespace=_ns,
)
def_unit(
["fortnight"],
2 * wk,
namespace=_ns,
)
def_unit(
["a", "annum"],
365.25 * d,
namespace=_ns,
prefixes=True,
exclude_prefixes=["P"],
)
def_unit(
["yr", "year"],
365.25 * d,
namespace=_ns,
prefixes=True,
)
###########################################################################
# FREQUENCY
def_unit(
["Hz", "Hertz", "hertz"],
1 / s,
namespace=_ns,
prefixes=True,
doc="Frequency",
)
###########################################################################
# MASS
def_unit(
["kg", "kilogram"],
namespace=_ns,
doc="kilogram: base unit of mass in SI.",
)
def_unit(
["g", "gram"],
1.0e-3 * kg,
namespace=_ns,
prefixes=True,
exclude_prefixes=["k", "kilo"],
)
def_unit(
["t", "tonne"],
1000 * kg,
namespace=_ns,
doc="Metric tonne",
)
###########################################################################
# AMOUNT OF SUBSTANCE
def_unit(
["mol", "mole"],
namespace=_ns,
prefixes=True,
doc="mole: amount of a chemical substance in SI.",
)
###########################################################################
# TEMPERATURE
def_unit(
["K", "Kelvin"],
namespace=_ns,
prefixes=True,
doc="Kelvin: temperature with a null point at absolute zero.",
)
def_unit(
["deg_C", "Celsius"],
namespace=_ns,
doc="Degrees Celsius",
format={"latex": r"{}^{\circ}C", "unicode": "°C"},
)
###########################################################################
# FORCE
def_unit(
["N", "Newton", "newton"],
kg * m * s**-2,
namespace=_ns,
prefixes=True,
doc="Newton: force",
)
##########################################################################
# ENERGY
def_unit(
["J", "Joule", "joule"],
N * m,
namespace=_ns,
prefixes=True,
doc="Joule: energy",
)
def_unit(
["eV", "electronvolt"],
_si.e.value * J,
namespace=_ns,
prefixes=True,
doc="Electron Volt",
)
##########################################################################
# PRESSURE
def_unit(
["Pa", "Pascal", "pascal"],
J * m**-3,
namespace=_ns,
prefixes=True,
doc="Pascal: pressure",
)
###########################################################################
# POWER
def_unit(
["W", "Watt", "watt"],
J / s,
namespace=_ns,
prefixes=True,
doc="Watt: power",
)
###########################################################################
# ELECTRICAL
def_unit(
["A", "ampere", "amp"],
namespace=_ns,
prefixes=True,
doc="ampere: base unit of electric current in SI",
)
def_unit(
["C", "coulomb"],
A * s,
namespace=_ns,
prefixes=True,
doc="coulomb: electric charge",
)
def_unit(
["V", "Volt", "volt"],
J * C**-1,
namespace=_ns,
prefixes=True,
doc="Volt: electric potential or electromotive force",
)
def_unit(
(["Ohm", "ohm"], ["Ohm"]),
V * A**-1,
namespace=_ns,
prefixes=True,
doc="Ohm: electrical resistance",
format={"latex": r"\Omega", "unicode": "Ω"},
)
def_unit(
["S", "Siemens", "siemens"],
A * V**-1,
namespace=_ns,
prefixes=True,
doc="Siemens: electrical conductance",
)
def_unit(
["F", "Farad", "farad"],
C * V**-1,
namespace=_ns,
prefixes=True,
doc="Farad: electrical capacitance",
)
###########################################################################
# MAGNETIC
def_unit(
["Wb", "Weber", "weber"],
V * s,
namespace=_ns,
prefixes=True,
doc="Weber: magnetic flux",
)
def_unit(
["T", "Tesla", "tesla"],
Wb * m**-2,
namespace=_ns,
prefixes=True,
doc="Tesla: magnetic flux density",
)
def_unit(
["H", "Henry", "henry"],
Wb * A**-1,
namespace=_ns,
prefixes=True,
doc="Henry: inductance",
)
###########################################################################
# ILLUMINATION
def_unit(
["cd", "candela"],
namespace=_ns,
prefixes=True,
doc="candela: base unit of luminous intensity in SI",
)
def_unit(
["lm", "lumen"],
cd * sr,
namespace=_ns,
prefixes=True,
doc="lumen: luminous flux",
)
def_unit(
["lx", "lux"],
lm * m**-2,
namespace=_ns,
prefixes=True,
doc="lux: luminous emittance",
)
###########################################################################
# RADIOACTIVITY
def_unit(
["Bq", "becquerel"],
1 / s,
namespace=_ns,
prefixes=False,
doc="becquerel: unit of radioactivity",
)
def_unit(
["Ci", "curie"],
Bq * 3.7e10,
namespace=_ns,
prefixes=False,
doc="curie: unit of radioactivity",
)
###########################################################################
# BASES
bases = {m, s, kg, A, cd, rad, K, mol}
###########################################################################
# CLEANUP
del UnitBase
del Unit
del def_unit
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
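# Illustrative usage (not part of the original module; assumes the package
# is imported as ``astropy.units``): the definitions above enable
# conversions such as
#     >>> from astropy import units as u
#     >>> (1 * u.hourangle).to(u.deg)
#     <Quantity 15. deg>
#     >>> (90 * u.deg).to(u.rad)    # doctest: +FLOAT_CMP
#     <Quantity 1.57079633 rad>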
| bsd-3-clause | 89175944427adc7656947ef07ab62750 | 19.165929 | 88 | 0.464619 | 3.383445 | false | false | false | false |
astropy/astropy | astropy/timeseries/periodograms/lombscargle/implementations/fast_impl.py | 3 | 4855 | import numpy as np
from .utils import trig_sum
def lombscargle_fast(
t,
y,
dy,
f0,
df,
Nf,
center_data=True,
fit_mean=True,
normalization="standard",
use_fft=True,
trig_sum_kwds=None,
):
"""Fast Lomb-Scargle Periodogram
This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
Lomb-Scargle periodograms.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None of these should be a `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_mean : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
trig_sum_kwds : dict or None, optional
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Returns
-------
power : ndarray
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 1:338, p277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipes in C (2002)
"""
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy**-2.0
w /= w.sum()
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_mean:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# ----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_mean:
S, C = trig_sum(t, w, **kwargs)
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
else:
tan_2omega_tau = S2 / C2
# This is what we're computing below; the straightforward way is slower
# and less stable, so we use trig identities instead
#
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
# ----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y**2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_mean:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
power = YC * YC / CC + YS * YS / SS
if normalization == "standard":
power /= YY
elif normalization == "model":
power /= YY - power
elif normalization == "log":
power = -np.log(1 - power / YY)
elif normalization == "psd":
power *= 0.5 * (dy**-2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return power
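# Illustrative usage (hypothetical data, not from the module; the grid is
# f = f0 + df * np.arange(Nf), as required by the docstring):
#     >>> rng = np.random.default_rng(0)
#     >>> t = np.sort(rng.uniform(0, 10, 100))
#     >>> y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(100)
#     >>> power = lombscargle_fast(t, y, None, f0=0.1, df=0.01, Nf=500)
#     >>> power.shape
#     (500,)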
| bsd-3-clause | 605933f3365f36834a724afc835465ac | 32.715278 | 82 | 0.576931 | 3.302721 | false | false | false | false |
astropy/astropy | astropy/io/votable/ucd.py | 3 | 5687 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains routines to verify the correctness of UCD strings.
"""
# STDLIB
import re
# LOCAL
from astropy.utils import data
__all__ = ["parse_ucd", "check_ucd"]
class UCDWords:
"""
Manages a list of acceptable UCD words.
Works by reading in a data file exactly as provided by IVOA. This
file resides in data/ucd1p-words.txt.
"""
def __init__(self):
self._primary = set()
self._secondary = set()
self._descriptions = {}
self._capitalization = {}
with data.get_pkg_data_fileobj("data/ucd1p-words.txt", encoding="ascii") as fd:
for line in fd.readlines():
type, name, descr = (x.strip() for x in line.split("|"))
name_lower = name.lower()
if type in "QPEVC":
self._primary.add(name_lower)
if type in "QSEVC":
self._secondary.add(name_lower)
self._descriptions[name_lower] = descr
self._capitalization[name_lower] = name
def is_primary(self, name):
"""
Returns True if *name* is a valid primary name.
"""
return name.lower() in self._primary
def is_secondary(self, name):
"""
Returns True if *name* is a valid secondary name.
"""
return name.lower() in self._secondary
def get_description(self, name):
"""
Returns the official English description of the given UCD
*name*.
"""
return self._descriptions[name.lower()]
def normalize_capitalization(self, name):
"""
Returns the standard capitalization form of the given name.
"""
return self._capitalization[name.lower()]
_ucd_singleton = None
def parse_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Parse the UCD into its component parts.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
parts : list
The result is a list of tuples of the form:
(*namespace*, *word*)
If no namespace was explicitly specified, *namespace* will be
returned as ``'ivoa'`` (i.e., the default namespace).
Raises
------
ValueError
if *ucd* is invalid
"""
global _ucd_singleton
if _ucd_singleton is None:
_ucd_singleton = UCDWords()
if has_colon:
m = re.search(r"[^A-Za-z0-9_.:;\-]", ucd)
else:
m = re.search(r"[^A-Za-z0-9_.;\-]", ucd)
if m is not None:
raise ValueError(f"UCD has invalid character '{m.group(0)}' in '{ucd}'")
word_component_re = r"[A-Za-z0-9][A-Za-z0-9\-_]*"
word_re = rf"{word_component_re}(\.{word_component_re})*"
parts = ucd.split(";")
words = []
for i, word in enumerate(parts):
colon_count = word.count(":")
if colon_count == 1:
ns, word = word.split(":", 1)
if not re.match(word_component_re, ns):
raise ValueError(f"Invalid namespace '{ns}'")
ns = ns.lower()
elif colon_count > 1:
raise ValueError(f"Too many colons in '{word}'")
else:
ns = "ivoa"
if not re.match(word_re, word):
raise ValueError(f"Invalid word '{word}'")
if ns == "ivoa" and check_controlled_vocabulary:
if i == 0:
if not _ucd_singleton.is_primary(word):
if _ucd_singleton.is_secondary(word):
raise ValueError(
f"Secondary word '{word}' is not valid as a primary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
else:
if not _ucd_singleton.is_secondary(word):
if _ucd_singleton.is_primary(word):
raise ValueError(
f"Primary word '{word}' is not valid as a secondary word"
)
else:
raise ValueError(f"Unknown word '{word}'")
try:
normalized_word = _ucd_singleton.normalize_capitalization(word)
except KeyError:
normalized_word = word
words.append((ns, normalized_word))
return words
def check_ucd(ucd, check_controlled_vocabulary=False, has_colon=False):
"""
Returns False if *ucd* is not a valid `unified content descriptor`_.
Parameters
----------
ucd : str
The UCD string
check_controlled_vocabulary : bool, optional
If `True`, then each word in the UCD will be verified against
the UCD1+ controlled vocabulary, (as required by the VOTable
specification version 1.2), otherwise not.
has_colon : bool, optional
If `True`, the UCD may contain a colon (as defined in earlier
versions of the standard).
Returns
-------
valid : bool
"""
if ucd is None:
return True
try:
parse_ucd(
ucd,
check_controlled_vocabulary=check_controlled_vocabulary,
has_colon=has_colon,
)
except ValueError:
return False
return True
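# Illustrative usage (expected behaviour per the parsing rules above; the
# validity of individual words depends on the bundled IVOA word list):
#     >>> parse_ucd("pos.eq.ra;meta.main")
#     [('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')]
#     >>> parse_ucd("custom:foo.bar", has_colon=True)
#     [('custom', 'foo.bar')]
#     >>> check_ucd("pos.eq.ra;;meta.main")    # empty word -> invalid
#     False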
| bsd-3-clause | 607fc3a166070a075d9db202e77da7b6 | 28.314433 | 87 | 0.550906 | 4.157164 | false | false | false | false |
astropy/astropy | astropy/modeling/statistic.py | 3 | 5416 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Statistic functions used in `~astropy.modeling.fitting`.
"""
# pylint: disable=invalid-name
import numpy as np
__all__ = ["leastsquare", "leastsquare_1d", "leastsquare_2d", "leastsquare_3d"]
def leastsquare(measured_vals, updated_model, weights, *x):
"""Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
When evaluated on "x", it must return an array matching the shape of "measured_vals".
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs and outputs to the Model's shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control.
"""
model_vals = updated_model(*x)
if np.shape(model_vals) != np.shape(measured_vals):
raise ValueError(
f"Shape mismatch between model ({np.shape(model_vals)}) "
f"and measured ({np.shape(measured_vals)})"
)
if weights is None:
weights = 1.0
return np.sum(np.square(weights * np.subtract(model_vals, measured_vals)))
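# Illustrative usage (a plain callable stands in for a fitted Model here;
# values are hypothetical):
#     >>> import numpy as np
#     >>> model = lambda x: x + 1.0
#     >>> leastsquare(np.array([1.0, 2.0]), model, None, np.array([0.0, 2.0]))
#     1.0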
# -------------------------------------------------------------------
def leastsquare_1d(measured_vals, updated_model, weights, x):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 1D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
def leastsquare_2d(measured_vals, updated_model, weights, x, y):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 2D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
def leastsquare_3d(measured_vals, updated_model, weights, x, y, z):
"""
Least square statistic with optional weights.
Safer than the general :func:`~astropy.modeling.statistic.leastsquare`
for 3D models by avoiding numpy methods that support broadcasting.
Parameters
----------
measured_vals : ndarray
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : ndarray or None
Array of weights to apply to each residual.
x : ndarray
Independent variable "x" on which to evaluate the model.
y : ndarray
Independent variable "y" on which to evaluate the model.
z : ndarray
Independent variable "z" on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare`
"""
model_vals = updated_model(x, y, z)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
return np.sum((weights * (model_vals - measured_vals)) ** 2)
| bsd-3-clause | 354e92157840b6a10a22c6bafd6b543c | 30.306358 | 79 | 0.649372 | 4.179012 | false | false | false | false |
astropy/astropy | astropy/stats/biweight.py | 3 | 27611 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for computing robust statistics using
Tukey's biweight function.
"""
import numpy as np
from .funcs import median_absolute_deviation
__all__ = [
"biweight_location",
"biweight_scale",
"biweight_midvariance",
"biweight_midcovariance",
"biweight_midcorrelation",
]
def _stat_functions(data, ignore_nan=False):
if isinstance(data, np.ma.MaskedArray):
median_func = np.ma.median
sum_func = np.ma.sum
elif ignore_nan:
median_func = np.nanmedian
sum_func = np.nansum
else:
median_func = np.median
sum_func = np.sum
return median_func, sum_func
def biweight_location(data, c=6.0, M=None, axis=None, *, ignore_nan=False):
r"""
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc}= M + \frac{\sum_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\sum_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
If :math:`MAD` is zero, then the median will be returned.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight locations are
computed. If `None` (default), then the biweight location of
the flattened input array will be computed.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.default_rng(12345)
>>> biloc = biweight_location(rand.standard_normal(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
0.01535330525461019
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
# mad = 0 means data is constant or mostly constant
# mad = np.nan means data contains NaNs and ignore_nan=False
if axis is None and (mad == 0.0 or np.isnan(mad)):
return M
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) >= 1
u = (1 - u**2) ** 2
u[mask] = 0
# If mad == 0 along the specified ``axis`` in the input data, return
# the median value along that axis.
# Ignore RuntimeWarnings for divide by zero
with np.errstate(divide="ignore", invalid="ignore"):
value = M.squeeze() + (sum_func(d * u, axis=axis) / sum_func(u, axis=axis))
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, M.squeeze(), value)
def biweight_scale(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight scales are computed.
If `None` (default), then the biweight scale of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.default_rng(12345)
>>> biscl = biweight_scale(rand.standard_normal(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
1.0239311812635818
"""
return np.sqrt(
biweight_midvariance(
data,
c=c,
M=M,
axis=axis,
modify_sample_size=modify_sample_size,
ignore_nan=ignore_nan,
)
)
def biweight_midvariance(
data, c=9.0, M=None, axis=None, modify_sample_size=False, *, ignore_nan=False
):
r"""
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\sum_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\sum_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
If :math:`MAD` is zero, then zero will be returned.
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \sum_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
``data`` can be a `~numpy.ma.MaskedArray`.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : None, int, or tuple of int, optional
The axis or axes along which the biweight midvariances are
computed. If `None` (default), then the biweight midvariance of
the flattened input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
ignore_nan : bool, optional
Whether to ignore NaN values in the input ``data``.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (https://ui.adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.default_rng(12345)
>>> bivar = biweight_midvariance(rand.standard_normal(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
1.0484350639638342
"""
median_func, sum_func = _stat_functions(data, ignore_nan=ignore_nan)
if isinstance(data, np.ma.MaskedArray) and ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = median_func(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis, ignore_nan=ignore_nan)
if axis is None:
# data is constant or mostly constant OR
# data contains NaNs and ignore_nan=False
if mad == 0.0 or np.isnan(mad):
return mad**2 # variance units
else:
mad = np.expand_dims(mad, axis=axis)
with np.errstate(divide="ignore", invalid="ignore"):
u = d / (c * mad)
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
if isinstance(mask, np.ma.MaskedArray):
mask = mask.filled(fill_value=False) # exclude masked data values
u = u**2
if modify_sample_size:
n = sum_func(mask, axis=axis)
else:
# set good values to 1, bad values to 0
include_mask = np.ones(data.shape)
if isinstance(data, np.ma.MaskedArray):
include_mask[data.mask] = 0
if ignore_nan:
include_mask[np.isnan(data)] = 0
n = np.sum(include_mask, axis=axis)
f1 = d * d * (1.0 - u) ** 4
f1[~mask] = 0.0
f1 = sum_func(f1, axis=axis)
f2 = (1.0 - u) * (1.0 - 5.0 * u)
f2[~mask] = 0.0
f2 = np.abs(np.sum(f2, axis=axis)) ** 2
# If mad == 0 along the specified ``axis`` in the input data, return
# 0.0 along that axis.
# Ignore RuntimeWarnings for divide by zero.
with np.errstate(divide="ignore", invalid="ignore"):
value = n * f1 / f2
if np.isscalar(value):
return value
where_func = np.where
if isinstance(data, np.ma.MaskedArray):
where_func = np.ma.where # return MaskedArray
return where_func(mad.squeeze() == 0, 0.0, value)
def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n_{xy} \ \frac{\sum_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\sum_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\sum_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
If :math:`MAD_x` or :math:`MAD_y` are zero, then zero will be
returned for that element.
For the standard definition of biweight midcovariance,
:math:`n_{xy}` is the total number of observations of each variable.
That definition is used if ``modify_sample_size`` is `False`, which
is the default.
However, if ``modify_sample_size = True``, then :math:`n_{xy}` is the
number of observations for which :math:`|u_i| < 1` and/or :math:`|v_i|
< 1`, i.e.
.. math::
n_{xx} = \sum_{|u_i| < 1} \ 1
.. math::
n_{xy} = n_{yx} = \sum_{|u_i| < 1, \ |v_i| < 1} \ 1
.. math::
n_{yy} = \sum_{|v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each row (i.e. ``a.ndim``
elements). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : ndarray
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.default_rng(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[0.83435568 0.02379316]
[0.02379316 7.15665769]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[0.91343072 2.67519302]
"""
data = np.asanyarray(data).astype(np.float64)
# ensure data is 2D
if data.ndim == 1:
data = data[np.newaxis, :]
if data.ndim != 2:
raise ValueError("The input array must be 2D or 1D.")
# estimate location if not given
if M is None:
M = np.median(data, axis=1)
M = np.asanyarray(M)
if M.ndim > 1:
raise ValueError("M must be a scalar or 1D array.")
# set up the differences
d = (data.T - M).T
# set up the weighting
mad = median_absolute_deviation(data, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
u = (d.T / (c * mad)).T
# now remove the outlier points
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid="ignore"):
mask = np.abs(u) < 1
u = u**2
if modify_sample_size:
maskf = mask.astype(float)
n = np.inner(maskf, maskf)
else:
n = data[0].size
usub1 = 1.0 - u
usub5 = 1.0 - 5.0 * u
usub1[~mask] = 0.0
with np.errstate(divide="ignore", invalid="ignore"):
numerator = d * usub1**2
denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
numerator_matrix = np.dot(numerator, numerator.T)
denominator_matrix = np.dot(denominator, denominator.T)
value = n * (numerator_matrix / denominator_matrix)
idx = np.where(mad == 0)[0]
value[idx, :] = 0
value[:, idx] = 0
return value
def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified). See
`biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.default_rng(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.09203238319481295
"""
x = np.asanyarray(x)
y = np.asanyarray(y)
if x.ndim != 1:
raise ValueError("x must be a 1D array.")
if y.ndim != 1:
raise ValueError("y must be a 1D array.")
if x.shape != y.shape:
raise ValueError("x and y must have the same shape.")
bicorr = biweight_midcovariance(
[x, y], c=c, M=M, modify_sample_size=modify_sample_size
)
return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1]))
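# Illustrative comparison (hypothetical data, not part of the module): a
# single gross outlier inflates the classical standard deviation by orders
# of magnitude, while the biweight scale stays near the underlying value:
#     >>> rng = np.random.default_rng(0)
#     >>> x = rng.standard_normal(1000)
#     >>> x[0] = 1e6
#     >>> np.std(x)           # ~3e4, dominated by the outlier
#     >>> biweight_scale(x)   # ~1, the underlying scale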
| bsd-3-clause | df743a15e726e9f7dc8f35b3c1aef341 | 35.378129 | 131 | 0.621817 | 3.554912 | false | false | false | false |
astropy/astropy | astropy/timeseries/periodograms/lombscargle/implementations/main.py | 3 | 7564 | """
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
__all__ = ["lombscargle", "available_methods"]
import numpy as np
from .chi2_impl import lombscargle_chi2
from .cython_impl import lombscargle_cython
from .fast_impl import lombscargle_fast
from .fastchi2_impl import lombscargle_fastchi2
from .scipy_impl import lombscargle_scipy
from .slow_impl import lombscargle_slow
METHODS = {
"slow": lombscargle_slow,
"fast": lombscargle_fast,
"chi2": lombscargle_chi2,
"scipy": lombscargle_scipy,
"fastchi2": lombscargle_fastchi2,
"cython": lombscargle_cython,
}
def available_methods():
methods = ["auto", "slow", "chi2", "cython", "fast", "fastchi2"]
# Scipy required for scipy algorithm (obviously)
try:
import scipy # noqa: F401
except ImportError:
pass
else:
methods.append("scipy")
return methods
def _is_regular(frequency):
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = np.diff(frequency)
return np.allclose(diff[0], diff)
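# e.g. (illustrative): _is_regular(np.array([1.0, 1.5, 2.0])) -> True,
# while _is_regular(np.array([1.0, 1.5, 2.5])) -> False.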
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalar
Parameters such that all(frequency == f0 + df * np.arange(N))
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not (assume_regular_frequency or _is_regular(frequency)):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
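# e.g. (illustrative): _get_frequency_grid(np.array([1.0, 1.5, 2.0]))
# returns (1.0, 0.5, 3), so that frequency == f0 + df * np.arange(N).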
def validate_method(method, dy, fit_mean, nterms, frequency, assume_regular_frequency):
"""
Validate the method argument, and if method='auto'
choose the appropriate method
"""
methods = available_methods()
prefer_fast = len(frequency) > 200 and (
assume_regular_frequency or _is_regular(frequency)
)
prefer_scipy = "scipy" in methods and dy is None and not fit_mean
# automatically choose the appropriate method
if method == "auto":
if nterms != 1:
if prefer_fast:
method = "fastchi2"
else:
method = "chi2"
elif prefer_fast:
method = "fast"
elif prefer_scipy:
method = "scipy"
else:
method = "cython"
if method not in METHODS:
raise ValueError(f"invalid method: {method}")
return method
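# Illustrative resolution of method='auto' (hypothetical inputs): a regular
# grid of more than 200 frequencies with nterms=1 prefers the fast method:
#     >>> freq = 0.1 + 0.01 * np.arange(300)
#     >>> validate_method("auto", dy=None, fit_mean=True, nterms=1,
#     ...                 frequency=freq, assume_regular_frequency=False)
#     'fast'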
def lombscargle(
t,
y,
dy=None,
frequency=None,
method="auto",
assume_regular_frequency=False,
normalization="standard",
fit_mean=True,
center_data=True,
method_kwds=None,
nterms=1,
):
"""
Compute the Lomb-scargle Periodogram with a given method.
Parameters
----------
t : array-like
sequence of observation times
y : array-like
sequence of observations associated with times t
dy : float or array-like, optional
error or sequence of observational errors associated with times t
frequency : array-like
frequencies (not angular frequencies) at which to evaluate the
periodogram. If not specified, optimal frequencies will be chosen using
a heuristic which will attempt to provide sufficient frequency range
and sampling so that peaks will not be missed. Note that in order to
use method='fast', frequencies must be regularly spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard' or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
nterms : int, optional
number of Fourier terms to use in the periodogram.
Not supported with every method.
Returns
-------
PLS : array-like
Lomb-Scargle power associated with each frequency in ``frequency``
"""
# frequencies should be one-dimensional arrays
output_shape = frequency.shape
frequency = frequency.ravel()
# we'll need to adjust args and kwds for each method
args = (t, y, dy)
kwds = dict(
frequency=frequency,
center_data=center_data,
fit_mean=fit_mean,
normalization=normalization,
nterms=nterms,
**(method_kwds or {}),
)
method = validate_method(
method,
dy=dy,
fit_mean=fit_mean,
nterms=nterms,
frequency=frequency,
assume_regular_frequency=assume_regular_frequency,
)
# scipy doesn't support dy or fit_mean=True
if method == "scipy":
if kwds.pop("fit_mean"):
raise ValueError("scipy method does not support fit_mean=True")
if dy is not None:
dy = np.ravel(np.asarray(dy))
if not np.allclose(dy[0], dy):
raise ValueError("scipy method only supports uniform uncertainties dy")
args = (t, y)
# fast methods require frequency expressed as a grid
if method.startswith("fast"):
f0, df, Nf = _get_frequency_grid(
kwds.pop("frequency"), assume_regular_frequency
)
kwds.update(f0=f0, df=df, Nf=Nf)
# only chi2 methods support nterms
if not method.endswith("chi2"):
if kwds.pop("nterms") != 1:
raise ValueError(
"nterms != 1 only supported with 'chi2' or 'fastchi2' methods"
)
PLS = METHODS[method](*args, **kwds)
return PLS.reshape(output_shape)
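# Illustrative usage (hypothetical data; method='auto' dispatches as
# described in validate_method above):
#     >>> t = np.linspace(0, 10, 100)
#     >>> y = np.sin(2 * np.pi * t)
#     >>> freq = np.linspace(0.1, 2.0, 400)
#     >>> lombscargle(t, y, frequency=freq).shape
#     (400,)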
| bsd-3-clause | 474ae0c16dc13b4b43d9f41866e90fa8 | 31.744589 | 87 | 0.639344 | 4.036286 | false | false | false | false |
astropy/astropy | astropy/wcs/wcsapi/wrappers/tests/test_sliced_wcs.py | 3 | 33901 | import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.coordinates import ICRS, Galactic, SkyCoord
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.time import Time
from astropy.units import Quantity
from astropy.wcs.wcs import WCS, FITSFixedWarning
from astropy.wcs.wcsapi.wrappers.sliced_wcs import (
SlicedLowLevelWCS,
combine_slices,
sanitize_slices,
)
# To test the slicing we start off from standard FITS WCS
# objects since those implement the low-level API. We create
# a WCS for a spectral cube with axes in non-standard order
# and with correlated celestial axes and an uncorrelated
# spectral axis.
HEADER_SPECTRAL_CUBE = """
NAXIS = 3
NAXIS1 = 10
NAXIS2 = 20
NAXIS3 = 30
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
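# Note (illustrative): pixel_bounds gives one (min, max) tuple of pixel
# coordinates per pixel axis, in the same axis order as pixel_shape.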
def test_invalid_slices():
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, [False, False, False]])
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, slice(None, None, 2)])
with pytest.raises(IndexError):
SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [None, None, 1000.100])
@pytest.mark.parametrize(
"item, ndim, expected",
(
([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8]),
),
)
def test_sanitize_slice(item, ndim, expected):
new_item = sanitize_slices(item, ndim)
# FIXME: do we still need the first two since the third assert
# should cover it all?
assert len(new_item) == ndim
assert all(isinstance(i, (slice, int)) for i in new_item)
assert new_item == expected
EXPECTED_ELLIPSIS_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_ELLIPSIS_REPR.strip()
assert EXPECTED_ELLIPSIS_REPR.strip() in repr(wcs)
def test_pixel_to_world_broadcasting():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert_allclose(
wcs.pixel_to_world_values((29, 29), 39, 44), ((10, 10), (20, 20), (25, 25))
)
def test_world_to_pixel_broadcasting():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert_allclose(
wcs.world_to_pixel_values((10, 10), 20, 25),
((29.0, 29.0), (39.0, 39.0), (44.0, 44.0)),
)
EXPECTED_SPECTRAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 2 world dimensions
Array shape (Numpy order): (30, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 yes yes
1 yes yes
"""
def test_spectral_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), 10])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape == (30, 10)
assert wcs.pixel_shape == (10, 30)
assert wcs.world_axis_physical_types == ["pos.galactic.lat", "pos.galactic.lon"]
assert wcs.world_axis_units == ["deg", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["Latitude", "Longitude"]
assert_equal(wcs.axis_correlation_matrix, [[True, True], [True, True]])
assert wcs.world_axis_object_components == [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 44), (10, 25))
assert_allclose(wcs.array_index_to_world_values(44, 29), (10, 25))
assert_allclose(wcs.world_to_pixel_values(10, 25), (29.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 25), (44, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (5, 15)])
assert str(wcs) == EXPECTED_SPECTRAL_SLICE_REPR.strip()
assert EXPECTED_SPECTRAL_SLICE_REPR.strip() in repr(wcs)
EXPECTED_SPECTRAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 6, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 6 (-6, 14)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_spectral_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), slice(4, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 6, 10)
assert wcs.pixel_shape == (10, 6, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 35, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 35, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 35.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 35, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-6, 14), (5, 15)])
assert str(wcs) == EXPECTED_SPECTRAL_RANGE_REPR.strip()
assert EXPECTED_SPECTRAL_RANGE_REPR.strip() in repr(wcs)
EXPECTED_CELESTIAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20)
Pixel Dim Axis Name Data size Bounds
0 None 20 (-2, 18)
1 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 no yes
1 yes no
2 no yes
"""
def test_celestial_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, 5])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20)
assert wcs.pixel_shape == (20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix, [[False, True], [True, False], [False, True]]
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(39, 44), (12.4, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39), (12.4, 20, 25))
assert_allclose(wcs.world_to_pixel_values(12.4, 20, 25), (39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(12.4, 20, 25), (44, 39))
assert_equal(wcs.pixel_bounds, [(-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_SLICE_REPR.strip()
assert EXPECTED_CELESTIAL_SLICE_REPR.strip() in repr(wcs)
EXPECTED_CELESTIAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(24, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 24), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (24.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 24))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_RANGE_REPR.strip()
assert EXPECTED_CELESTIAL_RANGE_REPR.strip() in repr(wcs)
# Now try with a 90 degree rotation
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_SPECTRAL_CUBE_ROT = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep="\n"))
WCS_SPECTRAL_CUBE_ROT.wcs.pc = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
WCS_SPECTRAL_CUBE_ROT.wcs.crval[0] = 0
WCS_SPECTRAL_CUBE_ROT.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
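# Note (sketch): the permutation matrix assigned to ``wcs.pc`` above couples
# the first and third axes (latitude and longitude, assuming the same axis
# order as the other spectral-cube headers in this file), so the tests below
# check that axis correlations, bounds and round-trips survive the rotation.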
EXPECTED_CELESTIAL_RANGE_ROT_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Axis Name Data size Bounds
0 None 5 (-6, 6)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 Latitude pos.galactic.lat deg
1 Frequency em.freq Hz
2 Longitude pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range_rot():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_ROT, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert wcs.pixel_axis_names == ["", "", ""]
assert wcs.world_axis_names == ["Latitude", "Frequency", "Longitude"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(14, 29, 34), (1, 15, 24))
assert_allclose(wcs.array_index_to_world_values(34, 29, 14), (1, 15, 24))
assert_allclose(wcs.world_to_pixel_values(1, 15, 24), (14.0, 29.0, 34.0))
assert_equal(wcs.world_to_array_index_values(1, 15, 24), (34, 29, 14))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip()
assert EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip() in repr(wcs)
HEADER_NO_SHAPE_CUBE = """
NAXIS = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore", VerifyWarning)
WCS_NO_SHAPE_CUBE = WCS(Header.fromstring(HEADER_NO_SHAPE_CUBE, sep="\n"))
EXPECTED_NO_SHAPE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
1 None None None
2 None None None
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None em.freq Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_no_array_shape():
wcs = SlicedLowLevelWCS(WCS_NO_SHAPE_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
"em.freq",
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == (
"celestial",
1,
"spherical.lat.degree",
)
assert wcs.world_axis_object_components[1][:2] == ("spectral", 0)
assert wcs.world_axis_object_components[2] == (
"celestial",
0,
"spherical.lon.degree",
)
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert wcs.world_axis_object_classes["spectral"][0] is Quantity
assert wcs.world_axis_object_classes["spectral"][1] == ()
assert wcs.world_axis_object_classes["spectral"][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert str(wcs) == EXPECTED_NO_SHAPE_REPR.strip()
assert EXPECTED_NO_SHAPE_REPR.strip() in repr(wcs)
# Testing the WCS object having some physical types as None/Unknown
HEADER_SPECTRAL_CUBE_NONE_TYPES = {
"CTYPE1": "GLAT-CAR",
"CUNIT1": "deg",
"CDELT1": -0.1,
"CRPIX1": 30,
"CRVAL1": 10,
"NAXIS1": 10,
"CTYPE2": "",
"CUNIT2": "Hz",
"CDELT2": 0.5,
"CRPIX2": 40,
"CRVAL2": 20,
"NAXIS2": 20,
"CTYPE3": "GLON-CAR",
"CUNIT3": "deg",
"CDELT3": 0.1,
"CRPIX3": 45,
"CRVAL3": 25,
"NAXIS3": 30,
}
WCS_SPECTRAL_CUBE_NONE_TYPES = WCS(header=HEADER_SPECTRAL_CUBE_NONE_TYPES)
WCS_SPECTRAL_CUBE_NONE_TYPES.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
EXPECTED_ELLIPSIS_REPR_NONE_TYPES = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Axis Name Data size Bounds
0 None 10 (-1, 11)
1 None 20 (-2, 18)
2 None 30 (5, 15)
World Dim Axis Name Physical Type Units
0 None pos.galactic.lat deg
1 None None Hz
2 None pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis_none_types():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_NONE_TYPES, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == [
"pos.galactic.lat",
None,
"pos.galactic.lon",
]
assert wcs.world_axis_units == ["deg", "Hz", "deg"]
assert_equal(
wcs.axis_correlation_matrix,
[[True, False, True], [False, True, False], [True, False, True]],
)
assert wcs.world_axis_object_components == [
("celestial", 1, "spherical.lat.degree"),
("world", 0, "value"),
("celestial", 0, "spherical.lon.degree"),
]
assert wcs.world_axis_object_classes["celestial"][0] is SkyCoord
assert wcs.world_axis_object_classes["celestial"][1] == ()
assert isinstance(wcs.world_axis_object_classes["celestial"][2]["frame"], Galactic)
assert wcs.world_axis_object_classes["celestial"][2]["unit"] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29.0, 39.0, 44.0))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip()
assert EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip() in repr(wcs)
CASES = [
(slice(None), slice(None), slice(None)),
(slice(None), slice(3, None), slice(3, None)),
(slice(None), slice(None, 16), slice(None, 16)),
(slice(None), slice(3, 16), slice(3, 16)),
(slice(2, None), slice(None), slice(2, None)),
(slice(2, None), slice(3, None), slice(5, None)),
(slice(2, None), slice(None, 16), slice(2, 18)),
(slice(2, None), slice(3, 16), slice(5, 18)),
(slice(None, 10), slice(None), slice(None, 10)),
(slice(None, 10), slice(3, None), slice(3, 10)),
(slice(None, 10), slice(None, 16), slice(None, 10)),
(slice(None, 10), slice(3, 16), slice(3, 10)),
(slice(2, 10), slice(None), slice(2, 10)),
(slice(2, 10), slice(3, None), slice(5, 10)),
(slice(2, 10), slice(None, 16), slice(2, 10)),
(slice(2, 10), slice(3, 16), slice(5, 10)),
(slice(None), 3, 3),
(slice(2, None), 3, 5),
(slice(None, 10), 3, 3),
(slice(2, 10), 3, 5),
]
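# Reading the table above: ``combine_slices(slice1, slice2)`` composes two
# successive slices of the same axis, i.e. ``x[slice1][slice2]`` should match
# ``x[combine_slices(slice1, slice2)]`` for a 1-d sequence ``x``.  A quick
# sanity check (sketch, plain Python):
#
#     >>> x = list(range(20))
#     >>> x[2:10][3:16] == x[5:10]
#     True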
@pytest.mark.parametrize(("slice1", "slice2", "expected"), CASES)
def test_combine_slices(slice1, slice2, expected):
assert combine_slices(slice1, slice2) == expected
def test_nested_slicing():
# Make sure that if we call slicing several times, the result is the same
# as calling the slicing once with the final slice settings.
wcs = WCS_SPECTRAL_CUBE
sub1 = SlicedLowLevelWCS(
SlicedLowLevelWCS(
SlicedLowLevelWCS(wcs, [slice(None), slice(1, 10), slice(None)]),
[3, slice(2, None)],
),
[slice(None), slice(2, 8)],
)
sub2 = wcs[3, 3:10, 2:8]
assert_allclose(sub1.pixel_to_world_values(3, 5), sub2.pixel_to_world_values(3, 5))
assert not isinstance(sub1._wcs, SlicedLowLevelWCS)
def test_too_much_slicing():
wcs = WCS_SPECTRAL_CUBE
with pytest.raises(
ValueError,
match=(
"Cannot slice WCS: the resulting WCS "
"should have at least one pixel and "
"one world dimension"
),
):
wcs[0, 1, 2]
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep="\n")
@pytest.fixture
def time_1d_wcs(header_time_1d):
with warnings.catch_warnings():
warnings.simplefilter("ignore", FITSFixedWarning)
return WCS(header_time_1d)
def test_1d_sliced_low_level(time_1d_wcs):
sll = SlicedLowLevelWCS(time_1d_wcs, np.s_[10:20])
world = sll.pixel_to_world_values([1, 2])
assert isinstance(world, np.ndarray)
assert np.allclose(world, [27, 29])
def validate_info_dict(result, expected):
    """Compare two info dicts; check the "value" entries with allclose."""
    result_value = result.pop("value")
    expected_value = expected.pop("value")
    np.testing.assert_allclose(result_value, expected_value)
    assert result == expected
def test_dropped_dimensions():
wcs = WCS_SPECTRAL_CUBE
sub = SlicedLowLevelWCS(wcs, np.s_[:, :, :])
assert sub.dropped_world_dimensions == {}
sub = SlicedLowLevelWCS(wcs, np.s_[:, 2:5, :])
assert sub.dropped_world_dimensions == {}
sub = SlicedLowLevelWCS(wcs, np.s_[:, 0])
waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
assert (
len(waocls) == 1
and "spectral" in waocls
and waocls["spectral"][0] == u.Quantity
)
validate_info_dict(
sub.dropped_world_dimensions,
{
"value": [0.5],
"world_axis_physical_types": ["em.freq"],
"world_axis_names": ["Frequency"],
"world_axis_units": ["Hz"],
"serialized_classes": False,
},
)
sub = SlicedLowLevelWCS(wcs, np.s_[:, 0, 0])
waocomp = sub.dropped_world_dimensions.pop("world_axis_object_components")
assert len(waocomp) == 1 and waocomp[0][0] == "spectral" and waocomp[0][1] == 0
waocls = sub.dropped_world_dimensions.pop("world_axis_object_classes")
assert (
len(waocls) == 1
and "spectral" in waocls
and waocls["spectral"][0] == u.Quantity
)
validate_info_dict(
sub.dropped_world_dimensions,
{
"value": [0.5],
"world_axis_physical_types": ["em.freq"],
"world_axis_names": ["Frequency"],
"world_axis_units": ["Hz"],
"serialized_classes": False,
},
)
sub = SlicedLowLevelWCS(wcs, np.s_[0, :, 0])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
validate_info_dict(
dwd,
{
"value": [12.86995801, 20.49217541],
"world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
"world_axis_names": ["Latitude", "Longitude"],
"world_axis_units": ["deg", "deg"],
"serialized_classes": False,
"world_axis_object_components": [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
],
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
assert wao_classes["celestial"][2]["unit"] is u.deg
sub = SlicedLowLevelWCS(wcs, np.s_[5, :5, 12])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
validate_info_dict(
dwd,
{
"value": [11.67648267, 21.01921192],
"world_axis_physical_types": ["pos.galactic.lat", "pos.galactic.lon"],
"world_axis_names": ["Latitude", "Longitude"],
"world_axis_units": ["deg", "deg"],
"serialized_classes": False,
"world_axis_object_components": [
("celestial", 1, "spherical.lat.degree"),
("celestial", 0, "spherical.lon.degree"),
],
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], Galactic)
assert wao_classes["celestial"][2]["unit"] is u.deg
def test_dropped_dimensions_4d(cube_4d_fitswcs):
sub = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[:, 12, 5, 5])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
wao_components = dwd.pop("world_axis_object_components")
validate_info_dict(
dwd,
{
"value": [4.0e00, -2.0e00, 1.0e10],
"world_axis_physical_types": ["pos.eq.ra", "pos.eq.dec", "em.freq"],
"world_axis_names": ["Right Ascension", "Declination", "Frequency"],
"world_axis_units": ["deg", "deg", "Hz"],
"serialized_classes": False,
},
)
assert wao_classes["celestial"][0] is SkyCoord
assert wao_classes["celestial"][1] == ()
assert isinstance(wao_classes["celestial"][2]["frame"], ICRS)
assert wao_classes["celestial"][2]["unit"] is u.deg
assert wao_classes["spectral"][0:3] == (u.Quantity, (), {})
assert wao_components[0] == ("celestial", 0, "spherical.lon.degree")
assert wao_components[1] == ("celestial", 1, "spherical.lat.degree")
assert wao_components[2][0:2] == ("spectral", 0)
sub = SlicedLowLevelWCS(cube_4d_fitswcs, np.s_[12, 12])
dwd = sub.dropped_world_dimensions
wao_classes = dwd.pop("world_axis_object_classes")
wao_components = dwd.pop("world_axis_object_components")
validate_info_dict(
dwd,
{
"value": [1.0e10, 5.0e00],
"world_axis_physical_types": ["em.freq", "time"],
"world_axis_names": ["Frequency", "Time"],
"world_axis_units": ["Hz", "s"],
"serialized_classes": False,
},
)
assert wao_components[0][0:2] == ("spectral", 0)
assert wao_components[1][0] == "time"
assert wao_components[1][1] == 0
assert wao_classes["spectral"][0:3] == (u.Quantity, (), {})
assert wao_classes["time"][0:3] == (Time, (), {})
def test_pixel_to_world_values_different_int_types():
int_sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, 0, :])
np64_sliced = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, np.s_[:, np.int64(0), :])
pixel_arrays = ([0, 1], [0, 1])
for int_coord, np64_coord in zip(
int_sliced.pixel_to_world_values(*pixel_arrays),
np64_sliced.pixel_to_world_values(*pixel_arrays),
):
assert all(int_coord == np64_coord)
COUPLED_WCS_HEADER = {
"WCSAXES": 3,
"CRPIX1": (100 + 1) / 2,
"CRPIX2": (25 + 1) / 2,
"CRPIX3": 1.0,
"PC1_1": 0.0,
"PC1_2": -1.0,
"PC1_3": 0.0,
"PC2_1": 1.0,
"PC2_2": 0.0,
"PC2_3": -1.0,
"CDELT1": 5,
"CDELT2": 5,
"CDELT3": 0.055,
"CUNIT1": "arcsec",
"CUNIT2": "arcsec",
"CUNIT3": "Angstrom",
"CTYPE1": "HPLN-TAN",
"CTYPE2": "HPLT-TAN",
"CTYPE3": "WAVE",
"CRVAL1": 0.0,
"CRVAL2": 0.0,
"CRVAL3": 1.05,
}
def test_coupled_world_slicing():
fits_wcs = WCS(header=COUPLED_WCS_HEADER)
sl = SlicedLowLevelWCS(fits_wcs, 0)
world = fits_wcs.pixel_to_world_values(0, 0, 0)
out_pix = sl.world_to_pixel_values(world[0], world[1])
assert np.allclose(out_pix[0], 0)
| bsd-3-clause | dc21bd99877124d548a0765913f065a7 | 30.535814 | 87 | 0.601841 | 2.950479 | false | false | false | false |
astropy/astropy | astropy/cosmology/io/model.py | 3 | 9400 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import abc
import copy
import inspect
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.modeling import FittableModel, Model
from astropy.utils.decorators import classproperty
from .utils import convert_parameter_to_model_parameter
__all__ = [] # nothing is publicly scoped
class _CosmologyModel(FittableModel):
"""Base class for Cosmology redshift-method Models.
.. note::
This class is not publicly scoped so should not be used directly.
Instead, from a Cosmology instance use ``.to_format("astropy.model")``
to create an instance of a subclass of this class.
`_CosmologyModel` (subclasses) wrap a redshift-method of a
:class:`~astropy.cosmology.Cosmology` class, converting each non-`None`
|Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the redshift-method to the model's ``__call__ / evaluate``.
See Also
--------
astropy.cosmology.Cosmology.to_format
"""
@abc.abstractmethod
def _cosmology_class(self):
"""Cosmology class as a private attribute. Set in subclasses."""
@abc.abstractmethod
def _method_name(self):
"""Cosmology method name as a private attribute. Set in subclasses."""
@classproperty
def cosmology_class(cls):
"""|Cosmology| class."""
return cls._cosmology_class
@property
def cosmology(self):
"""Return |Cosmology| using `~astropy.modeling.Parameter` values."""
cosmo = self._cosmology_class(
name=self.name,
**{
k: (v.value if not (v := getattr(self, k)).unit else v.quantity)
for k in self.param_names
},
)
return cosmo
@classproperty
def method_name(self):
"""Redshift-method name on |Cosmology| instance."""
return self._method_name
# ---------------------------------------------------------------
def evaluate(self, *args, **kwargs):
"""Evaluate method {method!r} of {cosmo_cls!r} Cosmology.
The Model wraps the :class:`~astropy.cosmology.Cosmology` method,
converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
(unless the Parameter is None, in which case it is skipped).
Here an instance of the cosmology is created using the current
Parameter values and the method is evaluated given the input.
Parameters
----------
*args, **kwargs
The first ``n_inputs`` of ``*args`` are for evaluating the method
of the cosmology. The remaining args and kwargs are passed to the
cosmology class constructor.
Any unspecified Cosmology Parameter use the current value of the
corresponding Model Parameter.
Returns
-------
Any
Results of evaluating the Cosmology method.
"""
# create BoundArgument with all available inputs beyond the Parameters,
# which will be filled in next
ba = self.cosmology_class._init_signature.bind_partial(
*args[self.n_inputs :], **kwargs
)
# fill in missing Parameters
for k in self.param_names:
if k not in ba.arguments:
v = getattr(self, k)
ba.arguments[k] = v.value if not v.unit else v.quantity
# unvectorize, since Cosmology is not vectorized
# TODO! remove when vectorized
if np.shape(ba.arguments[k]): # only in __call__
# m_nu is a special case # TODO! fix by making it 'structured'
if k == "m_nu" and len(ba.arguments[k].shape) == 1:
continue
ba.arguments[k] = ba.arguments[k][0]
# make instance of cosmology
cosmo = self._cosmology_class(**ba.arguments)
# evaluate method
result = getattr(cosmo, self._method_name)(*args[: self.n_inputs])
return result
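    # Evaluation sketch: calling the model instance evaluates the wrapped
    # Cosmology method at the current Parameter values, so for the model in
    # the sketch above ``model(0.5)`` is equivalent to
    # ``Planck18.lookback_time(0.5)`` (assuming no Parameters were changed).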
##############################################################################
def from_model(model):
"""Load |Cosmology| from `~astropy.modeling.Model` object.
Parameters
----------
model : `_CosmologyModel` subclass instance
        See ``Cosmology.to_format.help("astropy.model")`` for details.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> Cosmology.from_format(model)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
cosmology = model.cosmology_class
meta = copy.deepcopy(model.meta)
# assemble the Parameters
params = {}
for n in model.param_names:
p = getattr(model, n)
params[p.name] = p.quantity if p.unit else p.value
# put all attributes in a dict
meta[p.name] = {
n: getattr(p, n)
for n in dir(p)
if not (n.startswith("_") or callable(getattr(p, n)))
}
ba = cosmology._init_signature.bind(name=model.name, **params, meta=meta)
return cosmology(*ba.args, **ba.kwargs)
def to_model(cosmology, *_, method):
"""Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
method : str, keyword-only
The name of the method on the ``cosmology``.
Returns
-------
`_CosmologyModel` subclass instance
The Model wraps the |Cosmology| method, converting each non-`None`
:class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the method to the model's ``__call__ / evaluate``.
Examples
--------
>>> from astropy.cosmology import Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> model
<FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897,
name='Planck18')>
"""
cosmo_cls = cosmology.__class__
# get bound method & sig from cosmology (unbound if class).
if not hasattr(cosmology, method):
raise AttributeError(f"{method} is not a method on {cosmology.__class__}.")
func = getattr(cosmology, method)
if not callable(func):
raise ValueError(f"{cosmology.__class__}.{method} is not callable.")
msig = inspect.signature(func)
# introspect for number of positional inputs, ignoring "self"
n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))])
attrs = {} # class attributes
attrs["_cosmology_class"] = cosmo_cls
attrs["_method_name"] = method
attrs["n_inputs"] = n_inputs
attrs["n_outputs"] = 1
params = {} # Parameters (also class attributes)
for n in cosmology.__parameters__:
v = getattr(cosmology, n) # parameter value
if v is None: # skip unspecified parameters
continue
# add as Model Parameter
params[n] = convert_parameter_to_model_parameter(
getattr(cosmo_cls, n), v, cosmology.meta.get(n)
)
# class name is cosmology name + Cosmology + method name + Model
clsname = (
cosmo_cls.__qualname__.replace(".", "_")
+ "Cosmology"
+ method.replace("_", " ").title().replace(" ", "")
+ "Model"
)
# make Model class
CosmoModel = type(clsname, (_CosmologyModel,), {**attrs, **params})
# override __signature__ and format the doc.
setattr(CosmoModel.evaluate, "__signature__", msig)
CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format(
cosmo_cls=cosmo_cls.__qualname__, method=method
)
# instantiate class using default values
ps = {n: getattr(cosmology, n) for n in params.keys()}
model = CosmoModel(**ps, name=cosmology.name, meta=copy.deepcopy(cosmology.meta))
return model
def model_identify(origin, format, *args, **kwargs):
"""Identify if object uses the :class:`~astropy.modeling.Model` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Model) and (format in (None, "astropy.model"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.model", Cosmology, from_model)
convert_registry.register_writer("astropy.model", Cosmology, to_model)
convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
| bsd-3-clause | 226d6118670145c7a2dcefb026008f41 | 33.558824 | 86 | 0.612128 | 3.743528 | false | false | false | false |
astropy/astropy | astropy/time/tests/test_delta.py | 3 | 22673 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import itertools
import operator
from datetime import timedelta
from decimal import Decimal
import numpy as np
import pytest
from astropy import units as u
from astropy.table import Table
from astropy.time import (
STANDARD_TIME_SCALES,
TIME_DELTA_SCALES,
TIME_SCALES,
OperandTypeError,
ScaleValueError,
Time,
TimeDelta,
TimeDeltaMissingUnitWarning,
)
from astropy.utils import iers
allclose_jd = functools.partial(np.allclose, rtol=2.0**-52, atol=0)
allclose_jd2 = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52
) # 20 ps atol
allclose_sec = functools.partial(
np.allclose, rtol=2.0**-52, atol=2.0**-52 * 24 * 3600
) # 20 ps atol
orig_auto_download = iers.conf.auto_download
def setup_module(module):
"""Use offline IERS table only."""
iers.conf.auto_download = False
def teardown_module(module):
"""Restore original setting."""
iers.conf.auto_download = orig_auto_download
class TestTimeDelta:
"""Test TimeDelta class"""
def setup_method(self):
self.t = Time("2010-01-01", scale="utc")
self.t2 = Time("2010-01-02 00:00:01", scale="utc")
self.t3 = Time(
"2010-01-03 01:02:03",
scale="utc",
precision=9,
in_subfmt="date_hms",
out_subfmt="date_hm",
location=(-75.0 * u.degree, 30.0 * u.degree, 500 * u.m),
)
self.t4 = Time("2010-01-01", scale="local")
self.dt = TimeDelta(100.0, format="sec")
self.dt_array = TimeDelta(np.arange(100, 1000, 100), format="sec")
def test_sub(self):
# time - time
dt = self.t2 - self.t
assert repr(dt).startswith(
"<TimeDelta object: scale='tai' format='jd' value=1.00001157407"
)
assert allclose_jd(dt.jd, 86401.0 / 86400.0)
assert allclose_sec(dt.sec, 86401.0)
# time - delta_time
t = self.t2 - dt
assert t.iso == self.t.iso
# delta_time - delta_time
dt2 = dt - self.dt
assert allclose_sec(dt2.sec, 86301.0)
# delta_time - time
with pytest.raises(OperandTypeError):
dt - self.t
def test_add(self):
# time + time
with pytest.raises(OperandTypeError):
self.t2 + self.t
# time + delta_time
dt = self.t2 - self.t
t2 = self.t + dt
assert t2.iso == self.t2.iso
# delta_time + delta_time
dt2 = dt + self.dt
assert allclose_sec(dt2.sec, 86501.0)
# delta_time + time
dt = self.t2 - self.t
t2 = dt + self.t
assert t2.iso == self.t2.iso
def test_add_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector."""
t = Time(0.0, format="mjd", scale="tai")
t2 = Time([0.0, 1.0], format="mjd", scale="tai")
dt = TimeDelta(100.0, format="jd")
dt2 = TimeDelta([100.0, 200.0], format="jd")
out = t + dt
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = t + dt2
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = t2 + dt
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt + dt
assert allclose_jd(out.jd, 200.0)
assert out.isscalar
out = dt + dt2
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
# Reverse the argument order
out = dt + t
assert allclose_jd(out.mjd, 100.0)
assert out.isscalar
out = dt2 + t
assert allclose_jd(out.mjd, [100.0, 200.0])
assert not out.isscalar
out = dt + t2
assert allclose_jd(out.mjd, [100.0, 101.0])
assert not out.isscalar
out = dt2 + dt
assert allclose_jd(out.jd, [200.0, 300.0])
assert not out.isscalar
def test_sub_vector(self):
"""Check time arithmetic as well as properly keeping track of whether
        a time is a scalar or a vector."""
t = Time(0.0, format="mjd", scale="tai")
t2 = Time([0.0, 1.0], format="mjd", scale="tai")
dt = TimeDelta(100.0, format="jd")
dt2 = TimeDelta([100.0, 200.0], format="jd")
out = t - dt
assert allclose_jd(out.mjd, -100.0)
assert out.isscalar
out = t - dt2
assert allclose_jd(out.mjd, [-100.0, -200.0])
assert not out.isscalar
out = t2 - dt
assert allclose_jd(out.mjd, [-100.0, -99.0])
assert not out.isscalar
out = dt - dt
assert allclose_jd(out.jd, 0.0)
assert out.isscalar
out = dt - dt2
assert allclose_jd(out.jd, [0.0, -100.0])
assert not out.isscalar
@pytest.mark.parametrize(
"values", [(2455197.5, 2455198.5), ([2455197.5], [2455198.5])]
)
def test_copy_timedelta(self, values):
"""Test copying the values of a TimeDelta object by passing it into the
Time initializer.
"""
val1, val2 = values
t = Time(val1, format="jd", scale="utc")
t2 = Time(val2, format="jd", scale="utc")
dt = t2 - t
dt2 = TimeDelta(dt, copy=False)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is dt2._time.jd1
assert dt._time.jd2 is dt2._time.jd2
dt2 = TimeDelta(dt, copy=True)
assert np.all(dt.jd == dt2.jd)
assert dt._time.jd1 is not dt2._time.jd1
assert dt._time.jd2 is not dt2._time.jd2
# Include initializers
dt2 = TimeDelta(dt, format="sec")
assert allclose_sec(dt2.value, 86400.0)
def test_neg_abs(self):
for dt in (self.dt, self.dt_array):
dt2 = -dt
assert np.all(dt2.jd == -dt.jd)
dt3 = abs(dt)
assert np.all(dt3.jd == dt.jd)
dt4 = abs(dt2)
assert np.all(dt4.jd == dt.jd)
def test_mul_div(self):
for dt in (self.dt, self.dt_array):
dt2 = dt + dt + dt
dt3 = 3.0 * dt
assert allclose_jd(dt2.jd, dt3.jd)
dt4 = dt3 / 3.0
assert allclose_jd(dt4.jd, dt.jd)
dt5 = self.dt * np.arange(3)
assert dt5[0].jd == 0.0
assert dt5[-1].jd == (self.dt + self.dt).jd
dt6 = self.dt * [0, 1, 2]
assert np.all(dt6.jd == dt5.jd)
with pytest.raises(OperandTypeError):
self.dt * self.t
with pytest.raises(TypeError):
self.dt * object()
def test_mean(self):
def is_consistent(time_delta: TimeDelta):
mean_expected = (
np.sum(time_delta.jd1) + np.sum(time_delta.jd2)
) / time_delta.size
mean_test = time_delta.mean().jd1 + time_delta.mean().jd2
return mean_test == mean_expected
assert is_consistent(self.dt)
assert is_consistent(self.dt_array)
def test_keep_properties(self):
# closes #1924 (partially)
dt = TimeDelta(1000.0, format="sec")
for t in (self.t, self.t3):
ta = t + dt
assert ta.location is t.location
assert ta.precision == t.precision
assert ta.in_subfmt == t.in_subfmt
assert ta.out_subfmt == t.out_subfmt
tr = dt + t
assert tr.location is t.location
assert tr.precision == t.precision
assert tr.in_subfmt == t.in_subfmt
assert tr.out_subfmt == t.out_subfmt
ts = t - dt
assert ts.location is t.location
assert ts.precision == t.precision
assert ts.in_subfmt == t.in_subfmt
assert ts.out_subfmt == t.out_subfmt
t_tdb = self.t.tdb
assert hasattr(t_tdb, "_delta_tdb_tt")
assert not hasattr(t_tdb, "_delta_ut1_utc")
t_tdb_ut1 = t_tdb.ut1
assert hasattr(t_tdb_ut1, "_delta_tdb_tt")
assert hasattr(t_tdb_ut1, "_delta_ut1_utc")
t_tdb_ut1_utc = t_tdb_ut1.utc
assert hasattr(t_tdb_ut1_utc, "_delta_tdb_tt")
assert hasattr(t_tdb_ut1_utc, "_delta_ut1_utc")
# adding or subtracting some time should remove the delta's
# since these are time-dependent and should be recalculated
for op in (operator.add, operator.sub):
t1 = op(t_tdb, dt)
assert not hasattr(t1, "_delta_tdb_tt")
assert not hasattr(t1, "_delta_ut1_utc")
t2 = op(t_tdb_ut1, dt)
assert not hasattr(t2, "_delta_tdb_tt")
assert not hasattr(t2, "_delta_ut1_utc")
t3 = op(t_tdb_ut1_utc, dt)
assert not hasattr(t3, "_delta_tdb_tt")
assert not hasattr(t3, "_delta_ut1_utc")
def test_set_format(self):
"""
Test basics of setting format attribute.
"""
dt = TimeDelta(86400.0, format="sec")
assert dt.value == 86400.0
assert dt.format == "sec"
dt.format = "jd"
assert dt.value == 1.0
assert dt.format == "jd"
dt.format = "datetime"
assert dt.value == timedelta(days=1)
assert dt.format == "datetime"
def test_from_non_float(self):
dt = TimeDelta("1.000000000000001", format="jd")
assert dt != TimeDelta(1.000000000000001, format="jd") # precision loss.
assert dt == TimeDelta(1, 0.000000000000001, format="jd")
dt2 = TimeDelta(Decimal("1.000000000000001"), format="jd")
assert dt2 == dt
def test_to_value(self):
dt = TimeDelta(86400.0, format="sec")
assert dt.to_value("jd") == 1.0
assert dt.to_value("jd", "str") == "1.0"
assert dt.to_value("sec", subfmt="str") == "86400.0"
with pytest.raises(
ValueError,
match="not one of the known formats.*failed to parse as a unit",
):
dt.to_value("julian")
with pytest.raises(TypeError, match="missing required format or unit"):
dt.to_value()
class TestTimeDeltaScales:
"""Test scale conversion for Time Delta.
Go through @taldcroft's list of expected behavior from #1932"""
def setup_method(self):
# pick a date that includes a leap second for better testing
self.iso_times = [
"2012-06-30 12:00:00",
"2012-06-30 23:59:59",
"2012-07-01 00:00:00",
"2012-07-01 12:00:00",
]
self.t = {
scale: Time(self.iso_times, scale=scale, precision=9)
for scale in TIME_SCALES
}
self.dt = {scale: self.t[scale] - self.t[scale][0] for scale in TIME_SCALES}
def test_delta_scales_definition(self):
for scale in list(TIME_DELTA_SCALES) + [None]:
TimeDelta([0.0, 1.0, 10.0], format="sec", scale=scale)
with pytest.raises(ScaleValueError):
TimeDelta([0.0, 1.0, 10.0], format="sec", scale="utc")
@pytest.mark.parametrize(
("scale1", "scale2"),
list(itertools.product(STANDARD_TIME_SCALES, STANDARD_TIME_SCALES)),
)
def test_standard_scales_for_time_minus_time(self, scale1, scale2):
"""T(X) - T2(Y) -- does T(X) - T2(Y).X and return dT(X)
and T(X) +/- dT(Y) -- does (in essence) (T(X).Y +/- dT(Y)).X
I.e., time differences of two times should have the scale of the
first time. The one exception is UTC, which returns TAI.
There are no standard timescales for which this does not work.
"""
t1 = self.t[scale1]
t2 = self.t[scale2]
dt = t1 - t2
if scale1 in TIME_DELTA_SCALES:
assert dt.scale == scale1
else:
assert scale1 == "utc"
assert dt.scale == "tai"
# now check with delta time; also check reversibility
t1_recover_t2_scale = t2 + dt
assert t1_recover_t2_scale.scale == scale2
t1_recover = getattr(t1_recover_t2_scale, scale1)
assert allclose_jd(t1_recover.jd, t1.jd)
t2_recover_t1_scale = t1 - dt
assert t2_recover_t1_scale.scale == scale1
t2_recover = getattr(t2_recover_t1_scale, scale2)
assert allclose_jd(t2_recover.jd, t2.jd)
def test_local_scales_for_time_minus_time(self):
"""T1(local) - T2(local) should return dT(local)
T1(local) +/- dT(local) or T1(local) +/- Quantity(time-like) should
also return T(local)
I.e. Tests that time differences of two local scale times should
return delta time with local timescale. Furthermore, checks that
arithmetic of T(local) with dT(None) or time-like quantity does work.
Also tests that subtracting two Time objects, one having local time
scale and other having standard time scale should raise TypeError.
"""
t1 = self.t["local"]
t2 = Time("2010-01-01", scale="local")
dt = t1 - t2
assert dt.scale == "local"
# now check with delta time
t1_recover = t2 + dt
assert t1_recover.scale == "local"
assert allclose_jd(t1_recover.jd, t1.jd)
# check that dT(None) can be subtracted from T(local)
dt2 = TimeDelta([10.0], format="sec", scale=None)
t3 = t2 - dt2
assert t3.scale == t2.scale
# check that time quantity can be subtracted from T(local)
q = 10 * u.s
assert (t2 - q).value == (t2 - dt2).value
# Check that one cannot subtract/add times with a standard scale
# from a local one (or vice versa)
t1 = self.t["local"]
for scale in STANDARD_TIME_SCALES:
t2 = self.t[scale]
with pytest.raises(TypeError):
t1 - t2
with pytest.raises(TypeError):
t2 - t1
with pytest.raises(TypeError):
t2 - dt
with pytest.raises(TypeError):
t2 + dt
with pytest.raises(TypeError):
dt + t2
def test_scales_for_delta_minus_delta(self):
"""dT(X) +/- dT2(Y) -- Add/substract JDs for dT(X) and dT(Y).X
I.e. this will succeed if dT(Y) can be converted to scale X.
Returns delta time in scale X
"""
# geocentric timescales
dt_tai = self.dt["tai"]
dt_tt = self.dt["tt"]
dt0 = dt_tai - dt_tt
assert dt0.scale == "tai"
        # tai and tt tick at the same rate, so the differences should be the same
assert allclose_sec(dt0.sec, 0.0)
dt_tcg = self.dt["tcg"]
dt1 = dt_tai - dt_tcg
assert dt1.scale == "tai"
        # tai and tcg tick at different rates, so the differences differ
assert not allclose_sec(dt1.sec, 0.0)
t_tai_tcg = self.t["tai"].tcg
dt_tai_tcg = t_tai_tcg - t_tai_tcg[0]
dt2 = dt_tai - dt_tai_tcg
assert dt2.scale == "tai"
# but if tcg difference calculated from tai, it should roundtrip
assert allclose_sec(dt2.sec, 0.0)
# check that if we put TCG first, we get a TCG scale back
dt3 = dt_tai_tcg - dt_tai
assert dt3.scale == "tcg"
assert allclose_sec(dt3.sec, 0.0)
for scale in "tdb", "tcb", "ut1":
with pytest.raises(TypeError):
dt_tai - self.dt[scale]
# barycentric timescales
dt_tcb = self.dt["tcb"]
dt_tdb = self.dt["tdb"]
dt4 = dt_tcb - dt_tdb
assert dt4.scale == "tcb"
        assert not allclose_sec(dt4.sec, 0.0)
t_tcb_tdb = self.t["tcb"].tdb
dt_tcb_tdb = t_tcb_tdb - t_tcb_tdb[0]
dt5 = dt_tcb - dt_tcb_tdb
assert dt5.scale == "tcb"
assert allclose_sec(dt5.sec, 0.0)
for scale in "utc", "tai", "tt", "tcg", "ut1":
with pytest.raises(TypeError):
dt_tcb - self.dt[scale]
# rotational timescale
dt_ut1 = self.dt["ut1"]
dt5 = dt_ut1 - dt_ut1[-1]
assert dt5.scale == "ut1"
assert dt5[-1].sec == 0.0
for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb":
with pytest.raises(TypeError):
dt_ut1 - self.dt[scale]
# local time scale
dt_local = self.dt["local"]
dt6 = dt_local - dt_local[-1]
assert dt6.scale == "local"
assert dt6[-1].sec == 0.0
for scale in "utc", "tai", "tt", "tcg", "tcb", "tdb", "ut1":
with pytest.raises(TypeError):
dt_local - self.dt[scale]
@pytest.mark.parametrize(
("scale", "op"),
list(itertools.product(TIME_SCALES, (operator.add, operator.sub))),
)
def test_scales_for_delta_scale_is_none(self, scale, op):
"""T(X) +/- dT(None) or T(X) +/- Quantity(time-like)
This is always allowed and just adds JDs, i.e., the scale of
the TimeDelta or time-like Quantity will be taken to be X.
The one exception is again for X=UTC, where TAI is assumed instead,
so that a day is always defined as 86400 seconds.
"""
dt_none = TimeDelta([0.0, 1.0, -1.0, 1000.0], format="sec")
assert dt_none.scale is None
q_time = dt_none.to("s")
dt = self.dt[scale]
dt1 = op(dt, dt_none)
assert dt1.scale == dt.scale
assert allclose_jd(dt1.jd, op(dt.jd, dt_none.jd))
dt2 = op(dt_none, dt)
assert dt2.scale == dt.scale
assert allclose_jd(dt2.jd, op(dt_none.jd, dt.jd))
dt3 = op(q_time, dt)
assert dt3.scale == dt.scale
assert allclose_jd(dt3.jd, dt2.jd)
t = self.t[scale]
t1 = op(t, dt_none)
assert t1.scale == t.scale
assert allclose_jd(t1.jd, op(t.jd, dt_none.jd))
if op is operator.add:
t2 = op(dt_none, t)
assert t2.scale == t.scale
assert allclose_jd(t2.jd, t1.jd)
t3 = op(t, q_time)
assert t3.scale == t.scale
assert allclose_jd(t3.jd, t1.jd)
@pytest.mark.parametrize("scale", TIME_SCALES)
def test_delta_day_is_86400_seconds(self, scale):
"""TimeDelta or Quantity holding 1 day always means 24*60*60 seconds
This holds true for all timescales but UTC, for which leap-second
days are longer or shorter by one second.
"""
t = self.t[scale]
dt_day = TimeDelta(1.0, format="jd")
q_day = dt_day.to("day")
dt_day_leap = t[-1] - t[0]
# ^ = exclusive or, so either equal and not UTC, or not equal and UTC
assert allclose_jd(dt_day_leap.jd, dt_day.jd) ^ (scale == "utc")
t1 = t[0] + dt_day
assert allclose_jd(t1.jd, t[-1].jd) ^ (scale == "utc")
t2 = q_day + t[0]
assert allclose_jd(t2.jd, t[-1].jd) ^ (scale == "utc")
t3 = t[-1] - dt_day
assert allclose_jd(t3.jd, t[0].jd) ^ (scale == "utc")
t4 = t[-1] - q_day
assert allclose_jd(t4.jd, t[0].jd) ^ (scale == "utc")
def test_timedelta_setitem():
t = TimeDelta([1, 2, 3] * u.d, format="jd")
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 86400 * u.s
assert allclose_jd(t.value, [1, 1, 1])
t[1] = TimeDelta(2, format="jd")
assert allclose_jd(t.value, [1, 2, 1])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert "cannot convert value to a compatible TimeDelta" in str(err.value)
def test_timedelta_setitem_sec():
t = TimeDelta([1, 2, 3], format="sec")
t[0] = 0.5
assert allclose_jd(t.value, [0.5, 2, 3])
t[1:] = 4.5
assert allclose_jd(t.value, [0.5, 4.5, 4.5])
t[:] = 1 * u.day
assert allclose_jd(t.value, [86400, 86400, 86400])
t[1] = TimeDelta(2, format="jd")
assert allclose_jd(t.value, [86400, 86400 * 2, 86400])
with pytest.raises(ValueError) as err:
t[1] = 1 * u.m
assert "cannot convert value to a compatible TimeDelta" in str(err.value)
def test_timedelta_mask():
t = TimeDelta([1, 2] * u.d, format="jd")
t[1] = np.ma.masked
assert np.all(t.mask == [False, True])
assert allclose_jd(t[0].value, 1)
assert t.value[1] is np.ma.masked
def test_python_timedelta_scalar():
td = timedelta(days=1, seconds=1)
td1 = TimeDelta(td, format="datetime")
assert td1.sec == 86401.0
td2 = TimeDelta(86401.0, format="sec")
assert td2.datetime == td
def test_python_timedelta_vector():
td = [
[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)],
]
td1 = TimeDelta(td, format="datetime")
assert np.all(td1.jd == [[1, 2], [3, 4]])
td2 = TimeDelta([[1, 2], [3, 4]], format="jd")
assert np.all(td2.datetime == td)
def test_timedelta_to_datetime():
td = TimeDelta(1, format="jd")
assert td.to_datetime() == timedelta(days=1)
td2 = TimeDelta([[1, 2], [3, 4]], format="jd")
td = [
[timedelta(days=1), timedelta(days=2)],
[timedelta(days=3), timedelta(days=4)],
]
assert np.all(td2.to_datetime() == td)
def test_insert_timedelta():
tm = TimeDelta([1, 2], format="sec")
    # Insert a two-element TimeDelta at index 1
tm2 = tm.insert(1, TimeDelta([10, 20], format="sec"))
assert np.all(tm2 == TimeDelta([1, 10, 20, 2], format="sec"))
def test_no_units_warning():
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(1)
assert delta.to_value(u.day) == 1
with pytest.warns(TimeDeltaMissingUnitWarning):
table = Table({"t": [1, 2, 3]})
delta = TimeDelta(table["t"])
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
delta = TimeDelta(np.array([1, 2, 3]))
assert np.all(delta.to_value(u.day) == [1, 2, 3])
with pytest.warns(TimeDeltaMissingUnitWarning):
t = Time("2012-01-01") + 1
assert t.isot[:10] == "2012-01-02"
with pytest.warns(TimeDeltaMissingUnitWarning):
comp = TimeDelta([1, 2, 3], format="jd") >= 2
assert np.all(comp == [False, True, True])
with pytest.warns(TimeDeltaMissingUnitWarning):
# 2 is also interpreted as days, not seconds
assert (TimeDelta(5 * u.s) > 2) is False
# with unit is ok
assert TimeDelta(1 * u.s).to_value(u.s) == 1
# with format is also ok
assert TimeDelta(1, format="sec").to_value(u.s) == 1
assert TimeDelta(1, format="jd").to_value(u.day) == 1
# table column with units
table = Table({"t": [1, 2, 3] * u.s})
assert np.all(TimeDelta(table["t"]).to_value(u.s) == [1, 2, 3])
| bsd-3-clause | 6a3970d3e4e6a65299504fed74ec2164 | 32.002911 | 84 | 0.559388 | 3.171493 | false | true | false | false |
astropy/astropy | astropy/cosmology/funcs/tests/test_funcs.py | 3 | 13389 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
Planck13,
Planck15,
Planck18,
)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# there we are checking internal consistency on the same architecture
# and so can be more demanding
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(
z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6
)
assert allclose(
z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3,
rtol=1e-9,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=2),
0.681277696,
rtol=1e-6,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5),
3.7914908,
rtol=1e-6,
)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=4.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182],
rtol=1e-6,
)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5],
zmax=[2, 4],
),
[0.681277696, 3.7914908],
rtol=1e-6,
)
# more interesting broadcast
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[[0, 2.5]],
zmax=[2, 4],
),
[[0.681277696, 3.7914908]],
rtol=1e-6,
)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115,
rtol=1e-6,
)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115,
rtol=1e-6,
)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
# now the wrong dtype : an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115],
rtol=1e-6,
)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5, 0.1],
zmax=[2, 4],
)
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(
self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=0, zmax=2
)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(
_z_at_scalar_value, excluded=["func", "method", "verbose"]
)
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, "stdout", mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if method == "Bounded":
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z = z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=bracket,
),
z,
rtol=1e-6,
)
else:
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.3, 1.0),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(2.0, 4.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.0, 2.0),
),
0.6812777,
rtol=1e-6,
)
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
zmax=1.6,
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
zmin=1.5,
),
3.7914908,
rtol=1e-6,
)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(3.9, 5.0),
zmin=4.0,
)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_unconverged(method):
"""
    Test warnings on a non-converged solution when `maxfun` is set to too small
    an iteration count; only 'Bounded' returns a status value and a specific
    message.
"""
cosmo = Planck18
ztol = {"Brent": [1e-4, 1e-4], "Golden": [1e-3, 1e-2], "Bounded": [1e-3, 1e-1]}
if method == "Bounded":
ctx = pytest.warns(
AstropyUserWarning,
match="Solver returned 1: Maximum number of function calls reached",
)
else:
ctx = pytest.warns(AstropyUserWarning, match="Solver returned None")
with ctx:
z0 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmax=2, maxfun=13, method=method
)
with ctx:
z1 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmin=2, maxfun=13, method=method
)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1])
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize(
"cosmo",
[
Planck13,
Planck15,
Planck18,
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
flrw.LambdaCDM,
flrw.FlatLambdaCDM,
flrw.wpwaCDM,
flrw.w0wzCDM,
flrw.wCDM,
flrw.FlatwCDM,
flrw.w0waCDM,
flrw.Flatw0waCDM,
],
)
def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck cosmologies
    # they are redshift independent and hence uninvertible,
# *_distance_z1z2 methods take multiple arguments, so require
# special handling
# clone is not a redshift-dependent method
# nu_relative_density is not redshift-dependent in the WMAP cosmologies
skip = (
"Ok",
"Otot",
"angular_diameter_distance_z1z2",
"clone",
"is_equivalent",
"de_density_scale",
"w",
)
if str(cosmo.name).startswith("WMAP"):
skip += ("nu_relative_density",)
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith("_") or name in skip:
continue
fval = func(z)
# we need zmax here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f"Round-trip testing {name} failed"
# Test distance functions between two redshifts; only for realizations
if isinstance(cosmo.name, str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2),
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
| bsd-3-clause | 62aea4a077009fc5189a1f12a571ba88 | 31.418886 | 97 | 0.55456 | 3.457903 | false | true | false | false |
astropy/astropy | astropy/io/misc/tests/test_parquet.py | 3 | 22139 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
Angle,
CartesianRepresentation,
EarthLocation,
Latitude,
Longitude,
SkyCoord,
SphericalCosLatDifferential,
SphericalRepresentation,
)
from astropy.io.misc.parquet import get_pyarrow, parquet_identify
from astropy.table import Column, NdarrayMixin, QTable, Table
from astropy.table.table_helpers import simple_table
from astropy.time import Time, TimeDelta
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.compat.optional_deps import HAS_PANDAS
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# Skip all tests in this file if we cannot import pyarrow
pyarrow = pytest.importorskip("pyarrow")
ALL_DTYPES = [
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
np.bool_,
"|S3",
"U3",
]
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == "|S3":
return [b"abc", b"def", b"ghi"]
elif dtype == "U3":
return ["abc", "def", "ghi"]
else:
return [1, 2, 3]
def test_read_write_simple(tmp_path):
"""Test writing/reading a simple parquet file."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_write_existing(tmp_path):
"""Test writing an existing file without overwriting."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file)
def test_read_write_existing_overwrite(tmp_path):
"""Test overwriting an existing file."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_fileobj(tmp_path):
"""Test reading a file object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import io
with io.FileIO(test_file, mode="r") as input_file:
t2 = Table.read(input_file)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_pathlikeobj(tmp_path):
"""Test reading a path-like object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import pathlib
p = pathlib.Path(test_file)
t2 = Table.read(p)
assert np.all(t2["a"] == [1, 2, 3])
def test_read_wrong_fileobj():
"""Test reading an incorrect fileobject type."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
with pytest.raises(
TypeError, match="pyarrow can only open path-like or file-like objects."
):
Table.read(f, format="parquet")
def test_identify_wrong_fileobj():
"""Test identifying an incorrect fileobj."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
assert not parquet_identify("test", "test", f)
def test_identify_file_wrong_extension():
"""Test identifying an incorrect extension."""
assert not parquet_identify("test", "test.notparquet", None)
def test_identify_file_correct_extension():
"""Test identifying an incorrect extension."""
assert parquet_identify("test", "test.parquet", None)
assert parquet_identify("test", "test.parq", None)
def test_identify_file_noobject_nopath():
"""Test running identify with no object or path."""
assert not parquet_identify("test", None, None)
def test_write_wrong_type():
"""Test writing to a filename of the wrong type."""
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(TypeError, match="should be a string"):
t1.write(1212, format="parquet")
@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_dtypes(tmp_path, dtype):
"""Test that round-tripping a single column preserves datatypes."""
test_file = tmp_path / "test.parquet"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == values)
assert t2["a"].dtype == dtype
def test_preserve_all_dtypes(tmp_path):
"""Test that round-tripping preserves a table with all the datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
def test_preserve_meta(tmp_path):
"""Test that writing/reading preserves metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["a"] = 1
t1.meta["b"] = "hello"
t1.meta["c"] = 3.14159
t1.meta["d"] = True
t1.meta["e"] = np.array([1, 2, 3])
t1.write(test_file)
t2 = Table.read(test_file)
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
def test_preserve_serialized(tmp_path):
"""Test that writing/reading preserves unit/format/description."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
def test_metadata_very_large(tmp_path):
"""Test that very large datasets work"""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2**16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2**18)
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta
def test_fail_meta_serialize(tmp_path):
"""Test that we cannot preserve objects in metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["f"] = str
with pytest.raises(Exception) as err:
t1.write(test_file)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
"""Convenient routine to check objects and attributes match."""
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
# Testing Parquet table read/write with mixins. This is mostly
# copied from HDF5/FITS mixin testing, and it might be good to unify it.
# Analogous tests also exist for ECSV.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 1 * u.kpc)
cr = CartesianRepresentation([0, 1] * u.pc, [4, 5] * u.pc, [8, 6] * u.pc)
sd = SphericalCosLatDifferential(
[0, 1] * u.mas / u.yr, [0, 1] * u.mas / u.yr, 10 * u.km / u.s
)
srd = SphericalRepresentation(sr, differentials=sd)
sc = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5")
scd = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,m",
frame="fk4",
obstime=["J1990.5", "J1991.5"],
)
scdc = scd.copy()
scdc.representation_type = "cartesian"
scpm = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
)
scpmrv = SkyCoord(
[1, 2],
[3, 4],
[5, 6],
unit="deg,deg,pc",
pm_ra_cosdec=[7, 8] * u.mas / u.yr,
pm_dec=[9, 10] * u.mas / u.yr,
radial_velocity=[11, 12] * u.km / u.s,
)
scrv = SkyCoord(
[1, 2], [3, 4], [5, 6], unit="deg,deg,pc", radial_velocity=[11, 12] * u.km / u.s
)
tm = Time([2450814.5, 2450815.5], format="jd", scale="tai", location=el)
# NOTE: in the test below the name of the column "x" for the Quantity is
# important since it tests the fix for #10215 (namespace clash, where "x"
# clashes with "el2.x").
mixin_cols = {
"tm": tm,
"dt": TimeDelta([1, 2] * u.day),
"sc": sc,
"scd": scd,
"scdc": scdc,
"scpm": scpm,
"scpmrv": scpmrv,
"scrv": scrv,
"x": [1, 2] * u.m,
"qdb": [10, 20] * u.dB(u.mW),
"qdex": [4.5, 5.5] * u.dex(u.cm / u.s**2),
"qmag": [21, 22] * u.ABmag,
"lat": Latitude([1, 2] * u.deg),
"lon": Longitude([1, 2] * u.deg, wrap_angle=180.0 * u.deg),
"ang": Angle([1, 2] * u.deg),
"el2": el2,
"sr": sr,
"cr": cr,
"sd": sd,
"srd": srd,
}
time_attrs = ["value", "shape", "format", "scale", "location"]
compare_attrs = {
"c1": ["data"],
"c2": ["data"],
"tm": time_attrs,
"dt": ["shape", "value", "format", "scale"],
"sc": ["ra", "dec", "representation_type", "frame.name"],
"scd": ["ra", "dec", "distance", "representation_type", "frame.name"],
"scdc": ["x", "y", "z", "representation_type", "frame.name"],
"scpm": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"representation_type",
"frame.name",
],
"scpmrv": [
"ra",
"dec",
"distance",
"pm_ra_cosdec",
"pm_dec",
"radial_velocity",
"representation_type",
"frame.name",
],
"scrv": [
"ra",
"dec",
"distance",
"radial_velocity",
"representation_type",
"frame.name",
],
"x": ["value", "unit"],
"qdb": ["value", "unit"],
"qdex": ["value", "unit"],
"qmag": ["value", "unit"],
"lon": ["value", "unit", "wrap_angle"],
"lat": ["value", "unit"],
"ang": ["value", "unit"],
"el2": ["x", "y", "z", "ellipsoid"],
"nd": ["x", "y", "z"],
"sr": ["lon", "lat", "distance"],
"cr": ["x", "y", "z"],
"sd": ["d_lon_coslat", "d_lat", "d_distance"],
"srd": [
"lon",
"lat",
"distance",
"differentials.s.d_lon_coslat",
"differentials.s.d_lat",
"differentials.s.d_distance",
],
}
def test_parquet_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="parquet")
t2 = Table.read(filename, format="parquet")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
@pytest.mark.parametrize("name_col", list(mixin_cols.items()))
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.parquet"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
if isinstance(t[name], NdarrayMixin):
pytest.xfail("NdarrayMixin not supported")
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmp_path):
"""Test round-trip of MaskedColumn through Parquet using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.parquet"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"] = [b"c", b"d", b"e"]
t["c"].mask[1] = True
t.write(filename, format="parquet")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_one_name(table_cls, tmp_path):
"""Test write all cols at once, and read one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
for name in names:
t2 = table_cls.read(filename, format="parquet", include_names=[name])
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t2.colnames == [name]
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_exclude_names(table_cls, tmp_path):
"""Test write all cols at once, and read all but one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", exclude_names=names[0:5])
assert t.colnames[5:] == t2.colnames
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_no_columns(table_cls, tmp_path):
"""Test write all cols at once, and try to read no valid columns."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
with pytest.raises(ValueError, match="No include_names specified"):
t2 = table_cls.read(
filename,
format="parquet",
include_names=["not_a_column", "also_not_a_column"],
)
@pytest.mark.parametrize("table_cls", (Table, QTable))
def test_parquet_mixins_read_schema(table_cls, tmp_path):
"""Test write all cols at once, and read the schema."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", schema_only=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
assert len(t2) == 0
def test_parquet_filter(tmp_path):
"""Test reading a parquet file with a filter."""
filename = tmp_path / "test_simple.parquet"
t1 = Table()
t1["a"] = Column(data=np.arange(100), dtype=np.int32)
t1["b"] = Column(data=np.arange(100, 0, -1), dtype=np.float64)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, filters=[("a", "<", 50)])
assert t2["a"].max() < 50
t2 = Table.read(filename, filters=[("b", "<", 50)])
assert t2["b"].max() < 50
def test_parquet_read_generic(tmp_path):
"""Test reading a generic parquet file."""
filename = tmp_path / "test_generic.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
# Write the table generically via pyarrow.parquet
names = t1.dtype.names
type_list = [
(name, pyarrow.from_numpy_dtype(t1[name].dtype.type)) for name in names
]
schema = pyarrow.schema(type_list)
_, parquet, writer_version = get_pyarrow()
    # Use the writer version returned by get_pyarrow() for full support of
    # datatypes including uint32.
with parquet.ParquetWriter(filename, schema, version=writer_version) as writer:
arrays = [pyarrow.array(t1[name].data) for name in names]
writer.write_table(pyarrow.Table.from_arrays(arrays, schema=schema))
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
def test_parquet_read_pandas(tmp_path):
"""Test reading a pandas parquet file."""
filename = tmp_path / "test_pandas.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
df = t1.to_pandas()
    # Use the writer version returned by get_pyarrow() for full support of
    # datatypes including uint32.
_, _, writer_version = get_pyarrow()
df.to_parquet(filename, version=writer_version)
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
| bsd-3-clause | 4d47dd02a6ea300ab229a3cda4db2a51 | 28.245707 | 91 | 0.59009 | 3.068043 | false | true | false | false |
astropy/astropy | astropy/coordinates/tests/test_unit_representation.py | 3 | 2792 | """
This file tests the behavior of subclasses of Representation and Frames
"""
from copy import deepcopy
import astropy.coordinates
import astropy.units as u
from astropy.coordinates import ICRS, Latitude, Longitude
from astropy.coordinates.baseframe import RepresentationMapping, frame_transform_graph
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES,
SphericalRepresentation,
UnitSphericalRepresentation,
_invalidate_reprdiff_cls_hash,
)
from astropy.coordinates.transformations import FunctionTransform
# Classes setup, borrowed from SunPy.
# Here we define the classes *inside* the tests to make sure that we can wipe
# the slate clean when the tests have finished running.
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
_invalidate_reprdiff_cls_hash()
def test_unit_representation_subclass():
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class UnitSphericalWrap180Representation(UnitSphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude}
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalWrap180Representation
class MyFrame(ICRS):
default_representation = SphericalWrap180Representation
frame_specific_representation_info = {
"spherical": [
RepresentationMapping("lon", "ra"),
RepresentationMapping("lat", "dec"),
]
}
frame_specific_representation_info[
"unitsphericalwrap180"
] = frame_specific_representation_info[
"sphericalwrap180"
] = frame_specific_representation_info[
"spherical"
]
@frame_transform_graph.transform(
FunctionTransform, MyFrame, astropy.coordinates.ICRS
)
def myframe_to_icrs(myframe_coo, icrs):
return icrs.realize_frame(myframe_coo._data)
f = MyFrame(10 * u.deg, 10 * u.deg)
assert isinstance(f._data, UnitSphericalWrap180Representation)
assert isinstance(f.ra, Longitude180)
g = f.transform_to(astropy.coordinates.ICRS())
assert isinstance(g, astropy.coordinates.ICRS)
assert isinstance(g._data, UnitSphericalWrap180Representation)
frame_transform_graph.remove_transform(MyFrame, astropy.coordinates.ICRS, None)
| bsd-3-clause | 277a66fc865401afe5d655c379170314 | 33.9 | 86 | 0.703438 | 4.093842 | false | false | false | false |
astropy/astropy | astropy/samp/tests/web_profile_test_helpers.py | 3 | 9400 | import threading
import time
import xmlrpc.client as xmlrpc
from astropy.samp.client import SAMPClient
from astropy.samp.errors import SAMPClientError, SAMPHubError
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
try:
self.proxy = ServerProxyPool(
pool_size,
xmlrpc.ServerProxy,
f"http://127.0.0.1:{web_port}",
allow_none=1,
)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError(
"set_xmlrpc_callback is not defined for the web profile"
)
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result["samp.methodName"] == "receiveNotification":
self.receive_notification(
self._private_key, *result["samp.params"]
)
elif result["samp.methodName"] == "receiveCall":
self.receive_call(self._private_key, *result["samp.params"])
elif result["samp.methodName"] == "receiveResponse":
self.receive_response(self._private_key, *result["samp.params"])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registation failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registation failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
# pushCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
A Simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the :class:`~astropy.samp.client.SAMPWebClient` and
:class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a
simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None, callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata, callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
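def _integrated_web_client_example():
    # Illustrative usage sketch (hypothetical helper, not part of astropy).
    # It assumes a SAMP hub with the web profile enabled is listening on
    # the default port 21012.
    client = SAMPIntegratedWebClient(name="demo")
    client.connect(web_port=21012)
    try:
        client.notify_all({"samp.mtype": "samp.app.ping"})
    finally:
        client.disconnect()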
| bsd-3-clause | a2be2f69a3a5d373478aa7f66f0e8151 | 32.215548 | 88 | 0.601489 | 4.47406 | false | false | false | false |
astropy/astropy | astropy/table/pprint.py | 3 | 30057 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import fnmatch
import os
import re
import sys
import numpy as np
from astropy import log
from astropy.utils.console import Getch, color_print, conf, terminal_size
from astropy.utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode("utf-8", errors="replace")
else:
return str(val)
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (
str(val) if val is np.ma.masked else format_func(format_, val)
)
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names})
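def _string_format_styles_example():
    # Illustrative sketch (hypothetical helper, not part of astropy): the
    # three string-based styles probed above, applied to a float value.
    val = 3.14159
    assert format(val, ".2f") == "3.14"  # plain format specifier
    assert "{:.2f}".format(val) == "3.14"  # new-style format string
    assert "%.2f" % val == "3.14"  # old-style format string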
def get_auto_format_func(
col=None, possible_string_format_functions=_possible_string_format_functions
):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
col_name : object, optional
Hashable object to identify column like id or name. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val) # noqa: E731
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError(
"Format function for value {} returned {} "
"instead of string type".format(val, type(val))
)
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError(f"Format function for value {val} failed.") from err
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError("the format passed in did nothing.")
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError(
f"unable to parse format string {format_} for its column."
)
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
def _get_pprint_include_names(table):
"""Get the set of names to show in pprint from the table pprint_include_names
and pprint_exclude_names attributes.
These may be fnmatch unix-style globs.
"""
def get_matches(name_globs, default):
match_names = set()
if name_globs: # For None or () use the default
for name in table.colnames:
for name_glob in name_globs:
if fnmatch.fnmatch(name, name_glob):
match_names.add(name)
break
else:
match_names.update(default)
return match_names
include_names = get_matches(table.pprint_include_names(), table.colnames)
exclude_names = get_matches(table.pprint_exclude_names(), [])
return include_names - exclude_names
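def _pprint_name_globs_example():
    # Illustrative sketch (hypothetical helper, not part of astropy):
    # unix-style globs, as accepted by pprint_include_names and
    # pprint_exclude_names, matched against column names with fnmatch.
    colnames = ["ra", "dec", "ra_err", "dec_err"]
    matched = [name for name in colnames if fnmatch.fnmatch(name, "*_err")]
    assert matched == ["ra_err", "dec_err"]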
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
# Declare to keep static type checker happy.
lines = None
width = None
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
def _pformat_col(
self,
col,
max_lines=None,
show_name=True,
show_unit=None,
show_dtype=False,
show_length=None,
html=False,
align=None,
):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs,
)
# Replace tab and newline with text representations so they display nicely.
# Newline in particular is a problem in a multicolumn table.
col_strs = [
val.replace("\t", "\\t").replace("\n", "\\n") for val in col_strs_iter
]
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs["n_header"]
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
val = f"<{td}>{xml_escape(col_str.strip())}</{td}>"
row = "<tr>" + val + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, "<table>")
col_strs.append("</table>")
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs["i_centers"]:
col_strs[i] = col_strs[i].center(col_width)
if outs["i_dashes"] is not None:
col_strs[outs["i_dashes"]] = "-" * col_width
# Format columns according to alignment. `align` arg has precedent, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])")
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError(
"column align must be one of '<', '^', '>', or '='"
)
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group("fill")
align_char = match.group("align")
if align_char == "=":
if fill_char != "0":
raise ValueError("fill character must be '0' for '=' align")
# str.zfill gets used which does not take fill char arg
fill_char = ""
else:
fill_char = ""
align_char = ">"
justify_methods = {"<": "ljust", "^": "center", ">": "rjust", "=": "zfill"}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs["show_length"]:
col_strs.append(f"Length = {len(col)} rows")
return col_strs, outs
def _name_and_structure(self, name, dtype, sep=" "):
"""Format a column name, including a possible structure.
Normally, just returns the name, but if it has a structured dtype,
will add the parts in between square brackets. E.g.,
"name [f0, f1]" or "name [f0[sf0, sf1], f1]".
"""
if dtype is None or dtype.names is None:
return name
structure = ", ".join(
[
self._name_and_structure(name, dt, sep="")
for name, (dt, _) in dtype.fields.items()
]
)
return f"{name}{sep}[{structure}]"
def _pformat_col_iter(
self,
col,
max_lines,
show_name,
show_unit,
outs,
show_dtype=False,
show_length=None,
):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
dtype = getattr(col, "dtype", None)
multidims = getattr(col, "shape", [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
multidims_all_ones = np.prod(multidims) == 1
multidims_has_zero = 0 in multidims
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
n_header += 1
yield self._name_and_structure(col_name, dtype)
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or "")
if show_dtype:
i_centers.append(n_header)
n_header += 1
if dtype is not None:
col_dtype = dtype_info_name((dtype, multidims))
else:
col_dtype = col.__class__.__qualname__ or "object"
yield col_dtype
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield "---"
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, "default_format", None)
pssf = (
getattr(col.info, "possible_string_format_functions", None)
or _possible_string_format_functions
)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate(
[np.arange(0, i0 + 1), np.arange(i1 + 1, len(col))]
)
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
                # with shape (n,1,...,1) from being printed as if there were
# more than one element in a row
if multidims_all_ones:
return format_func(col_format, col[(idx,) + multidim0])
elif multidims_has_zero:
# Any zero dimension means there is no data to print
return ""
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f"{left} .. {right}"
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield "..."
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{}" for entry "{}" '
'in column "{}"'.format(col_format, col[idx], col.info.name)
)
outs["show_length"] = show_length
outs["n_header"] = n_header
outs["i_centers"] = i_centers
outs["i_dashes"] = i_dashes
def _pformat_table(
self,
table,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
tableclass=None,
align=None,
):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
            None.
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError(
"got {} alignment values instead of "
"the number of columns ({})".format(len(align), n_cols)
)
else:
raise TypeError(
"align keyword must be str or list or tuple (got {})".format(
type(align)
)
)
# Process column visibility from table pprint_include_names and
# pprint_exclude_names attributes and get the set of columns to show.
pprint_include_names = _get_pprint_include_names(table)
cols = []
outs = None # Initialize so static type checker is happy
for align_, col in zip(align, table.columns.values()):
if col.info.name not in pprint_include_names:
continue
lines, outs = self._pformat_col(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align_,
)
if outs["show_length"]:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ["<No columns>"], {"show_length": False}
# Use the values for the last column since they are all the same
n_header = outs["n_header"]
n_rows = len(cols[0])
def outwidth(cols):
return sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ["..."] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = f"table{id(table)}"
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = " ".join(tableclass)
rows.append(f'<table id="{tableid}" class="{tableclass}">')
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
vals = (f"<{td}>{xml_escape(col[i].strip())}</{td}>" for col in cols)
row = "<tr>" + "".join(vals) + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
rows.append(row)
rows.append("</table>")
else:
for i in range(n_rows):
row = " ".join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(
self,
tabcol,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = "f br<>qhpn"
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(
max_lines=-1,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
if hasattr(tabcol, "columns"): # tabcol is a table
kwargs["max_width"] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system("cls" if os.name == "nt" else "clear")
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = (
"red" if i < n_header else "default" for i in range(len(lines))
)
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=" ")
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error(
"Console does not support getting a character"
" as required by more(). Use pprint() instead."
)
return
if key in allowed_keys:
break
print(key)
if key.lower() == "q":
break
elif key == " " or key == "f":
i0 += delta_lines
elif key == "b":
i0 = i0 - delta_lines
elif key == "r":
pass
elif key == "<":
i0 = 0
elif key == ">":
i0 = len(tabcol)
elif key == "p":
i0 -= 1
elif key == "n":
i0 += 1
elif key == "h":
showlines = False
print(
"""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""",
end=" ",
)
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
| bsd-3-clause | c17798f42909f91af6a91ae8f3113c49 | 34.444575 | 92 | 0.529095 | 4.374472 | false | false | false | false |
astropy/astropy | astropy/coordinates/builtin_frames/itrs_observed_transforms.py | 3 | 5672 | import erfa
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix
from astropy.coordinates.representation import CartesianRepresentation
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from .altaz import AltAz
from .hadec import HADec
from .itrs import ITRS
# Minimum cos(alt) and sin(alt) for refraction purposes
CELMIN = 1e-6
SELMIN = 0.05
# Latitude of the north pole.
NORTH_POLE = 90.0 * u.deg
def itrs_to_altaz_mat(lon, lat):
# form ITRS to AltAz matrix
# AltAz frame is left handed
minus_x = np.eye(3)
minus_x[0][0] = -1.0
mat = minus_x @ rotation_matrix(NORTH_POLE - lat, "y") @ rotation_matrix(lon, "z")
return mat
def itrs_to_hadec_mat(lon):
# form ITRS to HADec matrix
# HADec frame is left handed
minus_y = np.eye(3)
minus_y[1][1] = -1.0
mat = minus_y @ rotation_matrix(lon, "z")
return mat
def altaz_to_hadec_mat(lat):
# form AltAz to HADec matrix
z180 = np.eye(3)
z180[0][0] = -1.0
z180[1][1] = -1.0
mat = z180 @ rotation_matrix(NORTH_POLE - lat, "y")
return mat
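def _observed_matrix_composition_example():
    # Illustrative sketch (hypothetical helper, not part of astropy):
    # transforming ITRS -> AltAz -> HADec composes to the direct
    # ITRS -> HADec matrix, since the latitude rotations cancel.
    lon, lat = 30.0 * u.deg, 45.0 * u.deg
    direct = itrs_to_hadec_mat(lon)
    composed = altaz_to_hadec_mat(lat) @ itrs_to_altaz_mat(lon, lat)
    assert np.allclose(direct, composed)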
def add_refraction(aa_crepr, observed_frame):
# add refraction to AltAz cartesian representation
refa, refb = erfa.refco(
observed_frame.pressure.to_value(u.hPa),
observed_frame.temperature.to_value(u.deg_C),
observed_frame.relative_humidity.value,
observed_frame.obswl.to_value(u.micron),
)
# reference: erfa.atioq()
norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
# Cosine and sine of altitude, with precautions.
sel = np.maximum(uv[..., 2], SELMIN)
cel = np.maximum(np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2), CELMIN)
# A*tan(z)+B*tan^3(z) model, with Newton-Raphson correction.
tan_z = cel / sel
w = refb * tan_z**2
delta_el = (refa + w) * tan_z / (1.0 + (refa + 3.0 * w) / (sel**2))
# Apply the change, giving observed vector
cosdel = 1.0 - 0.5 * delta_el**2
f = cosdel - delta_el * sel / cel
uv[..., 0] *= f
uv[..., 1] *= f
uv[..., 2] = cosdel * uv[..., 2] + delta_el * cel
# Need to renormalize to get agreement with CIRS->Observed on distance
norm2, uv = erfa.pn(uv)
uv = erfa.sxp(norm, uv)
return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)
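# In more detail (following erfa.atioq): with tan(z) = cos(el)/sin(el), the
# code above applies dEl = (A + w) * tan(z) / (1 + (A + 3*w)/sin(el)**2),
# where w = B * tan(z)**2 -- the A*tan(z) + B*tan(z)**3 model with a single
# Newton-Raphson correction step, and A = refa, B = refb.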
def remove_refraction(aa_crepr, observed_frame):
# remove refraction from AltAz cartesian representation
refa, refb = erfa.refco(
observed_frame.pressure.to_value(u.hPa),
observed_frame.temperature.to_value(u.deg_C),
observed_frame.relative_humidity.value,
observed_frame.obswl.to_value(u.micron),
)
# reference: erfa.atoiq()
norm, uv = erfa.pn(aa_crepr.get_xyz(xyz_axis=-1).to_value())
# Cosine and sine of altitude, with precautions.
sel = np.maximum(uv[..., 2], SELMIN)
cel = np.sqrt(uv[..., 0] ** 2 + uv[..., 1] ** 2)
# A*tan(z)+B*tan^3(z) model
tan_z = cel / sel
delta_el = (refa + refb * tan_z**2) * tan_z
# Apply the change, giving observed vector.
az, el = erfa.c2s(uv)
el -= delta_el
uv = erfa.s2c(az, el)
uv = erfa.sxp(norm, uv)
return CartesianRepresentation(uv, xyz_axis=-1, unit=aa_crepr.x.unit, copy=False)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, HADec)
def itrs_to_observed(itrs_coo, observed_frame):
if np.any(itrs_coo.location != observed_frame.location) or np.any(
itrs_coo.obstime != observed_frame.obstime
):
# This transform will go through the CIRS and alter stellar aberration.
itrs_coo = itrs_coo.transform_to(
ITRS(obstime=observed_frame.obstime, location=observed_frame.location)
)
lon, lat, height = observed_frame.location.to_geodetic("WGS84")
if isinstance(observed_frame, AltAz) or (observed_frame.pressure > 0.0):
crepr = itrs_coo.cartesian.transform(itrs_to_altaz_mat(lon, lat))
if observed_frame.pressure > 0.0:
crepr = add_refraction(crepr, observed_frame)
if isinstance(observed_frame, HADec):
crepr = crepr.transform(altaz_to_hadec_mat(lat))
else:
crepr = itrs_coo.cartesian.transform(itrs_to_hadec_mat(lon))
return observed_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ITRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ITRS)
def observed_to_itrs(observed_coo, itrs_frame):
lon, lat, height = observed_coo.location.to_geodetic("WGS84")
if isinstance(observed_coo, AltAz) or (observed_coo.pressure > 0.0):
crepr = observed_coo.cartesian
if observed_coo.pressure > 0.0:
if isinstance(observed_coo, HADec):
crepr = crepr.transform(matrix_transpose(altaz_to_hadec_mat(lat)))
crepr = remove_refraction(crepr, observed_coo)
crepr = crepr.transform(matrix_transpose(itrs_to_altaz_mat(lon, lat)))
else:
crepr = observed_coo.cartesian.transform(
matrix_transpose(itrs_to_hadec_mat(lon))
)
itrs_at_obs_time = ITRS(
crepr, obstime=observed_coo.obstime, location=observed_coo.location
)
# This final transform may be a no-op if the obstimes and locations are the same.
# Otherwise, this transform will go through the CIRS and alter stellar aberration.
return itrs_at_obs_time.transform_to(itrs_frame)
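# Usage sketch: once registered on the frame graph, these transforms make
# topocentric ITRS -> AltAz conversions work like any other transform_to()
# call. The site, obstime, and ITRS position below are arbitrary assumptions,
# and this assumes an astropy version in which ITRS carries the ``location``
# attribute that the transforms above rely on.
def _demo_itrs_to_altaz():
    from astropy.coordinates import EarthLocation
    from astropy.time import Time
    loc = EarthLocation(lon=17.9 * u.deg, lat=28.8 * u.deg, height=2300 * u.m)
    t = Time("2020-01-01T00:00:00")
    sat = ITRS(x=6000 * u.km, y=2000 * u.km, z=3000 * u.km,
               obstime=t, location=loc)
    return sat.transform_to(AltAz(obstime=t, location=loc))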
| bsd-3-clause | c9c67bb6d2e5189993f749fba97747d2 | 37.585034 | 86 | 0.661495 | 2.96808 | false | false | false | false |
astropy/astropy | astropy/stats/jackknife.py | 3 | 5907 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__all__ = ["jackknife_resampling", "jackknife_stats"]
__doctest_requires__ = {"jackknife_stats": ["scipy"]}
def jackknife_resampling(data):
"""Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
    of size 'n-1' from a measured sample of size 'n'. The i-th sample
    (1<=i<=n) is generated by removing the i-th measurement from the
    original sample. Like bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
"""
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = np.empty([n, n - 1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples
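# Equivalent vectorized construction (a sketch; assumes ``data`` is 1-D, as
# documented above): boolean masking can replace the explicit loop.
def _jackknife_resampling_vectorized(data):
    n = data.shape[0]
    mask = ~np.eye(n, dtype=bool)  # row i keeps every element except the i-th
    return np.broadcast_to(data, (n, n))[mask].reshape(n, n - 1)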
def jackknife_stats(data, statistic, confidence_level=0.95):
"""Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
        data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
4.5
>>> bias
0.0
>>> stderr # doctest: +FLOAT_CMP
0.95742710775633832
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
# jackknife confidence interval
if not (0 < confidence_level < 1):
raise ValueError("confidence level must be in (0, 1).")
# make sure original data is proper
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
# Only import scipy if inputs are valid
from scipy.special import erfinv
resamples = jackknife_resampling(data)
stat_data = statistic(data)
jack_stat = np.apply_along_axis(statistic, 1, resamples)
mean_jack_stat = np.mean(jack_stat, axis=0)
# jackknife bias
bias = (n - 1) * (mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt(
(n - 1)
* np.mean((jack_stat - mean_jack_stat) * (jack_stat - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
z_score = np.sqrt(2.0) * erfinv(confidence_level)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
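# Quick numeric check of the z-score used above: sqrt(2) * erfinv(cl)
# reproduces the familiar two-sided normal quantiles (a sketch; requires
# scipy, as does jackknife_stats itself).
def _demo_zscore():
    from scipy.special import erfinv
    for cl, expected in [(0.6827, 1.0), (0.95, 1.96), (0.9973, 3.0)]:
        z = np.sqrt(2.0) * erfinv(cl)
        assert abs(z - expected) < 0.01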
| bsd-3-clause | e9d986675b695f3c4daa5a92b021cee6 | 32.185393 | 86 | 0.602675 | 3.464516 | false | false | false | false |
astropy/astropy | astropy/units/quantity_helper/helpers.py | 3 | 15123 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from astropy.units.core import (
UnitConversionError,
UnitsError,
UnitTypeError,
dimensionless_unscaled,
unit_scale_converter,
)
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
converter = from_unit._get_converter(to_unit)
return None if converter is unit_scale_converter else converter
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
        # fast path for identical units ("==" is slow, so avoid it).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2, dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1 - changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities "
"with compatible dimensions"
)
return converters, unit1
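# Illustrative sketch of the return value for a typical case: adding metres
# to kilometres scales the second input and yields km as the result unit.
def _demo_converters_and_unit():
    from astropy import units as u
    converters, result_unit = get_converters_and_unit(np.add, u.km, u.m)
    assert result_unit == u.km
    assert converters[0] is None          # first input is already in km
    assert converters[1](1000.0) == 1.0   # 1000 m -> 1 km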
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit**2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit**-1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit**one_half if unit is not None else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit**one_third if unit is not None else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return (
[get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled),
)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)], dimensionless_unscaled)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper_dimensionless_to_radian(f, unit):
from astropy.units.si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
def helper_degree_to_radian(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_radian_to_degree(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_radian_to_dimensionless(f, unit):
from astropy.units.si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to quantities with angle units"
)
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to unscaled dimensionless"
" quantities"
)
return [None], (None, None)
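# Illustrative sketch of the observable effect of the single-argument
# helpers defined above (they are registered further down in this module):
def _demo_single_arg_helpers():
    from astropy import units as u
    assert np.sqrt(4 * u.m**2) == 2 * u.m                      # helper_sqrt
    assert u.isclose(np.radians(180 * u.deg), np.pi * u.rad)   # degree -> radian
    assert np.isfinite(3 * u.km)                               # helper_onearg_test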
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (
get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None
)
except UnitsError:
raise UnitTypeError(
"Can only apply 'heaviside' function with a dimensionless second argument."
)
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (
get_converter(unit1, dimensionless_unscaled) if unit1 is not None else None
)
converter2 = (
get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None
)
except UnitsError:
raise UnitTypeError(
f"Can only apply '{f.__name__}' function to dimensionless quantities"
)
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from astropy.units.si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_clip(f, unit1, unit2, unit3):
# Treat the array being clipped as primary.
converters = [None]
if unit1 is None:
result_unit = dimensionless_unscaled
try:
converters += [
(None if unit is None else get_converter(unit, dimensionless_unscaled))
for unit in (unit2, unit3)
]
except UnitsError:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities with "
"compatible dimensions"
)
else:
result_unit = unit1
for unit in unit2, unit3:
try:
converter = get_converter(_d(unit), result_unit)
except UnitsError:
if unit is None:
# special case: OK if unitless number is zero, inf, nan
converters.append(False)
else:
raise UnitConversionError(
f"Can only apply '{f.__name__}' function to quantities with "
"compatible dimensions"
)
else:
converters.append(converter)
return converters, result_unit
# list of ufuncs:
# https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and,
np.bitwise_or,
np.bitwise_xor,
np.invert,
np.left_shift,
np.right_shift,
np.logical_and,
np.logical_or,
np.logical_xor,
np.logical_not,
np.isnat,
np.gcd,
np.lcm,
}
# SINGLE ARGUMENT UFUNCS
# ufuncs that do not care about the unit and do not return a Quantity
# (but rather a boolean, or -1, 0, or +1 for np.sign).
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (
np.absolute,
np.fabs,
np.conj,
np.conjugate,
np.negative,
np.spacing,
np.rint,
np.floor,
np.ceil,
np.trunc,
np.positive,
)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (
np.exp,
np.expm1,
np.exp2,
np.log,
np.log10,
np.log2,
np.log1p,
)
# Default numpy does not ship an "erf" ufunc, but some versions hacked by
# intel do. This is bad, since it means code written for that numpy will
# not run on non-hacked numpy. But still, we might as well support it.
if isinstance(getattr(np.core.umath, "erf", None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (
np.arccos,
np.arcsin,
np.arctan,
np.arccosh,
np.arcsinh,
np.arctanh,
)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (
np.add,
np.subtract,
np.hypot,
np.maximum,
np.minimum,
np.fmin,
np.fmax,
np.nextafter,
np.remainder,
np.mod,
np.fmod,
)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (
np.greater,
np.greater_equal,
np.less,
np.less_equal,
np.not_equal,
np.equal,
)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, "_arg", None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, "matmul", None), np.ufunc):
UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, "clip", None), np.ufunc):
UFUNC_HELPERS[np.core.umath.clip] = helper_clip
del ufunc
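# Illustrative sketch of the observable effect of the two-argument helpers
# registered above:
def _demo_two_arg_helpers():
    from astropy import units as u
    assert np.add(1 * u.km, 500 * u.m) == 1.5 * u.km          # twoarg_invariant
    assert np.multiply(2 * u.m, 3 * u.s).unit == u.m * u.s    # multiplication
    quot, rem = np.divmod(7 * u.m, 2 * u.m)                   # divmod
    assert quot == 3 and rem == 1 * u.m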
| bsd-3-clause | 0f34daba7c18458aff16a9292ff2ca38 | 29.306613 | 87 | 0.668518 | 3.513708 | false | false | false | false |
astropy/astropy | astropy/coordinates/tests/test_celestial_transformations.py | 3 | 15888 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianDifferential,
CartesianRepresentation,
EarthLocation,
SkyCoord,
galactocentric_frame_defaults,
)
from astropy.coordinates.builtin_frames import (
CIRS,
FK4,
FK5,
GCRS,
HCRS,
ICRS,
LSR,
FK4NoETerms,
Galactic,
GalacticLSR,
Galactocentric,
Supergalactic,
)
from astropy.coordinates.distances import Distance
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
from astropy.units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [
(10.6847929, 41.2690650),
(10.6847929, 41.2690650),
(10.0004738, 40.9952444),
(121.1744050, -21.5729360),
]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
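# Equivalent construction (a sketch): the nested index loop above enumerates
# unordered pairs, which itertools expresses directly.
def _m31_params_via_combinations():
    from itertools import combinations
    return [
        (sys1, sys2, coo1, coo2)
        for (sys1, coo1), (sys2, coo2) in combinations(zip(m31_sys, m31_coo), 2)
    ]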
@pytest.mark.parametrize(("fromsys", "tosys", "fromcoo", "tocoo"), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0] * u.deg, dec=fromcoo[1] * u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys())
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time("B1950")))
# convert_precision <1 arcsec
assert (coo2_prec.spherical.lon - tocoo[0] * u.deg) < convert_precision
assert (coo2_prec.spherical.lat - tocoo[1] * u.deg) < convert_precision
else:
assert (coo2.spherical.lon - tocoo[0] * u.deg) < convert_precision # <1 arcsec
assert (coo2.spherical.lat - tocoo[1] * u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert (coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys())
assert (coo1_2.spherical.lon - fromcoo[0] * u.deg) < roundtrip_precision
assert (coo1_2.spherical.lat - fromcoo[1] * u.deg) < roundtrip_precision
assert (coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time("J2000")
b1950 = Time("B1950")
j1975 = Time("J1975")
b1975 = Time("B1975")
fk4 = FK4(ra=1 * u.radian, dec=0.5 * u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1 * u.radian, dec=0.5 * u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK4(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1 * u.deg, dec=2 * u.deg)
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.0e-10
direct = fk5.transform_to(Galactic())
indirect = fk5.transform_to(FK4NoETerms()).transform_to(Galactic())
assert direct.separation(indirect).degree < 1.0e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(
ra=np.linspace(0, 360, 10) * u.deg,
dec=np.linspace(-90, 90, 10) * u.deg,
distance=1.0 * u.kpc,
)
g_xyz = icrs_coord.transform_to(Galactic()).cartesian.xyz
with galactocentric_frame_defaults.set("pre-v4.0"):
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0 * u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
assert allclose(diff[0], 8.3 * u.kpc, atol=1e-5 * u.kpc)
assert allclose(diff[1:], 0 * u.kpc, atol=1e-5 * u.kpc)
# generate some test coordinates
g = Galactic(
l=[0, 0, 45, 315] * u.deg,
b=[-45, 45, 0, 0] * u.deg,
distance=[np.sqrt(2)] * 4 * u.kpc,
)
with galactocentric_frame_defaults.set("pre-v4.0"):
xyz = g.transform_to(
Galactocentric(galcen_distance=1.0 * u.kpc, z_sun=0.0 * u.pc)
).cartesian.xyz
true_xyz = np.array([[0, 0, -1.0], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T * u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1e-5 * u.kpc)
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10.0, 10.0, 100) * u.kpc
y = np.linspace(-10.0, 10.0, 100) * u.kpc
z = np.zeros_like(x)
# from Galactic to Galactocentric
l = np.linspace(15, 30.0, 100) * u.deg
b = np.linspace(-10.0, 10.0, 100) * u.deg
d = np.ones_like(l.value) * u.kpc
with galactocentric_frame_defaults.set("latest"):
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(
x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1), z=z.reshape(100, 1, 1)
)
g1t = g1.transform_to(Galactic())
g2t = g2.transform_to(Galactic())
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(
l=l.reshape(100, 1, 1),
b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1),
)
g1t = g1.transform_to(Galactocentric())
g2t = g2.transform_to(Galactocentric())
np.testing.assert_almost_equal(
g1t.cartesian.xyz.value, g2t.cartesian.xyz.value[:, :, 0, 0]
)
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37 * u.degree, b=+6.32 * u.degree)
assert allclose(npole.transform_to(Supergalactic()).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0 * u.degree, sgb=0 * u.degree)
lon0_gal = lon0.transform_to(Galactic())
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (https://ui.adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91 * u.degree, sgb=+73.72 * u.degree)
icrs = SkyCoord("18h50m27s +31d57m17s")
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44 * u.degree, sgb=+46.17 * u.degree)
icrs = SkyCoord("17h51m36s -25d18m52s")
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS:
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup_method(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(
ra=244.52984668 * u.deg,
dec=-22.36943723 * u.deg,
distance=406615.66347377 * u.km,
)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(
ra=[244.52989062, 271.40976248] * u.deg,
dec=[-22.36943605, -25.07431079] * u.deg,
distance=[406615.66347377, 375484.13558956] * u.km,
)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(
CartesianRepresentation([0.0, 0.0, 0.0] * u.km), obstime=self.t1
)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5 * u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0 * u.km, atol=self.tolerance)
class TestHelioBaryCentric:
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup_method(self):
wht = EarthLocation(342.12 * u.deg, 28.758333333333333 * u.deg, 2327 * u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check it doesn't change from previous times.
previous = [-1.02597256e11, 9.71725820e10, 4.21268419e10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz - helio_slalib) ** 2).sum()) < 14.0 * u.km
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e11, 9.68331109e10, 4.19720938e10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with SLALIB answer to within 14km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz - bary_slalib) ** 2).sum()) < 14.0 * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
icrs = ICRS(
ra=15.1241 * u.deg,
dec=17.5143 * u.deg,
distance=150.12 * u.pc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
lsr = icrs.transform_to(LSR())
lsr_diff = lsr.data.differentials["s"]
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic()).cartesian.xyz
assert allclose(gal_lsr.to(u.km / u.s, u.dimensionless_angles()), lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(
ra=15.1241 * u.deg,
dec=17.5143 * u.deg,
distance=150.12 * u.pc,
pm_ra_cosdec=0 * u.mas / u.yr,
pm_dec=0 * u.mas / u.yr,
radial_velocity=0 * u.km / u.s,
)
icrs = lsr.transform_to(ICRS())
icrs_diff = icrs.data.differentials["s"]
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic()).cartesian.xyz
assert allclose(
gal_icrs.to(u.km / u.s, u.dimensionless_angles()), -lsr.v_bary.d_xyz
)
def test_hcrs_icrs_differentials():
    # Regression test to ensure that we can transform velocities from HCRS to LSR.
# Numbers taken from the original issue, gh-6835.
hcrs = HCRS(
ra=8.67 * u.deg,
dec=53.09 * u.deg,
distance=117 * u.pc,
pm_ra_cosdec=4.8 * u.mas / u.yr,
pm_dec=-15.16 * u.mas / u.yr,
radial_velocity=23.42 * u.km / u.s,
)
icrs = hcrs.transform_to(ICRS())
# The position and velocity should not change much
assert allclose(hcrs.cartesian.xyz, icrs.cartesian.xyz, rtol=1e-8)
assert allclose(hcrs.velocity.d_xyz, icrs.velocity.d_xyz, rtol=1e-2)
hcrs2 = icrs.transform_to(HCRS())
# The values should round trip
assert allclose(hcrs.cartesian.xyz, hcrs2.cartesian.xyz, rtol=1e-12)
assert allclose(hcrs.velocity.d_xyz, hcrs2.velocity.d_xyz, rtol=1e-12)
def test_cirs_icrs():
"""
Test CIRS<->ICRS transformations, including self transform
"""
t = Time("J2010")
    MOONDIST = 385000 * u.km  # approximate semi-major axis of the Moon's orbit
MOONDIST_CART = CartesianRepresentation(
3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST, 3**-0.5 * MOONDIST
)
loc = EarthLocation(lat=0 * u.deg, lon=0 * u.deg)
cirs_geo_frame = CIRS(obstime=t)
cirs_topo_frame = CIRS(obstime=t, location=loc)
moon_geo = cirs_geo_frame.realize_frame(MOONDIST_CART)
moon_topo = moon_geo.transform_to(cirs_topo_frame)
# now check that the distance change is similar to earth radius
assert (
1000 * u.km
< np.abs(moon_topo.distance - moon_geo.distance).to(u.au)
< 7000 * u.km
)
# now check that it round-trips
moon2 = moon_topo.transform_to(moon_geo)
assert_allclose(moon_geo.cartesian.xyz, moon2.cartesian.xyz)
# now check ICRS transform gives a decent distance from Barycentre
moon_icrs = moon_geo.transform_to(ICRS())
assert_allclose(moon_icrs.distance - 1 * u.au, 0.0 * u.R_sun, atol=3 * u.R_sun)
@pytest.mark.parametrize("frame", [LSR, GalacticLSR])
def test_lsr_loopback(frame):
xyz = CartesianRepresentation(1, 2, 3) * u.AU
xyz = xyz.with_differentials(CartesianDifferential(4, 5, 6) * u.km / u.s)
v_bary = CartesianDifferential(5, 10, 15) * u.km / u.s
# Test that the loopback properly handles a change in v_bary
from_coo = frame(xyz) # default v_bary
to_frame = frame(v_bary=v_bary)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the velocity but not the position
assert allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
assert not allclose(
explicit_coo.velocity.d_xyz, from_coo.velocity.d_xyz, rtol=1e-10
)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
assert allclose(
explicit_coo.velocity.d_xyz, implicit_coo.velocity.d_xyz, rtol=1e-10
)
@pytest.mark.parametrize(
"to_frame",
[
Galactocentric(galcen_coord=ICRS(300 * u.deg, -30 * u.deg)),
Galactocentric(galcen_distance=10 * u.kpc),
Galactocentric(z_sun=10 * u.pc),
Galactocentric(roll=1 * u.deg),
],
)
def test_galactocentric_loopback(to_frame):
xyz = CartesianRepresentation(1, 2, 3) * u.pc
from_coo = Galactocentric(xyz)
explicit_coo = from_coo.transform_to(ICRS()).transform_to(to_frame)
implicit_coo = from_coo.transform_to(to_frame)
# Confirm that the explicit transformation changes the position
assert not allclose(explicit_coo.cartesian.xyz, from_coo.cartesian.xyz, rtol=1e-10)
# Confirm that the loopback matches the explicit transformation
assert allclose(explicit_coo.cartesian.xyz, implicit_coo.cartesian.xyz, rtol=1e-10)
| bsd-3-clause | 1d624807977d7415dbd26ed0cf485c24 | 34.543624 | 88 | 0.637273 | 2.777137 | false | true | false | false |
astropy/astropy | astropy/cosmology/tests/test_units.py | 3 | 15708 | """Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh**-2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh**2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)
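# Worked check of the magnitude arithmetic above (a sketch): with
# H0 = 10 km/s/Mpc, h = 0.1, so M_h = M - 5*log10(h) = 12 + 5 = 17,
# matching the 17 mag -> 12 mag conversion asserted in test_littleh.
def _demo_littleh_mag_arithmetic():
    import numpy as np
    h = 10.0 / 100.0
    M_h = 12.0 - 5.0 * np.log10(h)
    assert np.isclose(M_h, 17.0)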
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
"""Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
assert u.get_physical_type(z) == "redshift"
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift**3) == val
# and in composite units
assert (3 * u.km / cu.redshift**3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
"""Test :func:`astropy.cosmology.units.redshift_temperature`."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_hubble():
"""Test :func:`astropy.cosmology.units.redshift_hubble`."""
unit = u.km / u.s / u.Mpc
cosmo = Planck13.clone(H0=100 * unit)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_hubble()
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_hubble()
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.redshift_hubble(cosmo)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # little-h
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
@pytest.mark.parametrize(
"kind",
[cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"],
)
def test_redshift_distance(kind):
"""Test :func:`astropy.cosmology.units.redshift_distance`."""
z = 15 * cu.redshift
d = getattr(Planck13, kind + "_distance")(z)
equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)
# properties of Equivalency
assert equivalency.name[0] == "redshift_distance"
assert equivalency.kwargs[0]["cosmology"] == Planck13
assert equivalency.kwargs[0]["distance"] == kind
# roundtrip
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_redshift_distance_wrong_kind():
"""Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.redshift_distance(kind=None)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
"""Test `astropy.cosmology.units.with_redshift`."""
@pytest.fixture(scope="class")
def cosmo(self):
"""Test cosmology."""
return Planck13.clone(Tcmb0=3 * u.K)
# ===========================================
def test_cosmo_different(self, cosmo):
"""The default is different than the test cosmology."""
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
def test_no_equivalency(self, cosmo):
"""Test the equivalency ``with_redshift`` without any enabled."""
equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False)
assert len(equivalency) == 0
# -------------------------------------------
def test_temperature_off(self, cosmo):
"""Test ``with_redshift`` with the temperature off."""
z = 15 * cu.redshift
err_msg = (
r"^'redshift' \(redshift\) and 'K' \(temperature\) are not convertible$"
)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=False)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(u.K, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=False)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(u.K, equivalency)
def test_temperature(self, cosmo):
"""Test temperature equivalency component."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# -------------------------------------------
def test_hubble_off(self, cosmo):
"""Test ``with_redshift`` with Hubble off."""
unit = u.km / u.s / u.Mpc
z = 15 * cu.redshift
err_msg = (
r"^'redshift' \(redshift\) and 'km / \(Mpc s\)' \(frequency\) are not "
"convertible$"
)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=False)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(unit, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=False)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(unit, equivalency)
def test_hubble(self, cosmo):
"""Test Hubble equivalency component."""
unit = u.km / u.s / u.Mpc
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(hubble=True)
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h
# -------------------------------------------
def test_distance_off(self, cosmo):
"""Test ``with_redshift`` with the distance off."""
z = 15 * cu.redshift
err_msg = r"^'redshift' \(redshift\) and 'Mpc' \(length\) are not convertible$"
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=None)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(u.Mpc, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=None)
with pytest.raises(u.UnitConversionError, match=err_msg):
z.to(u.Mpc, equivalency)
def test_distance_default(self):
"""Test distance equivalency default."""
z = 15 * cu.redshift
d = default_cosmology.get().comoving_distance(z)
equivalency = cu.with_redshift()
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_distance_wrong_kind(self):
"""Test distance equivalency, but the wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.with_redshift(distance=ValueError)
@pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"])
def test_distance(self, kind):
"""Test distance equivalency."""
cosmo = Planck13
z = 15 * cu.redshift
dist = getattr(cosmo, kind + "_distance")(z)
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
# 1) without specifying the cosmology
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(
z.to(u.Mpc, equivalency), getattr(default_cosmo, kind + "_distance")(z)
)
assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# Test atzkw
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10})
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
# check starting with only the dimensionless_redshift equivalency.
assert len(base_registry.equivalencies) == 1
assert str(base_registry.equivalencies[0][0]) == "redshift"
| bsd-3-clause | 410d59b7e377d1138b7619c598962561 | 38.467337 | 87 | 0.63369 | 3.254195 | false | true | false | false |
astropy/astropy | astropy/visualization/wcsaxes/helpers.py | 3 | 6100 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for different kinds of WCSAxes instances.
"""
import numpy as np
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse, AnchoredSizeBar
import astropy.units as u
from astropy.wcs.utils import proj_plane_pixel_scales
__all__ = ["add_beam", "add_scalebar"]
CORNERS = {
"top right": 1,
"top left": 2,
"bottom left": 3,
"bottom right": 4,
"right": 5,
"left": 6,
"bottom": 8,
"top": 9,
}
def add_beam(
ax,
header=None,
major=None,
minor=None,
angle=None,
corner="bottom left",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""
Display the beam shape and size
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the beam shape and size is displayed. The WCS
must be celestial.
header : :class:`~astropy.io.fits.Header`, optional
Header containing the beam parameters. If specified, the ``BMAJ``,
``BMIN``, and ``BPA`` keywords will be searched in the FITS header
to set the major and minor axes and the position angle on the sky.
major : float or :class:`~astropy.units.Quantity`, optional
Major axis of the beam in degrees or an angular quantity.
minor : float, or :class:`~astropy.units.Quantity`, optional
Minor axis of the beam in degrees or an angular quantity.
angle : float or :class:`~astropy.units.Quantity`, optional
Position angle of the beam on the sky in degrees or an angular
quantity in the anticlockwise direction.
corner : str, optional
The beam location. Acceptable values are ``'left'``, ``'right'``,
        ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``, ``'bottom left'``
(default), and ``'bottom right'``.
frame : bool, optional
Whether to display a frame behind the beam (default is ``False``).
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the beam, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
"""
if header and major:
raise ValueError(
"Either header or major/minor/angle must be specified, not both."
)
if header:
major = header["BMAJ"]
minor = header["BMIN"]
angle = header["BPA"]
if isinstance(major, u.Quantity):
major = major.to(u.degree).value
if isinstance(minor, u.Quantity):
minor = minor.to(u.degree).value
if isinstance(angle, u.Quantity):
angle = angle.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show beam when WCS is not celestial")
minor /= degrees_per_pixel
major /= degrees_per_pixel
corner = CORNERS[corner]
beam = AnchoredEllipse(
ax.transData,
width=minor,
height=major,
angle=angle,
loc=corner,
pad=pad,
borderpad=borderpad,
frameon=frame,
)
beam.ellipse.set(**kwargs)
ax.add_artist(beam)
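# Usage sketch: reading the beam parameters from a FITS header and drawing
# the beam on a WCSAxes instance. ``image.fits`` is a placeholder file name,
# assumed to carry BMAJ/BMIN/BPA keywords and a celestial WCS.
def _demo_add_beam():
    import matplotlib.pyplot as plt
    from astropy.io import fits
    from astropy.wcs import WCS
    hdu = fits.open("image.fits")[0]
    ax = plt.subplot(projection=WCS(hdu.header).celestial)
    ax.imshow(hdu.data, origin="lower")
    add_beam(ax, header=hdu.header, facecolor="none", edgecolor="white")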
def add_scalebar(
ax,
length,
label=None,
corner="bottom right",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""Add a scale bar
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the scale bar is displayed. The WCS must be
celestial.
    length : float or :class:`~astropy.units.Quantity`
        The length of the scale bar in degrees or an angular quantity.
    label : str, optional
        Label to place below the scale bar.
    corner : str, optional
        Where to place the scale bar. Acceptable values are ``'left'``,
        ``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
        ``'bottom left'``, and ``'bottom right'`` (default).
    frame : bool, optional
        Whether to display a frame behind the scale bar (default is ``False``).
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the scale bar, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to
:class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
"""
if isinstance(length, u.Quantity):
length = length.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show scalebar when WCS is not celestial")
length = length / degrees_per_pixel
corner = CORNERS[corner]
scalebar = AnchoredSizeBar(
ax.transData,
length,
label,
corner,
pad=pad,
borderpad=borderpad,
sep=5,
frameon=frame,
**kwargs,
)
ax.add_artist(scalebar)
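# Usage sketch: a one-arcminute scale bar on an existing WCSAxes instance;
# the label text and colour are arbitrary choices.
def _demo_add_scalebar(ax):
    add_scalebar(ax, 1 * u.arcmin, label="1'", color="white")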
| bsd-3-clause | 3eb626529cc605438ebfae3982eba320 | 28.901961 | 85 | 0.624262 | 3.870558 | false | false | false | false |
astropy/astropy | astropy/convolution/core.py | 3 | 12177 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that the array
integrates to one by default.
Currently only symmetric 2D kernels are supported.
"""
import copy
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .utils import add_kernel_arrays_1D, add_kernel_arrays_2D, discretize_model
MAX_NORMALIZATION = 100
__all__ = ["Kernel", "Kernel1D", "Kernel2D", "kernel_arithmetics"]
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : ndarray
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Absolute deviation of the sum of the kernel array values from
one.
"""
return np.abs(1.0 - self._array.sum())
@property
def is_bool(self):
"""
Indicates if kernel is bool.
        If the kernel is bool, the multiplication in the convolution can
        be omitted to increase performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode="integral"):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
"""
if mode == "integral":
normalization = self._array.sum()
elif mode == "peak":
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn(
"The kernel cannot be normalized because it sums to zero.",
AstropyUserWarning,
)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
        A 2D filter is separable when its filter array can be written as the
outer product of two 1D arrays.
If a filter kernel is separable, higher dimension convolutions will be
performed by applying the 1D filter array consecutively on every dimension.
        This is significantly faster than using a filter array with the same
dimension.
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, "add")
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, "sub")
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
    def __array_wrap__(self, array, context=None):
        """
        Wrapper for multiplication with numpy arrays.
        """
        # Guard against context=None (the default when called outside a
        # ufunc), which the previous subscript would have crashed on.
        if context is not None and isinstance(context[0], np.ufunc):
            return NotImplemented
        else:
            return array
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int or None, optional
Size of the kernel array. Default = ⌊8*width+1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width + 1⌋.
        Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array. Default is None.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance.
value : `astropy.convolution.Kernel`, float, or int
Value to operate with.
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
if operation == "mul":
raise Exception(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
if operation == "mul":
raise Exception(
"Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead."
)
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value):
if operation == "mul":
new_kernel = copy.copy(kernel)
new_kernel._array *= value
else:
raise Exception("Kernel operation not supported.")
else:
raise Exception("Kernel operation not supported.")
return new_kernel
| bsd-3-clause | 4e1381d38ef8fef24de4ec1d2459de8e | 30.762402 | 83 | 0.562022 | 4.28798 | false | false | false | false |
astropy/astropy | astropy/convolution/tests/test_convolve_fft.py | 3 | 35262 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
from contextlib import nullcontext
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_almost_equal_nulp,
assert_array_equal,
)
from astropy import units as u
from astropy.convolution.convolve import convolve, convolve_fft
from astropy.utils.exceptions import AstropyUserWarning
VALID_DTYPES = (">f4", "<f4", ">f8", "<f8")
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, "fill", "wrap"]
NANTREATMENT_OPTIONS = ("interpolate", "fill")
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
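These expectations can be verified directly with NumPy's 'same' mode (an
illustrative aside, not one of the tests below):
    >>> import numpy as np
    >>> np.convolve([1, 2, 3, 4, 5], [1, 0], mode='same')
    array([1, 2, 3, 4, 5])
    >>> np.convolve([1, 2, 3, 4, 5], [0, 1], mode='same')
    array([0, 1, 2, 3, 4])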
"""
# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
option_names = ("boundary", "nan_treatment", "normalize_kernel", "dealias")
options = list(
itertools.product(
BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False)
)
)
option_names_preserve_nan = (
"boundary",
"nan_treatment",
"normalize_kernel",
"preserve_nan",
)
options_preserve_nan = list(
itertools.product(
BOUNDARY_OPTIONS, NANTREATMENT_OPTIONS, (True, False), (True, False)
)
)
def expected_boundary_warning(boundary=None):
# Helper that returns the appropriate context manager for the boundary=None
# warning depending on the value of boundary.
if boundary is None:
ctx = pytest.warns(
AstropyUserWarning,
match=(
"The convolve_fft version of boundary=None "
"is equivalent to the convolve boundary='fill'"
),
)
else:
ctx = nullcontext()
return ctx
def expected_dealias_error(boundary=None, dealias=False):
    # Helper that returns the appropriate context manager for the ValueError
    # raised when dealias=True is combined with boundary='wrap'.
if dealias and boundary == "wrap":
ctx = pytest.raises(ValueError)
else:
ctx = nullcontext()
return ctx
def assert_floatclose(x, y):
"""Assert arrays are close to within expected floating point rounding.
Check that the result is correct at the precision expected for 64 bit
numbers, taking account that the tolerance has to reflect that all powers
in the FFTs enter our values.
"""
# The number used is set by the fact that the Windows FFT sometimes
# returns an answer that is EXACTLY 10*np.spacing.
assert_allclose(x, y, atol=10 * np.spacing(x.max()), rtol=0.0)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_quantity(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that convolve_fft works correctly when input array is a Quantity
"""
x = np.array([1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0], dtype="float64") * u.ph
y = np.array([0.2, 0.6, 0.2], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
assert x.unit == z.unit
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that a unit kernel with a single element returns the same array
"""
x = np.array([1.0, 2.0, 3.0], dtype="float64")
y = np.array([1.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
"""
x = np.array([1.0, 2.0, 3.0], dtype="float64")
y = np.array([0.0, 1.0, 0.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that the different modes are producing the correct results using
a uniform kernel with three elements
"""
x = np.array([1.0, 0.0, 3.0], dtype="float64")
y = np.array([1.0, 1.0, 1.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
answer_key = (boundary, nan_treatment, normalize_kernel)
answer_dict = {
"sum_fill_zeros": np.array([1.0, 4.0, 3.0], dtype="float64"),
"average_fill_zeros": np.array(
[1 / 3.0, 4 / 3.0, 1.0], dtype="float64"
),
"sum_wrap": np.array([4.0, 4.0, 4.0], dtype="float64"),
"average_wrap": np.array(
[4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64"
),
}
result_dict = {
# boundary, nan_treatment, normalize_kernel
("fill", "interpolate", True): answer_dict["average_fill_zeros"],
("wrap", "interpolate", True): answer_dict["average_wrap"],
("fill", "interpolate", False): answer_dict["sum_fill_zeros"],
("wrap", "interpolate", False): answer_dict["sum_wrap"],
}
for k in list(result_dict.keys()):
result_dict[(k[0], "fill", k[2])] = result_dict[k]
for k in list(result_dict.keys()):
if k[0] == "fill":
result_dict[(None, k[1], k[2])] = result_dict[k]
assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that the different modes are producing the correct results using
a uniform, non-unity kernel with three elements
"""
x = np.array([1.0, 0.0, 3.0], dtype="float64")
y = np.array([0.5, 0.5, 0.5], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
answer_dict = {
"sum": np.array([0.5, 2.0, 1.5], dtype="float64"),
"sum_zeros": np.array([0.5, 2.0, 1.5], dtype="float64"),
"sum_nozeros": np.array([0.5, 2.0, 1.5], dtype="float64"),
"average": np.array([1 / 3.0, 4 / 3.0, 1.0], dtype="float64"),
"sum_wrap": np.array([2.0, 2.0, 2.0], dtype="float64"),
"average_wrap": np.array(
[4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64"
),
"average_zeros": np.array([1 / 3.0, 4 / 3.0, 1.0], dtype="float64"),
"average_nozeros": np.array([0.5, 4 / 3.0, 1.5], dtype="float64"),
}
if normalize_kernel:
answer_key = "average"
else:
answer_key = "sum"
if boundary == "wrap":
answer_key += "_wrap"
else:
# average = average_zeros; sum = sum_zeros
answer_key += "_zeros"
assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(
self, boundary, nan_treatment, normalize_kernel, preserve_nan
):
"""
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
"""
x = np.array([1.0, np.nan, 3.0], dtype="float64")
y = np.array([0.0, 1.0, 0.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, [1.0, 0.0, 3.0])
inputs = (
np.array([1.0, np.nan, 3.0], dtype="float64"),
np.array([1.0, np.inf, 3.0], dtype="float64"),
)
outputs = (
np.array([1.0, 0.0, 3.0], dtype="float64"),
np.array([1.0, 0.0, 3.0], dtype="float64"),
)
options_unity1withnan = list(
itertools.product(
BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
(True, False),
(True, False),
inputs,
outputs,
)
)
@pytest.mark.parametrize(
option_names_preserve_nan + ("inval", "outval"), options_unity1withnan
)
def test_unity_1_withnan(
self, boundary, nan_treatment, normalize_kernel, preserve_nan, inval, outval
):
"""
        Test that a unit kernel with a single element returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
"""
x = inval
y = np.array([1.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
assert_floatclose(z, outval)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3_withnan(
self, boundary, nan_treatment, normalize_kernel, preserve_nan
):
"""
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
"""
x = np.array([1.0, np.nan, 3.0], dtype="float64")
y = np.array([1.0, 1.0, 1.0], dtype="float64")
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
if preserve_nan:
assert np.isnan(z[1])
answer_dict = {
"sum": np.array([1.0, 4.0, 3.0], dtype="float64"),
"sum_nozeros": np.array([1.0, 4.0, 3.0], dtype="float64"),
"sum_zeros": np.array([1.0, 4.0, 3.0], dtype="float64"),
"sum_nozeros_interpnan": np.array([1.0, 4.0, 3.0], dtype="float64"),
"average": np.array([1.0, 2.0, 3.0], dtype="float64"),
"sum_wrap": np.array([4.0, 4.0, 4.0], dtype="float64"),
"average_wrap": np.array([4 / 3.0, 4 / 3.0, 4 / 3.0], dtype="float64"),
"average_wrap_interpnan": np.array([2, 2, 2], dtype="float64"),
"average_nozeros": np.array([1 / 2.0, 4 / 3.0, 3 / 2.0], dtype="float64"),
"average_nozeros_interpnan": np.array([1.0, 2.0, 3.0], dtype="float64"),
"average_zeros": np.array([1 / 3.0, 4 / 3.0, 3 / 3.0], dtype="float64"),
"average_zeros_interpnan": np.array(
[1 / 2.0, 4 / 2.0, 3 / 2.0], dtype="float64"
),
}
for key in list(answer_dict.keys()):
if "sum" in key:
answer_dict[key + "_interpnan"] = answer_dict[key] * 3.0 / 2.0
if normalize_kernel:
answer_key = "average"
else:
answer_key = "sum"
if boundary == "wrap":
answer_key += "_wrap"
else:
# average = average_zeros; sum = sum_zeros
answer_key += "_zeros"
if nan_treatment == "interpolate":
answer_key += "_interpnan"
posns = np.isfinite(z)
answer = answer_dict[answer_key][posns]
# check that fill is set and that the 1'th position that was originally
# NaN is included in the check
if (nan_treatment == "fill") and posns[1]:
# we fill the center with the sum of the input array divided by
# three, since we've now pre-filled the center value with zero
answer[1] = 4 / (3.0 if normalize_kernel else 1.0)
assert_floatclose(z[posns], answer)
def test_nan_interpolate(self):
# Test masked array
array = np.array([1.0, np.nan, 3.0], dtype="float64")
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(
masked_array,
kernel,
boundary="fill",
nan_treatment="interpolate",
fill_value=np.nan,
)
assert_floatclose(result, [1, 2, 3])
def test_nan_fill(self):
# regression for #8121
# Test masked array
array = np.array([1.0, np.nan, 3.0], dtype="float64")
kernel = np.array([1, 1, 1])
result = convolve_fft(
array, kernel, boundary="fill", nan_treatment="fill", fill_value=0
)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as zero rather than being ignored.
assert_floatclose(result, [1 / 3.0, 4 / 3.0, 1.0])
def test_nan_fill_two(self):
# regression for #8121
# Test masked array
array = np.array([1.0, np.nan, 3.0], dtype="float64")
kernel = np.array([1, 1, 1])
result = convolve_fft(
array, kernel, boundary="fill", nan_treatment="fill", fill_value=1
)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as fill_value=1 rather than being ignored.
assert_floatclose(result, [1.0, 5 / 3.0, 5 / 3.0])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1.0, 2.0, 3.0], dtype="float64")
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary="fill", fill_value=0.0)
assert_floatclose(result, [1.0 / 2, 2, 3.0 / 2])
# Now test against convolve()
convolve_result = convolve(
masked_array, kernel, boundary="fill", fill_value=0.0
)
assert_floatclose(convolve_result, result)
# Test masked kernel
array = np.array([1.0, 2.0, 3.0], dtype="float64")
kernel = np.array([1, 1, 1])
masked_kernel = np.ma.masked_array(kernel, mask=[0, 1, 0])
result = convolve_fft(array, masked_kernel, boundary="fill", fill_value=0.0)
assert_floatclose(result, [1, 2, 1])
# Now test against convolve()
convolve_result = convolve(
array, masked_kernel, boundary="fill", fill_value=0.0
)
assert_floatclose(convolve_result, result)
def test_normalize_function(self):
"""
Check if convolve_fft works when passing a normalize function.
"""
array = [1, 2, 3]
kernel = [3, 3, 3]
result = convolve_fft(array, kernel, normalize_kernel=np.max)
assert_floatclose(result, [3, 6, 5])
@pytest.mark.parametrize(option_names, options)
def test_normalization_is_respected(
self, boundary, nan_treatment, normalize_kernel, dealias
):
"""
Check that if normalize_kernel is False then the normalization
tolerance is respected.
"""
array = np.array([1, 2, 3])
# A simple identity kernel to which a non-zero normalization is added.
base_kernel = np.array([1.0])
# Use the same normalization error tolerance in all cases.
normalization_rtol = 1e-4
# Add the error below to the kernel.
norm_error = [normalization_rtol / 10, normalization_rtol * 10]
for err in norm_error:
kernel = base_kernel + err
result = convolve_fft(
array,
kernel,
normalize_kernel=normalize_kernel,
nan_treatment=nan_treatment,
normalization_zero_tol=normalization_rtol,
)
if normalize_kernel:
# Kernel has been normalized to 1.
assert_floatclose(result, array)
else:
# Kernel should not have been normalized...
assert_floatclose(result, array * kernel)
class TestConvolve2D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1x1_none(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that a 1x1 unit kernel returns the same array
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype="float64"
)
y = np.array([[1.0]], dtype="float64")
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_unity_3x3(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype="float64"
)
y = np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype="float64"
)
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3x3(self, boundary, nan_treatment, normalize_kernel, dealias):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
"""
x = np.array(
[[0.0, 0.0, 3.0], [1.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype="float64"
)
y = np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype="float64"
)
with expected_boundary_warning(boundary=boundary):
with expected_dealias_error(boundary=boundary, dealias=dealias):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
fill_value=np.nan if normalize_kernel else 0,
normalize_kernel=normalize_kernel,
dealias=dealias,
)
w = np.array(
[[4.0, 6.0, 4.0], [6.0, 9.0, 6.0], [4.0, 6.0, 4.0]], dtype="float64"
)
answer_dict = {
"sum": np.array(
[[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]],
dtype="float64",
),
"sum_wrap": np.array(
[[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]],
dtype="float64",
),
}
answer_dict["average"] = answer_dict["sum"] / w
answer_dict["average_wrap"] = answer_dict["sum_wrap"] / 9.0
answer_dict["average_withzeros"] = answer_dict["sum"] / 9.0
answer_dict["sum_withzeros"] = answer_dict["sum"]
if normalize_kernel:
answer_key = "average"
else:
answer_key = "sum"
if boundary == "wrap":
answer_key += "_wrap"
elif nan_treatment == "fill":
answer_key += "_withzeros"
a = answer_dict[answer_key]
assert_floatclose(z, a)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3x3_withnan(
self, boundary, nan_treatment, normalize_kernel, preserve_nan
):
"""
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
"""
x = np.array(
[[1.0, 2.0, 3.0], [4.0, np.nan, 6.0], [7.0, 8.0, 9.0]], dtype="float64"
)
y = np.array(
[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], dtype="float64"
)
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
if preserve_nan:
assert np.isnan(z[1, 1])
z = np.nan_to_num(z)
x = np.nan_to_num(x)
assert_floatclose(z, x)
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_uniform_3x3_withnan(
self, boundary, nan_treatment, normalize_kernel, preserve_nan
):
"""
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
"""
x = np.array(
[[0.0, 0.0, 3.0], [1.0, np.nan, 0.0], [0.0, 2.0, 0.0]], dtype="float64"
)
y = np.array(
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], dtype="float64"
)
# commented out: allow unnormalized nan-ignoring convolution
# # kernel is not normalized, so this situation -> exception
# if nan_treatment and not normalize_kernel:
# with pytest.raises(ValueError):
# z = convolve_fft(x, y, boundary=boundary,
# nan_treatment=nan_treatment,
# normalize_kernel=normalize_kernel,
# ignore_edge_zeros=ignore_edge_zeros,
# )
# return
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
# you cannot fill w/nan, you can only interpolate over it
fill_value=(
np.nan if normalize_kernel and nan_treatment == "interpolate" else 0
),
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
if preserve_nan:
assert np.isnan(z[1, 1])
# weights
w_n = np.array(
[[3.0, 5.0, 3.0], [5.0, 8.0, 5.0], [3.0, 5.0, 3.0]], dtype="float64"
)
w_z = np.array(
[[4.0, 6.0, 4.0], [6.0, 9.0, 6.0], [4.0, 6.0, 4.0]], dtype="float64"
)
answer_dict = {
"sum": np.array(
[[1.0, 4.0, 3.0], [3.0, 6.0, 5.0], [3.0, 3.0, 2.0]], dtype="float64"
),
"sum_wrap": np.array(
[[6.0, 6.0, 6.0], [6.0, 6.0, 6.0], [6.0, 6.0, 6.0]], dtype="float64"
),
}
answer_dict["average"] = answer_dict["sum"] / w_z
answer_dict["average_interpnan"] = answer_dict["sum"] / w_n
answer_dict["average_wrap_interpnan"] = answer_dict["sum_wrap"] / 8.0
answer_dict["average_wrap"] = answer_dict["sum_wrap"] / 9.0
answer_dict["average_withzeros"] = answer_dict["sum"] / 9.0
answer_dict["average_withzeros_interpnan"] = answer_dict["sum"] / 8.0
answer_dict["sum_withzeros"] = answer_dict["sum"]
answer_dict["sum_interpnan"] = answer_dict["sum"] * 9 / 8.0
answer_dict["sum_withzeros_interpnan"] = answer_dict["sum"]
answer_dict["sum_wrap_interpnan"] = answer_dict["sum_wrap"] * 9 / 8.0
if normalize_kernel:
answer_key = "average"
else:
answer_key = "sum"
if boundary == "wrap":
answer_key += "_wrap"
elif nan_treatment == "fill":
answer_key += "_withzeros"
if nan_treatment == "interpolate":
answer_key += "_interpnan"
        answer = answer_dict[answer_key]
        # Skip the NaN at [1, 1] when preserve_nan=True
        posns = np.where(np.isfinite(z))
        # for reasons unknown, the Windows FFT returns an answer for the [0, 0]
        # component that is EXACTLY 10*np.spacing
        assert_floatclose(z[posns], answer[posns])
def test_big_fail(self):
"""
Test that convolve_fft raises an exception if a too-large array is passed in.
"""
with pytest.raises((ValueError, MemoryError)):
# while a good idea, this approach did not work; it actually writes to disk
# arr = np.memmap('file.np', mode='w+', shape=(512, 512, 512), dtype=complex)
# this just allocates the memory but never touches it; it's better:
arr = np.empty([512, 512, 512], dtype=complex)
# note 512**3 * 16 bytes = 2.0 GB
convolve_fft(arr, arr)
def test_padding(self):
"""
Test that convolve_fft pads to _next_fast_lengths and does not expand all dimensions
to length of longest side (#11242/#10047).
"""
# old implementation expanded this to up to 2048**3
shape = (1, 1226, 518)
img = np.zeros(shape, dtype="float64")
img[0, 600:610, 300:304] = 1.0
kernel = np.zeros((1, 7, 7), dtype="float64")
kernel[0, 3, 3] = 1.0
with pytest.warns(
AstropyUserWarning,
match="psf_pad was set to False, which overrides the boundary='fill'",
):
img_fft = convolve_fft(
img, kernel, return_fft=True, psf_pad=False, fft_pad=False
)
assert_array_equal(img_fft.shape, shape)
img_fft = convolve_fft(
img, kernel, return_fft=True, psf_pad=False, fft_pad=True
)
# should be from either hardcoded _good_sizes[] or scipy.fft.next_fast_len()
assert img_fft.shape in ((1, 1250, 540), (1, 1232, 525))
img_fft = convolve_fft(
img, kernel, return_fft=True, psf_pad=True, fft_pad=False
)
assert_array_equal(img_fft.shape, np.array(shape) + np.array(kernel.shape))
img_fft = convolve_fft(img, kernel, return_fft=True, psf_pad=True, fft_pad=True)
assert img_fft.shape in ((2, 1250, 540), (2, 1250, 525))
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_non_normalized_kernel(self, boundary):
x = np.array([[0.0, 0.0, 4.0], [1.0, 2.0, 0.0], [0.0, 3.0, 0.0]], dtype="float")
y = np.array(
[[1.0, -1.0, 1.0], [-1.0, 0.0, -1.0], [1.0, -1.0, 1.0]], dtype="float"
)
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(
x, y, boundary=boundary, nan_treatment="fill", normalize_kernel=False
)
if boundary in (None, "fill"):
assert_floatclose(
z,
np.array(
[[1.0, -5.0, 2.0], [1.0, 0.0, -3.0], [-2.0, -1.0, -1.0]],
dtype="float",
),
)
elif boundary == "wrap":
assert_floatclose(
z,
np.array(
[[0.0, -8.0, 6.0], [5.0, 0.0, -4.0], [2.0, 3.0, -4.0]],
dtype="float",
),
)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize("boundary", BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
    """
    Make sure that asymmetric convolution
    functions go the right direction
    """
    x = np.array([3.0, 0.0, 1.0], dtype=">f8")
    y = np.array([1, 2, 3], dtype=">f8")
    with expected_boundary_warning(boundary=boundary):
        z = convolve_fft(x, y, boundary=boundary, normalize_kernel=False)
    if boundary in (None, "fill"):
        assert_array_almost_equal_nulp(z, np.array([6.0, 10.0, 2.0], dtype="float"), 10)
    elif boundary == "wrap":
        assert_array_almost_equal_nulp(z, np.array([9.0, 10.0, 5.0], dtype="float"), 10)
@pytest.mark.parametrize(
("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"),
itertools.product(
BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES,
),
)
def test_input_unmodified(
boundary, nan_treatment, normalize_kernel, preserve_nan, dtype
):
"""
Test that convolve_fft works correctly when inputs are lists
"""
array = [1.0, 4.0, 5.0, 6.0, 5.0, 7.0, 8.0]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
with expected_boundary_warning(boundary=boundary):
convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(
("boundary", "nan_treatment", "normalize_kernel", "preserve_nan", "dtype"),
itertools.product(
BOUNDARY_OPTIONS,
NANTREATMENT_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES,
),
)
def test_input_unmodified_with_nan(
boundary, nan_treatment, normalize_kernel, preserve_nan, dtype
):
"""
Test that convolve_fft doesn't modify the input data
"""
array = [1.0, 4.0, 5.0, np.nan, 5.0, 7.0, 8.0]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
with expected_boundary_warning(boundary=boundary):
convolve_fft(
x,
y,
boundary=boundary,
nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan,
)
# ( NaN == NaN ) = False
# Only compare non NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
@pytest.mark.parametrize(
"error_kwarg", [{"psf_pad": True}, {"fft_pad": True}, {"dealias": True}]
)
def test_convolve_fft_boundary_wrap_error(error_kwarg):
x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8")
y = np.array([[1.0]], dtype=">f8")
assert (convolve_fft(x, y, boundary="wrap") == x).all()
with pytest.raises(ValueError) as err:
convolve_fft(x, y, boundary="wrap", **error_kwarg)
assert (
str(err.value)
== f"With boundary='wrap', {list(error_kwarg.keys())[0]} cannot be enabled."
)
def test_convolve_fft_boundary_extend_error():
x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=">f8")
y = np.array([[1.0]], dtype=">f8")
with pytest.raises(
NotImplementedError,
match=r"The 'extend' option is not implemented for fft-based convolution",
):
convolve_fft(x, y, boundary="extend")
| bsd-3-clause | 2acc0f3bd036b0f95c5f3f12c41b68ab | 34.191617 | 92 | 0.531195 | 3.574093 | false | true | false | false |
astropy/astropy | astropy/time/core.py | 3 | 124592 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
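A short usage sketch (TAI led UTC by 37 seconds in 2020):
    >>> from astropy.time import Time  # doctest: +SKIP
    >>> t = Time("2020-01-01T00:00:00", scale="utc")  # doctest: +SKIP
    >>> t.tai.iso  # doctest: +SKIP
    '2020-01-01 00:00:37.000'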
"""
import copy
import enum
import operator
import os
import threading
from datetime import date, datetime, timedelta
from time import strftime
from warnings import warn
import erfa
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.extern import _strptime
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo, data_info_factory
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # noqa: F401
from .formats import (
TIME_DELTA_FORMATS,
TIME_FORMATS,
TimeAstropyTime,
TimeDatetime,
TimeJD,
TimeUnique,
)
from .time_helper.function_helpers import CUSTOM_FUNCTIONS, UNSUPPORTED_FUNCTIONS
from .utils import day_frac
__all__ = [
"TimeBase",
"Time",
"TimeDelta",
"TimeInfo",
"TimeInfoBase",
"update_leap_seconds",
"TIME_SCALES",
"STANDARD_TIME_SCALES",
"TIME_DELTA_SCALES",
"ScaleValueError",
"OperandTypeError",
"TimeDeltaMissingUnitWarning",
]
STANDARD_TIME_SCALES = ("tai", "tcb", "tcg", "tdb", "tt", "ut1", "utc")
LOCAL_SCALES = ("local",)
TIME_TYPES = {
scale: scales for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales
}
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {
("tai", "tcb"): ("tt", "tdb"),
("tai", "tcg"): ("tt",),
("tai", "ut1"): ("utc",),
("tai", "tdb"): ("tt",),
("tcb", "tcg"): ("tdb", "tt"),
("tcb", "tt"): ("tdb",),
("tcb", "ut1"): ("tdb", "tt", "tai", "utc"),
("tcb", "utc"): ("tdb", "tt", "tai"),
("tcg", "tdb"): ("tt",),
("tcg", "ut1"): ("tt", "tai", "utc"),
("tcg", "utc"): ("tt", "tai"),
("tdb", "ut1"): ("tt", "tai", "utc"),
("tdb", "utc"): ("tt", "tai"),
("tt", "ut1"): ("tai", "utc"),
("tt", "utc"): ("tai",),
}
GEOCENTRIC_SCALES = ("tai", "tt", "tcg")
BARYCENTRIC_SCALES = ("tcb", "tdb")
ROTATIONAL_SCALES = ("ut1",)
TIME_DELTA_TYPES = {
scale: scales
for scales in (
GEOCENTRIC_SCALES,
BARYCENTRIC_SCALES,
ROTATIONAL_SCALES,
LOCAL_SCALES,
)
for scale in scales
}
TIME_DELTA_SCALES = (
GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
)
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {
("tt", "tai"): None,
("tai", "tt"): None,
("tcg", "tt"): -erfa.ELG,
("tt", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcg", "tai"): -erfa.ELG,
("tai", "tcg"): erfa.ELG / (1.0 - erfa.ELG),
("tcb", "tdb"): -erfa.ELB,
("tdb", "tcb"): erfa.ELB / (1.0 - erfa.ELB),
}
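# Illustrative reading of the table above (a sketch): with
# off = SCALE_OFFSETS[("tt", "tcg")], an elapsed interval dt_tt in TT seconds
# maps to TCG as dt_tcg = dt_tt + dt_tt * off, per the relation
# second = first + first * scale_offset[(first, second)] noted above.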
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
"mean": {
"IAU2006": {"function": erfa.gmst06, "scales": ("ut1", "tt")},
"IAU2000": {"function": erfa.gmst00, "scales": ("ut1", "tt")},
"IAU1982": {"function": erfa.gmst82, "scales": ("ut1",), "include_tio": False},
},
"apparent": {
"IAU2006A": {"function": erfa.gst06a, "scales": ("ut1", "tt")},
"IAU2000A": {"function": erfa.gst00a, "scales": ("ut1", "tt")},
"IAU2000B": {"function": erfa.gst00b, "scales": ("ut1",)},
"IAU1994": {"function": erfa.gst94, "scales": ("ut1",), "include_tio": False},
},
}
class _LeapSecondsCheck(enum.Enum):
NOT_STARTED = 0 # No thread has reached the check
RUNNING = 1 # A thread is running update_leap_seconds (_LEAP_SECONDS_LOCK is held)
DONE = 2 # update_leap_seconds has completed
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.NOT_STARTED
_LEAP_SECONDS_LOCK = threading.RLock()
def _compress_array_dims(arr):
"""Compress array by allowing at most 2 * edgeitems + 1 in each dimension.
Parameters
----------
arr : array-like
Array to compress.
Returns
-------
out : array-like
Compressed array.
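    Examples
    --------
    With the default ``edgeitems=3`` print option, a long 1-D array is
    reduced to its edges plus one placeholder element:
        >>> _compress_array_dims(np.arange(100)).shape
        (7,)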
"""
idxs = []
edgeitems = np.get_printoptions()["edgeitems"]
# Build up a list of index arrays for each dimension, allowing no more than
# 2 * edgeitems + 1 elements in each dimension.
for dim in range(arr.ndim):
if arr.shape[dim] > 2 * edgeitems:
# The middle [edgeitems] value does not matter as it gets replaced
# by ... in the output.
idxs.append(
np.concatenate(
[np.arange(edgeitems), [edgeitems], np.arange(-edgeitems, 0)]
)
)
else:
idxs.append(np.arange(arr.shape[dim]))
# Use the magic np.ix_ function to effectively treat each index array as a
# slicing operator.
idxs_ix = np.ix_(*idxs)
out = arr[idxs_ix]
return out
class TimeInfoBase(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
This base class is common between TimeInfo and TimeDeltaInfo.
"""
attr_names = MixinInfo.attr_names | {"serialize_method"}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = (
"format",
"scale",
"precision",
"in_subfmt",
"out_subfmt",
"location",
"_delta_ut1_utc",
"_delta_tdb_tt",
)
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = "value"
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == "formatted_value":
out = ("value",)
elif method == "jd1_jd2":
out = ("jd1", "jd2")
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {
"fits": "jd1_jd2",
"ecsv": "formatted_value",
"hdf5": "jd1_jd2",
"yaml": "jd1_jd2",
"parquet": "jd1_jd2",
None: "jd1_jd2",
}
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
Returns
-------
arrays : list of ndarray
"""
parent = self._parent
jd_approx = parent.jd
jd_remainder = (parent - parent.__class__(jd_approx, format="jd")).jd
return [jd_approx, jd_remainder]
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(
names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats],
)
)
# When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict(self, map):
if "jd1" in map and "jd2" in map:
# Initialize as JD but revert to desired format and out_subfmt (if needed)
format = map.pop("format")
out_subfmt = map.pop("out_subfmt", None)
map["format"] = "jd"
map["val"] = map.pop("jd1")
map["val2"] = map.pop("jd2")
out = self._parent_cls(**map)
out.format = format
if out_subfmt is not None:
out.out_subfmt = out_subfmt
else:
map["val"] = map.pop("value")
out = self._parent_cls(**map)
return out
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError("input columns have inconsistent locations")
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
        jd2000 = 2451544.5  # JD of 2000-01-01T00:00; an arbitrary value that works with ERFA
jd1 = np.full(shape, jd2000, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
tm_attrs = {
attr: getattr(col0, attr) for attr in ("scale", "location", "precision")
}
out = self._parent_cls(jd1, jd2, format="jd", **tm_attrs)
out.format = col0.format
out.out_subfmt = col0.out_subfmt
out.in_subfmt = col0.in_subfmt
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
map = super()._represent_as_dict(attrs=attrs)
# TODO: refactor these special cases into the TimeFormat classes?
# The datetime64 format requires special handling for ECSV (see #12840).
# The `value` has numpy dtype datetime64 but this is not an allowed
# datatype for ECSV. Instead convert to a string representation.
if (
self._serialize_context == "ecsv"
and map["format"] == "datetime64"
and "value" in map
):
map["value"] = map["value"].astype("U")
# The datetime format is serialized as ISO with no loss of precision.
if map["format"] == "datetime" and "value" in map:
map["value"] = np.vectorize(lambda x: x.isoformat())(map["value"])
return map
def _construct_from_dict(self, map):
# See comment above. May need to convert string back to datetime64.
# Note that _serialize_context is not set here so we just look for the
# string value directly.
if (
map["format"] == "datetime64"
and "value" in map
and map["value"].dtype.kind == "U"
):
map["value"] = map["value"].astype("datetime64")
# Convert back to datetime objects for datetime format.
if map["format"] == "datetime" and "value" in map:
from datetime import datetime
map["value"] = np.vectorize(datetime.fromisoformat)(map["value"])
delta_ut1_utc = map.pop("_delta_ut1_utc", None)
delta_tdb_tt = map.pop("_delta_tdb_tt", None)
out = super()._construct_from_dict(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
class TimeDeltaInfo(TimeInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_extra_attrs = ("format", "scale")
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "description")
)
attrs.pop("dtype") # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop("shape")
jd1 = np.zeros(shape, dtype="f8")
jd2 = np.zeros(shape, dtype="f8")
out = self._parent_cls(jd1, jd2, format="jd", scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeBase(ShapedLikeNDArray):
"""Base time class from which Time and TimeDelta inherit."""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __getnewargs__(self):
return (self._time,)
def _init_from_vals(
self,
val,
val2,
format,
scale,
copy,
precision=None,
in_subfmt=None,
out_subfmt=None,
):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = "*"
if out_subfmt is None:
out_subfmt = "*"
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError(
"Input val and val2 have inconsistent shape; "
"they cannot be broadcast together."
)
if scale is not None:
if not (isinstance(scale, str) and scale.lower() in self.SCALES):
raise ScaleValueError(
f"Scale {scale!r} is not in the allowed scales "
f"{sorted(self.SCALES)}"
)
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(
val, val2, format, scale, precision, in_subfmt, out_subfmt
)
self._format = self._time.name
# Hack from #9969 to allow passing the location value that has been
# collected by the TimeAstropyTime format class up to the Time level.
# TODO: find a nicer way.
if hasattr(self._time, "_location"):
self.location = self._time._location
del self._time._location
        # If any inputs were masked then mask jd2 accordingly. From the above
        # routine, ``mask`` must be either the Python bool False or a bool
        # ndarray with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd1[mask] = 2451544.5 # Set to JD for 2000-01-01
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and (
val.dtype.kind in ("S", "U", "O", "M") or val.dtype.names
):
# Input is a string, object, datetime, or a table-like ndarray
# (structured array, recarray). These input types can be
# uniquely identified by the format classes.
formats = [
(name, cls)
for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)
]
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(("astropy_time", TimeAstropyTime))
elif not (isinstance(format, str) and format.lower() in self.FORMATS):
if format is None:
raise ValueError(
"No time format was given, and the input is not unique"
)
else:
raise ValueError(
f"Format {format!r} is not one of the allowed formats "
f"{sorted(self.FORMATS)}"
)
else:
formats = [(format, self.FORMATS[format])]
assert formats
problems = {}
for name, cls in formats:
try:
return cls(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError) as err:
# If ``format`` specified then there is only one possibility, so raise
# immediately and include the upstream exception message to make it
# easier for user to see what is wrong.
if len(formats) == 1:
raise ValueError(
f"Input values did not match the format class {format}:"
+ os.linesep
+ f"{err.__class__.__name__}: {err}"
) from err
else:
problems[name] = err
else:
raise ValueError(
"Input values did not match any of the formats where the format "
f"keyword is optional: {problems}"
) from problems[formats[0][0]]
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
format_cls = self.FORMATS[format]
# Get the new TimeFormat object to contain time in new format. Possibly
# coerce in/out_subfmt to '*' (default) if existing subfmt values are
# not valid in the new format.
self._time = format_cls(
self._time.jd1,
self._time.jd2,
self._time._scale,
self.precision,
in_subfmt=format_cls._get_allowed_subfmt(self.in_subfmt),
out_subfmt=format_cls._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
self._format = format
def to_string(self):
"""Output a string representation of the Time or TimeDelta object.
Similar to ``str(self.value)`` (which uses numpy array formatting) but
array values are evaluated only for the items that actually are output.
For large arrays this can be a substantial performance improvement.
Returns
-------
out : str
String representation of the time values.
"""
npo = np.get_printoptions()
if self.size < npo["threshold"]:
out = str(self.value)
else:
# Compress time object by allowing at most 2 * npo["edgeitems"] + 1
# in each dimension. Then force numpy to use "summary mode" of
# showing only the edge items by setting the size threshold to 0.
# TODO: use np.core.arrayprint._leading_trailing if we have support for
# np.concatenate. See #8610.
tm = _compress_array_dims(self)
with np.printoptions(threshold=0):
out = str(tm.value)
return out
def __repr__(self):
return "<{} object: scale='{}' format='{}' value={}>".format(
self.__class__.__name__, self.scale, self.format, self.to_string()
)
def __str__(self):
return self.to_string()
def __hash__(self):
try:
loc = getattr(self, "location", None)
if loc is not None:
loc = loc.x.to_value(u.m), loc.y.to_value(u.m), loc.z.to_value(u.m)
return hash((self.jd1, self.jd2, self.scale, loc))
except TypeError:
if self.ndim != 0:
reason = "(must be scalar)"
elif self.masked:
reason = "(value is masked)"
else:
raise
raise TypeError(f"unhashable type: '{self.__class__.__name__}' {reason}")
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
f"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
if scale == "utc" or self.scale == "utc":
# If doing a transform involving UTC then check that the leap
# seconds table is up to date.
_check_leapsec()
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
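        # For example (a sketch): transforming 'tai' -> 'tcb' looks up
        # MULTI_HOPS[('tai', 'tcb')] == ('tt', 'tdb'), giving the chain
        # ('tai', 'tt', 'tdb', 'tcb'); each adjacent pair then resolves to an
        # ERFA routine (e.g. erfa.taitt, erfa.tdbtcb) below.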
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = "_get_delta_{}_{}".format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
jd1, jd2 = day_frac(jd1, jd2)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](
jd1,
jd2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
self._time.in_subfmt = val
del self.cache
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
# Setting the out_subfmt property here does validation of ``val``
self._time.out_subfmt = val
del self.cache
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
ValueError
If the new shape has the wrong total number of elements.
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in (
(self._time, "jd1"),
(self._time, "jd2"),
(self, "_delta_ut1_utc"),
(self, "_delta_tdb_tt"),
(self, "location"),
):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except Exception:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
if self._time.jd1.shape:
if isinstance(value, np.ndarray):
return value
else:
raise TypeError(
f"JD is an array ({self._time.jd1!r}) but value is not ({value!r})"
)
else:
# zero-dimensional array, is it safe to unbox?
if (
isinstance(value, np.ndarray)
and not value.shape
and not np.ma.is_masked(value)
):
if value.dtype.kind == "M":
# existing test doesn't want datetime64 converted
return value[()]
elif value.dtype.fields:
# Unpack but keep field names; .item() doesn't do that.
# It still doesn't give Python types in the fields, though.
return value[()]
else:
return value.item()
else:
return value
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
def to_value(self, format, subfmt="*"):
"""Get time values expressed in specified output format.
This method allows representing the ``Time`` object in the desired
output ``format`` and optional sub-format ``subfmt``. Available
built-in formats include ``jd``, ``mjd``, ``iso``, and so forth. Each
format can have its own sub-formats.
For built-in numerical formats like ``jd`` or ``unix``, ``subfmt`` can
be one of 'float', 'long', 'decimal', 'str', or 'bytes'. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with
the enhancement depending on platform), and 'decimal'
:class:`decimal.Decimal` for full precision. For 'str' and 'bytes', the
number of digits is also chosen such that time values are represented
accurately.
For built-in date-like string formats, ``subfmt`` can be one of 'date_hms', 'date_hm', or
'date' (or 'longdate_hms', etc., for 5-digit years in
`~astropy.time.TimeFITS`). For sub-formats including seconds, the
number of digits used for the fractional seconds is as set by
`~astropy.time.Time.precision`.
Parameters
----------
format : str
The format in which one wants the time values. Default: the current
format.
subfmt : str or None, optional
Value or wildcard pattern to select the sub-format in which the
values should be given. The default of '*' picks the first
available for a given format, i.e., 'float' or 'date_hms'.
If `None`, use the instance's ``out_subfmt``.
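Examples
--------
A minimal sketch, assuming a scalar UTC input (2010-01-01 is MJD 55197)::
>>> t = Time('2010-01-01T00:00:00', scale='utc')
>>> t.to_value('mjd')
55197.0
>>> t.to_value('mjd', subfmt='str')
'55197.0'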
"""
# TODO: add a precision argument (but ensure it is keyword argument
# only, to make life easier for TimeDelta.to_value()).
if format not in self.FORMATS:
raise ValueError(f"format must be one of {list(self.FORMATS)}")
cache = self.cache["format"]
# Try to keep cache behaviour like it was in astropy < 4.0.
key = format if subfmt is None else (format, subfmt)
if key not in cache:
if format == self.format:
tm = self
else:
tm = self.replicate(format=format)
# Some TimeFormat subclasses may not be able to handle being passed
# an ``out_subfmt``. This includes some core classes like
# TimeBesselianEpochString that do not have any allowed subfmts. But
# those do deal with `self.out_subfmt` internally, so if subfmt is
# the same, we do not pass it on.
kwargs = {}
if subfmt is not None and subfmt != tm.out_subfmt:
kwargs["out_subfmt"] = subfmt
try:
value = tm._time.to_value(parent=tm, **kwargs)
except TypeError as exc:
# Try validating subfmt, e.g. for formats like 'jyear_str' that
# do not implement out_subfmt in to_value() (because there are
# no allowed subformats). If subfmt is not valid this gives the
# same exception as would have occurred if the call to
# `to_value()` had succeeded.
tm._time._select_subfmts(subfmt)
# Subfmt was valid, so fall back to the original exception to see
# if it was lack of support for out_subfmt as a call arg.
if "unexpected keyword argument 'out_subfmt'" in str(exc):
raise ValueError(
f"to_value() method for format {format!r} does not "
"support passing a 'subfmt' argument"
) from None
else:
# Some unforeseen exception so raise.
raise
value = tm._shaped_like_input(value)
cache[key] = value
return cache[key]
@property
def value(self):
"""Time value(s) in current format"""
return self.to_value(self.format, None)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
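Examples
--------
A minimal sketch with illustrative ISO times::
>>> t = Time(['2020-01-01', '2020-01-03'])
>>> t2 = t.insert(1, Time('2020-01-02'))
>>> len(t2)
3
>>> t2[1].iso
'2020-01-02 00:00:00.000'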
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, self.__class__):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0 : idx0 + n_values] = values
out._time.jd1[idx0 + n_values :] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values :] = self._time.jd2[idx0:]
return out
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError(
f"{self.__class__.__name__} object is read-only. Make a "
'copy() or set "writeable" attribute to True.'
)
else:
raise ValueError(
f"scalar {self.__class__.__name__} object is read-only."
)
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ("_delta_tdb_tt", "_delta_ut1_utc"):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def isclose(self, other, atol=None):
"""Returns a boolean or boolean array where two Time objects are
element-wise equal within a time tolerance.
This evaluates the expression below::
abs(self - other) <= atol
Parameters
----------
other : `~astropy.time.Time`
Time object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is two bits in the 128-bit JD time representation,
equivalent to about 40 picosecs.
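Examples
--------
A minimal sketch using a hypothetical millisecond offset::
>>> from astropy import units as u
>>> t1 = Time('2021-06-01T00:00:00', scale='tai')
>>> t1.isclose(t1 + 1 * u.ms, atol=5 * u.ms)
True
>>> t1.isclose(t1 + 1 * u.ms, atol=0.1 * u.ms)
False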
"""
if atol is None:
# Note: use 2 bits instead of 1 bit based on experience in precision
# tests, since taking the difference with a UTC time means one has
# to do a scale change.
atol = 2 * np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
try:
# Separate these out so user sees where the problem is
dt = self - other
dt = abs(dt)
out = dt <= atol
except Exception as err:
raise TypeError(
"'other' argument must support subtraction with Time "
"and return a value that supports comparison with "
f"{atol.__class__.__name__}: {err}"
)
return out
def copy(self, format=None):
"""
Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply("copy", format=format)
def replicate(self, format=None, copy=False, cls=None):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
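Examples
--------
A minimal sketch showing a format change without copying the data::
>>> t = Time('2020-01-01T00:00:00')
>>> r = t.replicate(format='mjd')
>>> r.format, t.format
('mjd', 'isot')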
"""
return self._apply("copy" if copy else "replicate", format=format, cls=cls)
def _apply(self, method, *args, format=None, cls=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == "replicate":
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(cls or self.__class__)
tm._time = TimeJD(
jd1,
jd2,
self.scale,
precision=0,
in_subfmt="*",
out_subfmt="*",
from_jd=True,
)
# Optional ndarray attributes.
for attr in ("_delta_ut1_utc", "_delta_tdb_tt", "location"):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only an array scalar and the method would return a view,
# since in that case nothing would change).
if getattr(val, "shape", ()):
val = apply_method(val)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined and the
# time object is not a scalar (issue #10688).
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError(f"format must be one of {list(tm.FORMATS)}")
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(
tm._time.jd1,
tm._time.jd2,
tm._time._scale,
precision=self.precision,
in_subfmt=NewFormat._get_allowed_subfmt(self.in_subfmt),
out_subfmt=NewFormat._get_allowed_subfmt(self.out_subfmt),
from_jd=True,
)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. If ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices``, with its shape expanded
at ``axis``. If ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
If ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
index = [
indices
if i == axis
else np.arange(s).reshape(
(1,) * (i if keepdims or i < axis else i - 1)
+ (s,)
+ (1,) * (ndim - i - (1 if keepdims or i > axis else 2))
)
for i, s in enumerate(self.shape)
]
return tuple(index)
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# First get the minimum at normal precision.
jd1, jd2 = self.jd1, self.jd2
approx = np.min(jd1 + jd2, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (jd1 - approx) + jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = np.max(jd1 + jd2, axis, keepdims=True)
dt = (jd1 - approx) + jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
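For example, with two illustrative times given out of order::
>>> Time(['2021-01-02', '2021-01-01']).argsort()
array([1, 0])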
"""
# For procedure, see comment on argmin.
jd1, jd2 = self.jd1, self.jd2
approx = jd1 + jd2
remainder = (jd1 - approx) + jd2
if axis is None:
return np.lexsort((remainder.ravel(), approx.ravel()))
else:
return np.lexsort(keys=(remainder, approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
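For example, two illustrative times two days apart::
>>> Time(['2021-01-01', '2021-01-03']).ptp().jd
2.0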
"""
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
return self.max(axis, keepdims=keepdims) - self.min(axis, keepdims=keepdims)
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis, keepdims=True)]
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
"""Mean along a given axis.
This is similar to :meth:`~numpy.ndarray.mean`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2`` is
used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.mean``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
Similarly, the ``dtype`` argument is also present for compatibility
only; it has no meaning for `Time`.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
dtype : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
out : None
Only present for compatibility with :meth:`~numpy.ndarray.mean`,
must be `None`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for
details.
Returns
-------
m : Time
A new Time instance containing the mean values
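Examples
--------
A minimal sketch averaging two illustrative times::
>>> Time(['2021-01-01', '2021-01-03']).mean().iso
'2021-01-02 00:00:00.000'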
"""
if dtype is not None:
raise ValueError("Cannot set ``dtype`` on `Time` instances")
if out is not None:
raise ValueError(
"Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``."
)
where = where & ~self.mask
where_broadcasted = np.broadcast_to(where, self.shape)
kwargs = dict(
axis=axis,
keepdims=keepdims,
where=where,
)
divisor = np.sum(where_broadcasted, axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
raise ValueError(
"Mean over zero elements is not supported as it would give an undefined"
" time;see issue https://github.com/astropy/astropy/issues/6509"
)
jd1, jd2 = day_frac(
val1=np.sum(np.ma.getdata(self.jd1), **kwargs),
val2=np.sum(np.ma.getdata(self.jd2), **kwargs),
divisor=divisor,
)
result = type(self)(
val=jd1,
val2=jd2,
format="jd",
scale=self.scale,
copy=False,
)
result.format = self.format
return result
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
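For example (illustrative; 2020-01-01 is MJD 58849)::
>>> t = Time('2020-01-01', scale='utc')
>>> t.tai.scale
'tai'
>>> t.mjd
58849.0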
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache["scale"]
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
return self.to_value(attr, subfmt=None)
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError(
"Cannot convert TimeDelta with "
"undefined scale to any defined scale."
)
else:
raise ScaleValueError(
f"Cannot convert {self.__class__.__name__} with scale "
f"'{self.scale}' to scale '{attr}'"
)
else:
# Should raise AttributeError
return self.__getattribute__(attr)
def __dir__(self):
return sorted(set(super().__dir__()) | set(self.SCALES) | set(self.FORMATS))
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError(
"Attribute shape must match or be broadcastable to that of "
"Time object. Typically, give either a single value or "
"one for each time."
)
return val
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError(
f"Cannot compare {self.__class__.__name__} instances with "
f"scales '{self.scale}' and '{other.scale}'"
)
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.0)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
class Time(TimeBase):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'unix_tai', 'cxcsec', 'gps', 'plot_date',
'stardate', 'datetime', 'ymdhms', 'iso', 'isot', 'yday', 'datetime64',
'fits', 'byear', 'jyear', 'byear_str', 'jyear_str']
See also: https://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Unix glob to select subformats for parsing input times
out_subfmt : str, optional
Unix glob to select subformat for outputting times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
If given as a tuple, it should be able to initialize an
EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
If not given, assumed to be the center of the Earth for time scale
transformations to and from the solar-system barycenter.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, Time):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(
self,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
if not hasattr(self, "location"):
self.location = None
if isinstance(val, Time):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(
val, val2, format, scale, copy, precision, in_subfmt, out_subfmt
)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (
self.location.size > 1 and self.location.shape != self.shape
):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape, subok=True)
except Exception as err:
raise ValueError(
f"The location with shape {self.location.shape} cannot be "
f"broadcast against time with shape {self.shape}. "
"Typically, either give a single location or one for each time."
) from err
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# an EarthLocation object.
if self_location is None and value.location is None:
match = True
elif (self_location is None and value.location is not None) or (
self_location is not None and value.location is None
):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError(
"cannot set to Time with different location: expected "
f"location={self_location} and got location={value.location}"
)
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(
value,
scale=self.scale,
format=self.format,
location=self_location,
)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible Time object: {err}"
)
return value
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
function, so its accuracy and precision is determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime : :class:`~astropy.time.Time`
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format="datetime", scale="utc")
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : str, sequence, or ndarray
Objects containing time data of type string
format_string : str
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ("U", "S"):
raise TypeError(
"Expected type is string, a bytes-like object or a sequence "
f"of these. Got dtype '{time_array.dtype.kind}'"
)
to_string = (
str
if time_array.dtype.kind == "U"
else lambda x: str(x.item(), encoding="ascii")
)
iterator = np.nditer([time_array, None], op_dtypes=[time_array.dtype, "U30"])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}".format(
*time_tuple
)
format = kwargs.pop("format", None)
out = cls(*iterator.operands[1:], format="isot", **kwargs)
if format is not None:
out.format = format
return out
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : str
Format definition of return string.
Returns
-------
formatted : str or numpy.array
String or numpy.array of strings formatted according to the given
format string.
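Examples
--------
A minimal sketch::
>>> Time('2020-01-01T12:34:56').strftime('%Y-%m-%d %H:%M:%S')
'2020-01-01 12:34:56'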
"""
formatted_strings = []
for sk in self.replicate("iso")._time.str_kwargs():
date_tuple = date(sk["year"], sk["mon"], sk["day"]).timetuple()
datetime_tuple = (
sk["year"],
sk["mon"],
sk["day"],
sk["hour"],
sk["min"],
sk["sec"],
date_tuple[6],
date_tuple[7],
-1,
)
fmtd_str = format_spec
if "%f" in fmtd_str:
fmtd_str = fmtd_str.replace(
"%f",
"{frac:0{precision}}".format(
frac=sk["fracsec"], precision=self.precision
),
)
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
def light_travel_time(
self, skycoord, kind="barycentric", location=None, ephemeris=None
):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
The time offset between the barycentre or heliocentre and Earth,
in TDB seconds. Should be added to the original time to get the
time in the Solar system barycentre or the heliocentre. The time
conversion to BJD will then include the relativistic correction as well.
"""
if kind.lower() not in ("barycentric", "heliocentric"):
raise ValueError(
"'kind' parameter must be one of 'heliocentric' or 'barycentric'"
)
if location is None:
if self.location is None:
raise ValueError(
"An EarthLocation needs to be set or passed in to calculate bary- "
"or heliocentric corrections"
)
location = self.location
from astropy.coordinates import (
GCRS,
HCRS,
ICRS,
CartesianRepresentation,
UnitSphericalRepresentation,
solar_system_ephemeris,
)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError(
"Supplied location does not have a valid `get_itrs` method"
)
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == "heliocentric":
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (
skycoord.icrs.represent_as(UnitSphericalRepresentation)
.represent_as(CartesianRepresentation)
.xyz
)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale="tdb")
def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion
(except when ``longitude='tio'``).
""" # noqa: E501
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
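Examples
--------
An illustrative call (interpolating UT1 requires IERS data, so it is
skipped here)::
>>> Time('2020-01-01').sidereal_time('mean', longitude='greenwich')  # doctest: +SKIP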
""" # noqa: E501 (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
"apparent",
sorted(SIDEREAL_TIME_MODELS["apparent"]),
"mean",
sorted(SIDEREAL_TIME_MODELS["mean"]),
)
def _sid_time_or_earth_rot_ang(self, longitude, function, scales, include_tio=True):
"""Calculate a local sidereal time or Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance.
function : callable
The ERFA function to use.
scales : tuple of str
The time scales that the function requires on input.
include_tio : bool, optional
Whether to include the TIO locator corrected for polar motion.
Should be `False` for pre-2000 IAU models. Default: `True`.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time or Earth rotation angle, with units of hourangle.
""" # noqa: E501
from astropy.coordinates import EarthLocation, Longitude
from astropy.coordinates.builtin_frames.utils import get_polar_motion
from astropy.coordinates.matrix_utilities import rotation_matrix
if longitude is None:
if self.location is None:
raise ValueError(
"No longitude is given but the location for "
"the Time object is not set."
)
longitude = self.location.lon
elif isinstance(longitude, EarthLocation):
longitude = longitude.lon
else:
# Sanity check on input; default unit is degree.
longitude = Longitude(longitude, u.degree, copy=False)
theta = self._call_erfa(function, scales)
if include_tio:
# TODO: this duplicates part of coordinates.erfa_astrom.ErfaAstrom.apio;
# maybe possible to factor out to one or the other.
sp = self._call_erfa(erfa.sp00, ("tt",))
xp, yp = get_polar_motion(self)
# Form the rotation matrix, CIRS to apparent [HA,Dec].
r = (
rotation_matrix(longitude, "z")
@ rotation_matrix(-yp, "x", unit=u.radian)
@ rotation_matrix(-xp, "y", unit=u.radian)
@ rotation_matrix(theta + sp, "z", unit=u.radian)
)
# Solve for angle.
angle = np.arctan2(r[..., 0, 1], r[..., 0, 0]) << u.radian
else:
angle = longitude + (theta << u.radian)
return Longitude(angle, u.hourangle)
def _call_erfa(self, function, scales):
# TODO: allow erfa functions to be used on Time with __array_ufunc__.
erfa_parameters = [
getattr(getattr(self, scale)._time, jd_part)
for scale in scales
for jd_part in ("jd1", "jd2_filled")
]
result = function(*erfa_parameters)
if self.masked:
result[self.mask] = np.nan
return result
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : `~astropy.utils.iers.IERS`, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. Default: `~astropy.utils.iers.earth_orientation_table`
(which in turn defaults to the combined version provided by
`~astropy.utils.iers.IERS_Auto`).
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status=True``)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
the first time ``ut1`` is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True) # doctest: +REMOTE_DATA
>>> status == TIME_BEFORE_IERS_RANGE # doctest: +REMOTE_DATA
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate them from the
IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, "_delta_ut1_utc"):
from astropy.utils.iers import earth_orientation_table
iers_table = earth_orientation_table.get()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = "utc"
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == "ut1":
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_ut1_utc is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, "_delta_tdb_tt"):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ("tt", "tdb"):
raise ValueError(
"Accessing the delta_tdb_tt attribute is only "
"possible for TT or TDB time scales"
)
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
# Assume geocentric.
self._delta_tdb_tt = erfa.dtdb(jd1, jd2, ut, 0.0, 0.0, 0.0)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1,
jd2,
ut,
lon.to_value(u.radian),
rxy.to_value(u.km),
z.to_value(u.km),
)
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, "to"): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = not isinstance(other, Time)
if other_is_delta: # T - Tdelta
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot subtract Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError(
"Cannot subtract Time instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
self_time = (
self._time if self.scale in TIME_DELTA_SCALES else self.tai._time
)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(
self_time.jd1, self_time.jd2, format="jd", scale=self_time.scale
)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
# T + Tdelta = T
# T + T = error
if isinstance(other, Time):
raise OperandTypeError(self, other, "+")
# Check other is really a TimeDelta or something that can initialize.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale("tai")
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError(
"Cannot add Time and TimeDelta instances "
f"with scales '{self.scale}' and '{other.scale}'"
)
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ("_delta_ut1_utc", "_delta_tdb_tt"):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
# Reverse addition is possible: <something-Tdelta-ish> + T
# but there is no case of <something> - T, so no __rsub__.
def __radd__(self, other):
return self.__add__(other)
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
scale = self.scale
if scale == "utc":
self = self.tai
result = super().mean(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
if scale == "utc":
result = result.utc
result.out_subfmt = self.out_subfmt
location = self.location
if self.location is not None:
if self.location.shape:
if axis is None:
axis_normalized = tuple(range(self.ndim))
elif isinstance(axis, int):
axis_normalized = (axis,)
else:
axis_normalized = axis
sl = [slice(None)] * self.location.ndim
for a in axis_normalized:
sl[a] = slice(0, 1)
if np.any(self.location != self.location[tuple(sl)]):
raise ValueError(
"`location` must be constant over the reduction axes."
)
if not keepdims:
for a in axis_normalized:
sl[a] = 0
location = self.location[tuple(sl)]
result.location = location
return result
def __array_function__(self, function, types, args, kwargs):
"""
Wrap numpy functions.
Parameters
----------
function : callable
Numpy function to wrap
types : iterable of classes
Classes that provide an ``__array_function__`` override. Can
in principle be used to interact with other classes. Below,
mostly passed on to `~numpy.ndarray`, which can only interact
with subclasses.
args : tuple
Positional arguments provided in the function call.
kwargs : dict
Keyword arguments provided in the function call.
"""
if function in CUSTOM_FUNCTIONS:
f = CUSTOM_FUNCTIONS[function]
return f(*args, **kwargs)
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else:
return super().__array_function__(function, types, args, kwargs)
def to_datetime(self, timezone=None):
# TODO: this could likely go through to_value, as long as that
# had an **kwargs part that was just passed on to _time.
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDeltaMissingUnitWarning(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta"""
pass
class TimeDelta(TimeBase):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
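For example (illustrative), a 'tai' interval can be relabelled on another
geocentric scale such as 'tt'::
>>> import astropy.units as u
>>> dt = TimeDelta(1 * u.day, scale='tai')
>>> dt.tt.scale
'tt'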
See also:
- https://docs.astropy.org/en/stable/time/
- https://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s). For numerical inputs without units,
"jd" is assumed and values are interpreted as days.
A deprecation warning is raised in this case. To avoid the warning,
either specify the format or add units to the input values.
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __new__(
cls,
val,
val2=None,
format=None,
scale=None,
precision=None,
in_subfmt=None,
out_subfmt=None,
location=None,
copy=False,
):
if isinstance(val, TimeDelta):
self = val.replicate(format=format, copy=copy, cls=cls)
else:
self = super().__new__(cls)
return self
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
format = format or self._get_format(val)
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
@staticmethod
def _get_format(val):
if isinstance(val, timedelta):
return "datetime"
if getattr(val, "unit", None) is None:
warn(
"Numerical value without unit or explicit format passed to"
" TimeDelta, assuming days",
TimeDeltaMissingUnitWarning,
)
return "jd"
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format="datetime")
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError(
"Scale {scale!r} is not in the allowed scales {sorted(self.SCALES)}"
)
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1,
jd2 + offset2,
scale,
self.precision,
self.in_subfmt,
self.out_subfmt,
from_jd=True,
)
def _add_sub(self, other, op):
"""Perform common elements of addition / subtraction for two delta times"""
# If not a TimeDelta then see if it can be turned into a TimeDelta.
if not isinstance(other, TimeDelta):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if (
self.scale is not None
and self.scale not in other.SCALES
or other.scale is not None
and other.scale not in self.SCALES
):
raise TypeError(
"Cannot add TimeDelta instances with scales '{}' and '{}'".format(
self.scale, other.scale
)
)
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = op(self._time.jd1, other._time.jd1)
jd2 = op(self._time.jd2, other._time.jd2)
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
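# Illustrative sketch (not part of the astropy source) of the scale
# compatibility rule enforced above: 'tai' and 'tt' belong to the same
# (geocentric) group and are converted automatically, while 'tdb'
# (barycentric) cannot be mixed with 'tai'.
#
#     import astropy.units as u
#     from astropy.time import TimeDelta
#
#     a = TimeDelta(1.0 * u.day, scale="tai")
#     b = TimeDelta(1.0 * u.day, scale="tt")
#     a + b                                      # OK: 'tt' is converted to 'tai'
#     a + TimeDelta(1.0 * u.day, scale="tdb")    # raises TypeError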
def __add__(self, other):
# If other is a Time then use Time.__add__ to do the calculation.
if isinstance(other, Time):
return other.__add__(self)
return self._add_sub(other, operator.add)
def __sub__(self, other):
# TimeDelta - Time is an error
if isinstance(other, Time):
raise OperandTypeError(self, other, "-")
return self._add_sub(other, operator.sub)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, "*")
elif (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just multiply in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
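# Illustrative sketch (not part of the astropy source): multiplication by
# dimensionless factors, as handled above. Results are approximate.
#
#     import astropy.units as u
#     from astropy.time import TimeDelta
#
#     dt = TimeDelta(1.0 * u.day)
#     dt * 2                           # TimeDelta of about two days
#     2 * dt                           # __rmul__ delegates to __mul__
#     dt * u.dimensionless_unscaled    # returns an unchanged copy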
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if (isinstance(other, u.UnitBase) and other == u.dimensionless_unscaled) or (
isinstance(other, str) and other == ""
):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can just divide in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format="jd", scale=self.scale)
if self.format != "jd":
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : unit-like
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See also
--------
to_value : get the numerical value in a given unit.
"""
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to(
unit, equivalencies=equivalencies
)
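# Illustrative sketch (not part of the astropy source): converting a
# TimeDelta to a Quantity with to(), using the day-based jd1 + jd2 sum above.
#
#     import astropy.units as u
#     from astropy.time import TimeDelta
#
#     dt = TimeDelta(1.0 * u.day)
#     dt.to(u.hr)    # -> <Quantity 24. h>, up to floating-point rounding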
def to_value(self, *args, **kwargs):
"""Get time delta values expressed in specified output format or unit.
This method is flexible and handles both conversion to a specified
``TimeDelta`` format / sub-format AND conversion to a specified unit.
If positional argument(s) are provided then the first one is checked
to see if it is a valid ``TimeDelta`` format, and next it is checked
to see if it is a valid unit or unit string.
To convert to a ``TimeDelta`` format and optional sub-format the options
are::
tm = TimeDelta(1.0 * u.s)
tm.to_value('jd') # equivalent of tm.jd
tm.to_value('jd', 'decimal') # convert to 'jd' as a Decimal object
tm.to_value('jd', subfmt='decimal')
tm.to_value(format='jd', subfmt='decimal')
To convert to a unit with optional equivalencies, the options are::
tm.to_value('hr') # convert to u.hr (hours)
tm.to_value('hr', []) # specify equivalencies as a positional arg
tm.to_value('hr', equivalencies=[])
tm.to_value(unit='hr', equivalencies=[])
The built-in `~astropy.time.TimeDelta` options for ``format`` are:
{'jd', 'sec', 'datetime'}.
For the two numerical formats 'jd' and 'sec', the available ``subfmt``
options are: {'float', 'long', 'decimal', 'str', 'bytes'}. Here, 'long'
uses ``numpy.longdouble`` for somewhat enhanced precision (with the
enhancement depending on platform), and 'decimal' instances of
:class:`decimal.Decimal` for full precision. For the 'str' and 'bytes'
sub-formats, the number of digits is also chosen such that time values
are represented accurately. Default: as set by ``out_subfmt`` (which by
default picks the first available for a given format, i.e., 'float').
Parameters
----------
format : str, optional
The format in which one wants the `~astropy.time.TimeDelta` values.
Default: the current format.
subfmt : str, optional
Possible sub-format in which the values should be given. Default: as
set by ``out_subfmt`` (which by default picks the first available
for a given format, i.e., 'float' or 'date_hms').
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`astropy:unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally or
within a context.
Returns
-------
value : ndarray or scalar
The value in the format or units specified.
See also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
if not (args or kwargs):
raise TypeError("to_value() missing required format or unit argument")
# TODO: maybe allow 'subfmt' also for units, keeping full precision
# (effectively, by doing the reverse of quantity_day_frac)?
# This way, only equivalencies could lead to possible precision loss.
if "format" in kwargs or (
args != () and (args[0] is None or args[0] in self.FORMATS)
):
# Super-class will error with duplicate arguments, etc.
return super().to_value(*args, **kwargs)
# With positional arguments, we try parsing the first one as a unit,
# so that on failure we can give a more informative exception.
if args:
try:
unit = u.Unit(args[0])
except ValueError as exc:
raise ValueError(
"first argument is not one of the known "
f"formats ({list(self.FORMATS)}) and failed to parse as a unit."
) from exc
args = (unit,) + args[1:]
return u.Quantity(self._time.jd1 + self._time.jd2, u.day).to_value(
*args, **kwargs
)
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError(
f"cannot convert value to a compatible TimeDelta object: {err}"
) from err
return value
def isclose(self, other, atol=None, rtol=0.0):
"""Returns a boolean or boolean array where two TimeDelta objects are
element-wise equal within a time tolerance.
This effectively evaluates the expression below::
abs(self - other) <= atol + rtol * abs(other)
Parameters
----------
other : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Quantity or TimeDelta object for comparison.
atol : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`
Absolute tolerance for equality with units of time (e.g. ``u.s`` or
``u.day``). Default is one bit in the 128-bit JD time representation,
equivalent to about 20 picosecs.
rtol : float
Relative tolerance for equality
"""
try:
other_day = other.to_value(u.day)
except Exception as err:
raise TypeError(f"'other' argument must support conversion to days: {err}")
if atol is None:
atol = np.finfo(float).eps * u.day
if not isinstance(atol, (u.Quantity, TimeDelta)):
raise TypeError(
"'atol' argument must be a Quantity or TimeDelta instance, got "
f"{atol.__class__.__name__} instead"
)
return np.isclose(
self.to_value(u.day), other_day, rtol=rtol, atol=atol.to_value(u.day)
)
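# Illustrative sketch (not part of the astropy source) of isclose() with an
# explicit absolute tolerance; the exact booleans depend on the tolerance.
#
#     import astropy.units as u
#     from astropy.time import TimeDelta
#
#     t1 = TimeDelta(1.0 * u.s)
#     t2 = TimeDelta(1.0 * u.s + 1 * u.ns)
#     t1.isclose(t2, atol=1 * u.us)     # True: within one microsecond
#     t1.isclose(t2, atol=0.1 * u.ns)   # False at a tighter tolerance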
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
if isinstance(val, (tuple, list)) and len(val) > 0 and isinstance(val[0], Time):
dtype = object
else:
dtype = None
val = np.array(val, copy=copy, subok=True, dtype=dtype)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if val.dtype.kind == "f" and val.dtype.itemsize >= np.dtype(np.float64).itemsize:
pass
elif val.dtype.kind in "OSUMaV":
pass
else:
val = np.asanyarray(val, dtype=np.float64)
return val
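# Illustrative sketch (not part of the astropy source): _make_array upcasts
# narrow numeric dtypes to float64 while leaving object/string arrays alone.
#
#     import numpy as np
#
#     _make_array(np.array([1, 2], dtype=np.float32)).dtype   # float64
#     _make_array(np.array(["2000-01-01"])).dtype.kind        # 'U', unchanged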
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
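# Illustrative sketch (not part of the astropy source): filling a masked
# input and recovering the combined mask. ``val2`` is left as None here.
#
#     import numpy as np
#
#     val = np.ma.MaskedArray([1.0, 2.0, 3.0], mask=[False, True, False])
#     mask, filled, _ = _check_for_masked_and_fill(val, None)
#     # mask -> [False, True, False]; filled -> [1., 1., 3.]
#     # (the masked slot is filled from the first unmasked element)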
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = "" if op is None else f" for {op}"
super().__init__(
"Unsupported operand type(s){}: '{}' and '{}'".format(
op_string, left.__class__.__name__, right.__class__.__name__
)
)
def _check_leapsec():
global _LEAP_SECONDS_CHECK
if _LEAP_SECONDS_CHECK != _LeapSecondsCheck.DONE:
with _LEAP_SECONDS_LOCK:
# There are three ways we can get here:
# 1. First call (NOT_STARTED).
# 2. Re-entrant call (RUNNING). We skip the initialisation
# and don't worry about leap second errors.
# 3. Another thread which raced with the first call
# (RUNNING). The first thread has relinquished the
# lock to us, so initialization is complete.
if _LEAP_SECONDS_CHECK == _LeapSecondsCheck.NOT_STARTED:
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.RUNNING
update_leap_seconds()
_LEAP_SECONDS_CHECK = _LeapSecondsCheck.DONE
def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the files defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
| bsd-3-clause | 0ff87c7f001cee8f333401f0ec51045e | 36.539018 | 107 | 0.569298 | 4.168351 | false | false | false | false |
astropy/astropy | astropy/coordinates/sky_coordinate.py | 3 | 91316 | import copy
import operator
import re
import warnings
import erfa
import numpy as np
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.table import QTable
from astropy.time import Time
from astropy.utils import ShapedLikeNDArray
from astropy.utils.data_info import MixinInfo
from astropy.utils.exceptions import AstropyUserWarning
from .angles import Angle
from .baseframe import BaseCoordinateFrame, GenericFrame, frame_transform_graph
from .distances import Distance
from .representation import (
RadialDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from .sky_coordinate_parsers import (
_get_frame_class,
_get_frame_without_data,
_parse_coordinate_data,
)
__all__ = ["SkyCoord", "SkyCoordInfo"]
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = {"unit"} # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ["{0." + compname + ".value:}" for compname in repr_data.components]
return ",".join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ",".join(
str(getattr(repr_data, comp).unit) or "None"
for comp in repr_data.components
)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if issubclass(sc.representation_type, SphericalRepresentation) and isinstance(
sc.data, UnitSphericalRepresentation
):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type, in_frame_units=True)
return repr_data
def _represent_as_dict(self):
sc = self._parent
attrs = list(sc.representation_component_names)
# Don't output distance unless it's actually distance.
if isinstance(sc.data, UnitSphericalRepresentation):
attrs = attrs[:-1]
diff = sc.data.differentials.get("s")
if diff is not None:
diff_attrs = list(sc.get_representation_component_names("s"))
# Don't output proper motions if they haven't been specified.
if isinstance(diff, RadialDifferential):
diff_attrs = diff_attrs[2:]
# Don't output radial velocity unless it's actually velocity.
elif isinstance(
diff, (UnitSphericalDifferential, UnitSphericalCosLatDifferential)
):
diff_attrs = diff_attrs[:-1]
attrs.extend(diff_attrs)
attrs.extend(frame_transform_graph.frame_attributes.keys())
out = super()._represent_as_dict(attrs)
out["representation_type"] = sc.representation_type.get_name()
out["frame"] = sc.frame.name
# Note that sc.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
def new_like(self, skycoords, length, metadata_conflicts="warn", name=None):
"""
Return a new SkyCoord instance which is consistent with the input
SkyCoord objects ``skycoords`` and has ``length`` rows. Being
"consistent" is defined as being able to set an item from one to each of
the rest without any exception being raised.
This is intended for creating a new SkyCoord instance whose elements can
be set in-place for table operations like join or vstack. This is used
when a SkyCoord object is used as a mixin column in an astropy Table.
The data values are not predictable and it is expected that the consumer
of the object will fill in all values.
Parameters
----------
skycoords : list
List of input SkyCoord objects
length : int
Length of the output skycoord object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output name (sets output skycoord.info.name)
Returns
-------
skycoord : SkyCoord (or subclass)
Instance of this class consistent with ``skycoords``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
skycoords, metadata_conflicts, name, ("meta", "description")
)
skycoord0 = skycoords[0]
# Make a new SkyCoord object with the desired length and attributes
# by using the _apply / __getitem__ machinery to effectively return
# skycoord0[[0, 0, ..., 0, 0]]. This will have all the right frame
# attributes with the right shape.
indexes = np.zeros(length, dtype=np.int64)
out = skycoord0[indexes]
# Use __setitem__ machinery to check for consistency of all skycoords
for skycoord in skycoords[1:]:
try:
out[0] = skycoord[0]
except Exception as err:
raise ValueError("Input skycoords are inconsistent.") from err
# Set (merged) info attributes
for attr in ("name", "meta", "description"):
if attr in attrs:
setattr(out.info, attr, attrs[attr])
return out
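# Illustrative sketch (not part of the astropy source): new_like builds a
# length-N SkyCoord consistent with its inputs, ready for in-place filling
# (this is what the table join/vstack machinery relies on).
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     sc = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
#     out = sc.info.new_like([sc], 4)
#     out[:2] = sc    # fill the first two rows in place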
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (
len(args) == 1
and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError(
"Cannot initialize from a coordinate frame "
"instance without coordinate data"
)
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs
)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def __eq__(self, value):
"""Equality operator for SkyCoord
This implements strict equality and requires that the frames are
equivalent, extra frame attributes are equivalent, and that the
representation data are exactly equal.
"""
if isinstance(value, BaseCoordinateFrame):
if value._data is None:
raise ValueError("Can only compare SkyCoord to Frame with data")
return self.frame == value
if not isinstance(value, SkyCoord):
return NotImplemented
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(
f"cannot compare: extra frame attribute '{attr}' is not equivalent"
" (perhaps compare the frames directly to avoid this exception)"
)
return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
*args
Any positional arguments for ``method``.
**kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, "shape", ()):
value = apply_method(value)
elif method == "copy" or method == "flatten":
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, "_" + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if "info" in self.__dict__:
new.info = self.info
return new
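# Illustrative sketch (not part of the astropy source): shape-changing
# methods route through _apply, acting on the representation data and any
# array-valued frame attributes.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     sc = SkyCoord([1, 2, 3, 4] * u.deg, [0, 0, 0, 0] * u.deg)
#     sc.reshape(2, 2).shape   # (2, 2)
#     sc.flatten().shape       # (4,)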
def __setitem__(self, item, value):
"""Implement self[item] = value for SkyCoord
The right hand ``value`` must be strictly consistent with self:
- Identical class
- Equivalent frames
- Identical representation_types
- Identical representation differentials keys
- Identical frame attributes
- Identical "extra" frame attributes (e.g. obstime for an ICRS coord)
With these caveats the setitem ends up as effectively a setitem on
the representation data.
self.frame.data[item] = value.frame.data
"""
if self.__class__ is not value.__class__:
raise TypeError(
"can only set from object of same class: "
f"{self.__class__.__name__} vs. {value.__class__.__name__}"
)
# Make sure that any extra frame attribute names are equivalent.
for attr in self._extra_frameattr_names | value._extra_frameattr_names:
if not self.frame._frameattr_equiv(
getattr(self, attr), getattr(value, attr)
):
raise ValueError(f"attribute {attr} is not equivalent")
# Set the frame values. This checks frame equivalence and also clears
# the cache to ensure that the object is not in an inconsistent state.
self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
"""
Insert coordinate values before the given indices in the object and
return a new Frame object.
The values to be inserted must conform to the rules for in-place setting
of ``SkyCoord`` objects.
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple insertion before the index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different
from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.coordinates.SkyCoord` instance
New coordinate object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError("obj arg must be an integer")
if axis != 0:
raise ValueError("axis must be 0")
if not self.shape:
raise TypeError(
f"cannot insert into scalar {self.__class__.__name__} object"
)
if abs(idx0) > len(self):
raise IndexError(
f"index {idx0} is out of bounds for axis 0 with size {len(self)}"
)
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like(
[self], len(self) + n_values, name=self.info.name
)
# Set the output values. This is where validation of `values` takes place to ensure
# that it can indeed be inserted.
out[:idx0] = self[:idx0]
out[idx0 : idx0 + n_values] = values
out[idx0 + n_values :] = self[idx0:]
return out
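# Illustrative sketch (not part of the astropy source) of insert(); the
# scalar value is broadcast into the length-1 slot.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     sc = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
#     new = sc.insert(1, SkyCoord(10 * u.deg, 20 * u.deg))
#     # new.ra -> [1, 10, 2] deg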
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : frame class, frame object, or str
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
# TODO! like matplotlib, do string overrides for modified methods
new_frame = (
_get_frame_class(new_frame) if isinstance(new_frame, str) else new_frame
)
return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
default for ``merge_attributes`` is `True`, in which the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
(e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if frame_val is not None and not (
merge_attributes and frame.is_frame_attr_default(attr)
):
frame_kwargs[attr] = frame_val
elif self_val is not None and not self.is_frame_attr_default(attr):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError(
"Transform `frame` must be a frame name, class, or instance"
)
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError(
f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
)
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
frame_kwargs.pop(attr)
# Always remove the origin frame attribute, as that attribute only makes
# sense with a SkyOffsetFrame (in which case it will be stored on the frame).
# See gh-11277.
# TODO: Should it be a property of the frame attribute that it can
# or cannot be stored on a SkyCoord?
frame_kwargs.pop("origin", None)
return self.__class__(new_coord, **frame_kwargs)
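# Illustrative sketch (not part of the astropy source) of the attribute
# merging described above: an obstime set on the ICRS source is carried
# into the FK4 result because FK4 defines obstime and it was left at its
# default in the destination frame.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     c = SkyCoord(10 * u.deg, 20 * u.deg, frame="icrs", obstime="J1975")
#     c.transform_to("fk4").obstime   # -> J1975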
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# the position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=False),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=False),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
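# Illustrative sketch (not part of the astropy source) of epoch propagation;
# the numbers are placeholders. With no distance set, ERFA may emit a
# warning since the parallax defaults to zero by convention.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#     from astropy.time import Time
#
#     c = SkyCoord(ra=10 * u.deg, dec=20 * u.deg,
#                  pm_ra_cosdec=500 * u.mas / u.yr,
#                  pm_dec=-300 * u.mas / u.yr,
#                  obstime=Time("2000-01-01"))
#     c2 = c.apply_space_motion(new_obstime=Time("2020-01-01"))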
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return self.frame.name == string or (
isinstance(self.frame.name, list) and string in self.frame.name
)
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the primary transform graph.
"""
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.frame_attributes:
return getattr(self.frame, attr)
else:
return getattr(self, "_" + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attr}'"
)
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__("_" + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if "_sky_coord_frame" in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__("_" + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
dir_values = set(super().__dir__())
# determine the aliases that this can be transformed to.
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(
{attr for attr in dir(self.frame) if not attr.startswith("_")}
)
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return sorted(dir_values)
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ": " + frameattrs
data = self.frame._data_repr()
if data:
data = ": " + data
return f"<{clsnm} ({coonm}{frameattrs}){data}>"
def to_string(self, style="decimal", **kwargs):
"""
A string representation of the coordinates.
The default styles definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is `decimal`.
**kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {
"hmsdms": {
"lonargs": {"unit": u.hour, "pad": True},
"latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
},
"dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
"decimal": {
"lonargs": {"unit": u.degree, "decimal": True},
"latargs": {"unit": u.degree, "decimal": True},
},
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]["lonargs"])
latargs.update(styles[style]["latargs"])
else:
raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (
f"{sph_coord.lon.to_string(**lonargs)}"
f" {sph_coord.lat.to_string(**latargs)}"
)
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [
f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
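# Illustrative sketch (not part of the astropy source) of the three styles;
# exact padding/precision may differ.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     c = SkyCoord(10.5 * u.deg, -20.25 * u.deg)
#     c.to_string("decimal")   # e.g. '10.5 -20.25'
#     c.to_string("dms")       # e.g. '10d30m00s -20d15m00s'
#     c.to_string("hmsdms")    # e.g. '00h42m00s -20d15m00s'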
def to_table(self):
"""
Convert this |SkyCoord| to a |QTable|.
Any attributes that have the same length as the |SkyCoord| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |SkyCoord|.
Examples
--------
>>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
... obstime=Time([2000, 2010], format='jyear'))
>>> t = sc.to_table()
>>> t
<QTable length=2>
ra dec obstime
deg deg
float64 float64 Time
------- ------- -------
40.0 0.0 2000.0
70.0 -20.0 2010.0
>>> t.meta
{'representation_type': 'spherical', 'frame': 'icrs'}
"""
self_as_dict = self.info._represent_as_dict()
tabledata = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self_as_dict.items():
if getattr(value, "shape", ())[:1] == (len(self),):
tabledata[key] = value
else:
metadata[key] = value
return QTable(tabledata, meta=metadata)
def is_equivalent_frame(self, other):
"""
Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if not BaseCoordinateFrame._frameattr_equiv(
getattr(self, fattrnm), getattr(other, fattrnm)
):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't frame-like"
)
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
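# Illustrative sketch (not part of the astropy source) of on-sky separation:
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     c1 = SkyCoord(0 * u.deg, 0 * u.deg)
#     c2 = SkyCoord(0 * u.deg, 1 * u.deg)
#     c1.separation(c2)   # -> <Angle 1. deg>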
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
if not self.is_equivalent_frame(other):
try:
kwargs = (
{"merge_attributes": False} if isinstance(other, SkyCoord) else {}
)
other = other.transform_to(self, **kwargs)
except TypeError:
raise TypeError(
"Can only get separation to another SkyCoord "
"or a coordinate frame with data"
)
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
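# Illustrative usage (a minimal sketch): both coordinates carry a distance,
# so the 3D separation is well defined.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c1 = SkyCoord(10 * u.deg, 20 * u.deg, distance=10 * u.pc)
#     >>> c2 = SkyCoord(10 * u.deg, 20 * u.deg, distance=11 * u.pc)
#     >>> c1.separation_3d(c2)   # Distance of 1 pc along the line of sight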
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation :
for the *total* angular offset (not broken out into components).
position_angle :
for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
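# Illustrative usage (a minimal sketch): both coordinates are in the same
# frame, as required by the ValueError check above.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> origin = SkyCoord(10 * u.deg, 20 * u.deg, frame="icrs")
#     >>> target = SkyCoord(10.1 * u.deg, 20.1 * u.deg, frame="icrs")
#     >>> dra, ddec = origin.spherical_offsets_to(target)   # two Angles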
def spherical_offsets_by(self, d_lon, d_lat):
"""
Computes the coordinate that is a specified pair of angular offsets away
from this coordinate.
Parameters
----------
d_lon : angle-like
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
d_lat : angle-like
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Returns
-------
newcoord : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
``d_lat`` in the latitude direction and ``d_lon`` in the longitude
direction.
Notes
-----
This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
transformation. For a more complete set of transform offsets, use
`~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
This specific method can be reproduced by doing
``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.
See Also
--------
spherical_offsets_to : compute the angular offsets to another coordinate
directional_offset_by : offset a coordinate by an angle in a direction
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return self.__class__(
SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self)
)
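# Illustrative usage (a minimal sketch): offset a coordinate by small angles
# in the longitude and latitude directions.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(10 * u.deg, 20 * u.deg)
#     >>> c.spherical_offsets_by(1 * u.arcmin, -2 * u.arcmin)   # new SkyCoord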
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat, posang=position_angle, distance=separation
)
return SkyCoord(newlon, newlat, frame=self.frame)
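# Illustrative usage (a minimal sketch): a position angle of 90 deg offsets
# the coordinate due East on the sky.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> c = SkyCoord(10 * u.deg, 20 * u.deg)
#     >>> c.directional_offset_by(90 * u.deg, 1 * u.deg)   # ~1 deg East of c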
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
"""
from .matching import match_coordinates_sky
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_sky(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
)
return res
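# Illustrative usage (a minimal sketch; requires SciPy): ``idx`` gives, for
# each of this object's coordinates, the index of its nearest catalog entry.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> targets = SkyCoord([10, 11] * u.deg, [20, 21] * u.deg)
#     >>> catalog = SkyCoord([10.01, 10.99, 35] * u.deg, [20.01, 21.01, -5] * u.deg)
#     >>> idx, sep2d, dist3d = targets.match_to_catalog_sky(catalog)
#     >>> idx   # array([0, 1])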
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : int array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if not (
isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data
):
raise TypeError(
"Can only get separation to another SkyCoord or a "
"coordinate frame with data"
)
res = match_coordinates_3d(
self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
)
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : coordinate-like
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` ['angle']
The on-sky separation to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
"""
from .matching import search_around_sky
return search_around_sky(
searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
)
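# Illustrative usage (a minimal sketch; requires SciPy). Note the argument
# order: the coordinates searched *around* are passed in, while ``self`` is
# the (typically larger) set being searched. ``big_catalog`` and
# ``few_points`` below are hypothetical SkyCoord arrays.
#
#     >>> import astropy.units as u
#     >>> idxaround, idxself, sep2d, dist3d = big_catalog.search_around_sky(
#     ...     few_points, 1 * u.arcmin)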
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`astropy:/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` ['length']
The physical radius to search within.
Returns
-------
idxsearcharound : int array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : int array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity` ['length']
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(
searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
)
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError(
"Can only get position_angle to another "
"SkyCoord or a coordinate frame with data"
)
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
Parameters
----------
rotation : angle-like
The final rotation of the frame about the ``origin``. The sign of
the rotation is the left-hand rule. That is, an object at a
particular position angle in the un-rotated system will be sent to
the positive latitude (z) direction in the final frame.
Returns
-------
astrframe : `~astropy.coordinates.SkyOffsetFrame`
A sky offset frame of the same type as this `SkyCoord` (e.g., if
this object has an ICRS coordinate, the resulting frame is
SkyOffsetICRS, with the origin set to this object).
"""
from .builtin_frames.skyoffset import SkyOffsetFrame
return SkyOffsetFrame(origin=self, rotation=rotation)
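# Illustrative usage (a minimal sketch): express a nearby coordinate in an
# offset frame centered on a reference point.
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> center = SkyCoord(10 * u.deg, 45 * u.deg)
#     >>> aframe = center.skyoffset_frame()
#     >>> SkyCoord(10.1 * u.deg, 45.1 * u.deg).transform_to(aframe)  # small lon/lat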
def get_constellation(self, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
"""
from .funcs import get_constellation
# Because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. The workaround is to drop the velocities;
# they are not needed here since only position information is used.
extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
novel = SkyCoord(
self.realize_frame(self.data.without_differentials()), **extra_frameattrs
)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
"""
from astropy.wcs.utils import skycoord_to_pixel
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
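# Illustrative usage (a minimal sketch with an assumed, bare-bones WCS; a
# real WCS would normally come from a FITS header):
#
#     >>> import astropy.units as u
#     >>> from astropy.coordinates import SkyCoord
#     >>> from astropy.wcs import WCS
#     >>> w = WCS(naxis=2)
#     >>> w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#     >>> w.wcs.crval = [10, 20]
#     >>> xp, yp = SkyCoord(10 * u.deg, 20 * u.deg).to_pixel(w)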
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
only the core WCS transformation (``'wcs'``).
Returns
-------
coord : `~astropy.coordinates.SkyCoord`
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
from astropy.wcs.utils import pixel_to_skycoord
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The coordinate to check if it is within the wcs coordinate.
image : array
Optional. The image associated with the wcs object that the coordinate
is being checked against. If not given, the naxis keywords will be used
to determine if the coordinate falls within the wcs footprint.
**kwargs
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
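# Illustrative usage (a minimal sketch; "image.fits" is a placeholder path
# and ``coord`` a hypothetical SkyCoord):
#
#     >>> from astropy.io import fits
#     >>> from astropy.wcs import WCS
#     >>> wcs = WCS(fits.getheader("image.fits"))
#     >>> coord.contained_by(wcs)   # True or False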
def radial_velocity_correction(
self, kind="barycentric", obstime=None, location=None
):
"""
Compute the correction required to convert a radial velocity at a given
time and place on the Earth's Surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` ['speed']
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
heliocentric correction and includes additional physics (e.g., time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. The barycentric correction returned uses the optical
approximation v = z * c. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
>>> from astropy.time import Time
>>> from astropy.coordinates import SkyCoord, EarthLocation
>>> from astropy.constants import c
>>> t = Time(56370.5, format='mjd', scale='utc')
>>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA
>>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP
Also note that this method returns the correction velocity in the so-called
*optical convention*::
>>> vcorr = zb * c # doctest: +SKIP
where ``zb`` is the barycentric correction redshift as defined in section 3
of Wright & Eastman (2014). The application formula given above follows from their
equation (11) under assumption that the radial velocity ``rv`` has also been defined
using the same optical convention. Note, this can be regarded as a matter of
velocity definition and does not by itself imply any loss of accuracy, provided
sufficient care has been taken during interpretation of the results. If you need
the barycentric correction expressed as the full relativistic velocity (e.g., to provide
it as the input to another software which performs the application), the
following recipe can be used::
>>> zb = vcorr / c # doctest: +REMOTE_DATA
>>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA
>>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA
or alternatively using just equivalencies::
>>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA
See also `~astropy.units.equivalencies.doppler_optical`,
`~astropy.units.equivalencies.doppler_radio`, and
`~astropy.units.equivalencies.doppler_relativistic` for more information on
the velocity conventions.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
either directly or via a ``with`` statement. For example, to use the JPL
ephemeris, do::
>>> from astropy.coordinates import solar_system_ephemeris
>>> sc = SkyCoord(1*u.deg, 2*u.deg)
>>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA
... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, "location", None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError(
"`location` cannot be in both the passed-in `obstime` and this"
" `SkyCoord` because it is ambiguous which is meant for the"
" radial_velocity_correction."
)
elif timeloc is not None:
location = timeloc
else:
raise TypeError(
"Must provide a `location` to radial_velocity_correction, either as"
" a SkyCoord frame attribute, as an attribute on the passed in"
" `obstime`, or in the method call."
)
elif self.location is not None or timeloc is not None:
raise ValueError(
"Cannot compute radial velocity correction if `location` argument is"
" passed in and there is also a `location` attribute on this SkyCoord"
" or the passed-in `obstime`."
)
# obstime validation
coo_at_rv_obstime = self # assume we need no space motion for now
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError(
"Must provide an `obstime` to radial_velocity_correction, either as"
" a SkyCoord frame attribute or in the method call."
)
elif self.obstime is not None and self.frame.data.differentials:
# we do need space motion after all
coo_at_rv_obstime = self.apply_space_motion(obstime)
elif self.obstime is None:
# warn the user if the object has differentials set
if "s" in self.data.differentials:
warnings.warn(
"SkyCoord has space motion, and therefore the specified "
"position of the SkyCoord may not be the same as "
"the `obstime` for the radial velocity measurement. "
"This may affect the rv correction at the order of km/s"
"for very high proper motions sources. If you wish to "
"apply space motion of the SkyCoord to correct for this"
"the `obstime` attribute of the SkyCoord must be set",
AstropyUserWarning,
)
pos_earth, v_earth = get_body_barycentric_posvel("earth", obstime)
if kind == "barycentric":
v_origin_to_earth = v_earth
elif kind == "heliocentric":
v_sun = get_body_barycentric_posvel("sun", obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError(
"`kind` argument to radial_velocity_correction must "
f"be 'barycentric' or 'heliocentric', but got '{kind}'"
)
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
# Transforming to GCRS is not the correct thing to do here, since we don't
# want to include aberration (or light deflection). Instead, only apply
# parallax if necessary.
icrs_cart = coo_at_rv_obstime.icrs.cartesian
icrs_cart_novel = icrs_cart.without_differentials()
if self.data.__class__ is UnitSphericalRepresentation:
targcart = icrs_cart_novel
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
targcart = icrs_cart_novel - obs_icrs_cart
targcart /= targcart.norm()
if kind == "barycentric":
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
gr = location.gravitational_redshift(obstime)
# barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)
# Try to get terms corresponding to stellar motion.
if icrs_cart.differentials:
try:
ro = self.icrs.cartesian
beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
# ICRS unit vector at coordinate epoch
ro = ro.without_differentials()
ro /= ro.norm()
zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
except u.UnitConversionError:
warnings.warn(
"SkyCoord contains some velocity information, but not enough to"
" calculate the full space motion of the source, and so this"
" has been ignored for the purposes of calculating the radial"
" velocity correction. This can lead to errors on the order of"
" metres/second.",
AstropyUserWarning,
)
zb = zb - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
names of the components of the requested frames (including
differentials), if they are also followed by a non-alphanumeric
character. It will also match columns that *end* with the component name
if a non-alphanumeric character is *before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : `~astropy.table.Table` or subclass
The table to load data from.
**coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord` or subclass
The new `SkyCoord` (or subclass) object.
Raises
------
ValueError
If more than one match is found in the table for a component,
unless the additional matches are also valid frame component names.
If a "coord_kwargs" is provided for a value also found in the table.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs["frame"] = coord_kwargs.get("frame", frame)
representation_component_names = set(
frame.get_representation_component_names()
).union(set(frame.get_representation_component_names("s")))
comp_kwargs = {}
for comp_name in representation_component_names:
# this matches things like 'ra[...]'' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r"(\W|\b|_)"
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
# the final regex ORs together the two patterns
rex = re.compile(
rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
)
# find all matches
matches = {col_name for col_name in table.colnames if rex.match(col_name)}
# now need to select among matches, also making sure we don't have
# an exact match with another component
if len(matches) == 0: # no matches
continue
elif len(matches) == 1: # only one match
col_name = matches.pop()
else: # more than 1 match
# try to sieve out other components
matches -= representation_component_names - {comp_name}
# if there's only one remaining match, it worked.
if len(matches) == 1:
col_name = matches.pop()
else:
raise ValueError(
f'Found at least two matches for component "{comp_name}":'
f' "{matches}". Cannot guess coordinates from a table with this'
" ambiguity."
)
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError(
f'Found column "{v.name}" in table, but it was already provided as'
f' "{k}" keyword to guess_from_table function.'
)
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.,
'CRTS SSS100805 J194428-420209', this may be much faster than a
Sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, optional
Determines whether to cache the results or not. To update or
overwrite an existing value, pass ``cache='update'``.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
icrs_sky_coord = cls(icrs_coord)
if frame in ("icrs", icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
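# Illustrative usage (a minimal sketch; the Sesame query needs network
# access, so outputs are not shown):
#
#     >>> m42 = SkyCoord.from_name("M42")
#     >>> gal = SkyCoord.from_name("M42", frame="galactic")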
| bsd-3-clause | cbda1055bc50222a32892ca84d297419 | 40.077823 | 139 | 0.595383 | 4.379454 | false | false | false | false |
astropy/astropy | astropy/io/fits/scripts/fitsinfo.py | 3 | 2044 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
``fitsinfo`` is a command-line script based on astropy.io.fits for
printing a summary of the HDUs in one or more FITS file(s) to the
standard output.
Example usage of ``fitsinfo``:
1. Print a summary of the HDUs in a FITS file::
$ fitsinfo filename.fits
Filename: filename.fits
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 138 ()
1 SCI ImageHDU 61 (800, 800) int16
2 SCI ImageHDU 61 (800, 800) int16
3 SCI ImageHDU 61 (800, 800) int16
4 SCI ImageHDU 61 (800, 800) int16
2. Print a summary of HDUs of all the FITS files in the current directory::
$ fitsinfo *.fits
"""
import argparse
import astropy.io.fits as fits
from astropy import __version__, log
DESCRIPTION = """
Print a summary of the HDUs in a FITS file(s).
This script is part of the Astropy package. See
https://docs.astropy.org/en/latest/io/fits/usage/scripts.html#module-astropy.io.fits.scripts.fitsinfo
for further documentation.
""".strip()
def fitsinfo(filename):
"""
Print a summary of the HDUs in a FITS file.
Parameters
----------
filename : str
The path to a FITS file.
"""
try:
fits.info(filename)
except OSError as e:
log.error(str(e))
return
def main(args=None):
"""The main function called by the `fitsinfo` script."""
parser = argparse.ArgumentParser(
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"filename",
nargs="+",
help="Path to one or more FITS files. Wildcards are supported.",
)
args = parser.parse_args(args)
for idx, filename in enumerate(args.filename):
if idx > 0:
print()
fitsinfo(filename)
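# Illustrative usage (a minimal sketch; the file names are placeholders):
# the script can also be driven from Python by passing an argument list,
# which is equivalent to running ``fitsinfo file1.fits file2.fits``.
#
#     >>> main(["file1.fits", "file2.fits"])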
| bsd-3-clause | 503c79168a05e30cf797408c4d908844 | 26.253333 | 101 | 0.618395 | 3.65 | false | false | false | false |
astropy/astropy | astropy/cosmology/core.py | 3 | 22120 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import abc
import inspect
from typing import TYPE_CHECKING, Any, Mapping, TypeVar
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from astropy.utils.decorators import classproperty
from astropy.utils.metadata import MetaData
from .connect import (
CosmologyFromFormat,
CosmologyRead,
CosmologyToFormat,
CosmologyWrite,
)
from .parameter import Parameter
if TYPE_CHECKING: # pragma: no cover
from astropy.cosmology.funcs.comparison import _FormatType
# Originally authored by Andrew Becker (becker@astro.washington.edu),
# and modified by Neil Crighton (neilcrighton@gmail.com), Roban Kramer
# (robanhk@gmail.com), and Nathaniel Starkman (n.starkman@mail.utoronto.ca).
# Many of these adapted from Hogg 1999, astro-ph/9905116
# and Linder 2003, PRL 90, 91301
__all__ = ["Cosmology", "CosmologyError", "FlatCosmologyMixin"]
__doctest_requires__ = {} # needed until __getattr__ removed
##############################################################################
# Parameters
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
# typing
_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
##############################################################################
class CosmologyError(Exception):
pass
class Cosmology(metaclass=abc.ABCMeta):
"""Base-class for all Cosmologies.
Parameters
----------
*args
Arguments into the cosmology; used by subclasses, not this base class.
name : str or None (optional, keyword-only)
The name of the cosmology.
meta : dict or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
**kwargs
Arguments into the cosmology; used by subclasses, not this base class.
Notes
-----
Class instances are static -- you cannot (and should not) change the values
of the parameters. That is, all of the above attributes (except meta) are
read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
meta = MetaData()
# Unified I/O object interchange methods
from_format = UnifiedReadWriteMethod(CosmologyFromFormat)
to_format = UnifiedReadWriteMethod(CosmologyToFormat)
# Unified I/O read and write methods
read = UnifiedReadWriteMethod(CosmologyRead)
write = UnifiedReadWriteMethod(CosmologyWrite)
# Parameters
__parameters__: tuple[str, ...] = ()
__all_parameters__: tuple[str, ...] = ()
# ---------------------------------------------------------------
def __init_subclass__(cls):
super().__init_subclass__()
# -------------------
# Parameters
# Get parameters that are still Parameters, either in this class or above.
parameters = []
derived_parameters = []
for n in cls.__parameters__:
p = getattr(cls, n)
if isinstance(p, Parameter):
derived_parameters.append(n) if p.derived else parameters.append(n)
# Add new parameter definitions
for n, v in cls.__dict__.items():
if n in parameters or n.startswith("_") or not isinstance(v, Parameter):
continue
derived_parameters.append(n) if v.derived else parameters.append(n)
# reorder to match signature
ordered = [
parameters.pop(parameters.index(n))
for n in cls._init_signature.parameters.keys()
if n in parameters
]
parameters = ordered + parameters # place "unordered" at the end
cls.__parameters__ = tuple(parameters)
cls.__all_parameters__ = cls.__parameters__ + tuple(derived_parameters)
# -------------------
# register as a Cosmology subclass
_COSMOLOGY_CLASSES[cls.__qualname__] = cls
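# Illustrative subclass sketch (assumed, with details such as parameter
# storage glossed over): any ``Parameter`` descriptors declared on the
# subclass are collected into ``__parameters__`` by the hook above.
#
#     >>> class MyCosmology(Cosmology):
#     ...     H0 = Parameter(doc="Hubble constant at z=0.")
#     ...     def __init__(self, H0, name=None, meta=None):
#     ...         super().__init__(name=name, meta=meta)
#     ...         self._H0 = H0
#     ...     @property
#     ...     def is_flat(self):
#     ...         return False
#     >>> MyCosmology.__parameters__
#     ('H0',)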
@classproperty(lazy=True)
def _init_signature(cls):
"""Initialization signature (without 'self')."""
# get signature, dropping "self" by taking arguments [1:]
sig = inspect.signature(cls.__init__)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return sig
# ---------------------------------------------------------------
def __init__(self, name=None, meta=None):
self._name = str(name) if name is not None else name
self.meta.update(meta or {})
@property
def name(self):
"""The name of the Cosmology instance."""
return self._name
@property
@abc.abstractmethod
def is_flat(self):
"""
Return bool; `True` if the cosmology is flat.
This is abstract and must be defined in subclasses.
"""
raise NotImplementedError("is_flat is not implemented")
def clone(self, *, meta=None, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, so ``clone()``
cannot be used to change between flat and non-flat cosmologies.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
"""
# Quick return check, taking advantage of the Cosmology immutability.
if meta is None and not kwargs:
return self
# There are changed parameter or metadata values.
# The name needs to be changed accordingly, if it wasn't already.
_modname = self.name + " (modified)"
kwargs.setdefault("name", (_modname if self.name is not None else None))
# mix new meta into existing, preferring the former.
meta = meta if meta is not None else {}
new_meta = {**self.meta, **meta}
# Mix kwargs into initial arguments, preferring the former.
new_init = {**self._init_arguments, "meta": new_meta, **kwargs}
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._init_signature.bind_partial(**new_init)
# Instantiate, respecting args vs kwargs
cloned = type(self)(*ba.args, **ba.kwargs)
# Check if nothing has changed.
# TODO! or should return self?
if (cloned.name == _modname) and not meta and cloned.is_equivalent(self):
cloned._name = self.name
return cloned
@property
def _init_arguments(self):
# parameters
kw = {n: getattr(self, n) for n in self.__parameters__}
# other info
kw["name"] = self.name
kw["meta"] = self.meta
return kw
# ---------------------------------------------------------------
# comparison methods
def is_equivalent(self, other: Any, /, *, format: _FormatType = False) -> bool:
r"""Check equivalence between Cosmologies.
Two cosmologies may be equivalent even if not the same class.
For example, an instance of ``LambdaCDM`` might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like ``FlatLambdaCDM``.
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object to which to compare.
format : bool or None or str, optional keyword-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be
equivalent to a Cosmology.
`False` (default) will not allow conversion. `True` or `None` will,
and will use the auto-identification to try to infer the correct
format. A `str` is assumed to be the correct format to use when
converting.
``format`` is broadcast to match the shape of ``other``.
Note that the cosmology arguments are not broadcast against
``format``, so it cannot determine the output shape.
Returns
-------
bool
True if cosmologies are equivalent, False otherwise.
Examples
--------
Two cosmologies may be equivalent even if not of the same class.
In this example, the ``LambdaCDM`` has ``Ode0`` set to the same value
calculated in ``FlatLambdaCDM``.
>>> import astropy.units as u
>>> from astropy.cosmology import LambdaCDM, FlatLambdaCDM
>>> cosmo1 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo1.is_equivalent(cosmo2)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmo3.is_equivalent(cosmo2)
False
Also, using the keyword argument, the notion of equivalence is extended
to any Python object that can be converted to a |Cosmology|.
>>> from astropy.cosmology import Planck18
>>> tbl = Planck18.to_format("astropy.table")
>>> Planck18.is_equivalent(tbl, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be
checked with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of
these kinds can still be checked for equivalence, but the correct
format string must be used.
>>> tbl = Planck18.to_format("yaml")
>>> Planck18.is_equivalent(tbl, format="yaml")
True
"""
from .funcs import cosmology_equal
try:
return cosmology_equal(
self, other, format=(None, format), allow_equivalent=True
)
except Exception:
# `is_equivalent` allows `other` to be any object and returns False
# if `other` cannot be converted to a Cosmology, rather than
# raising an Exception.
return False
def __equiv__(self, other: Any, /) -> bool:
"""Cosmology equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object in which to compare.
Returns
-------
bool or `NotImplemented`
`NotImplemented` if ``other`` is from a different class.
`True` if ``other`` is of the same class and has matching parameters
and parameter values.
`False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__equiv__
# Check all parameters in 'other' match those in 'self' and 'other' has
# no extra parameters (latter part should never happen b/c same class)
return set(self.__all_parameters__) == set(other.__all_parameters__) and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
def __eq__(self, other: Any, /) -> bool:
"""Check equality between Cosmologies.
Checks the Parameters and immutable fields (i.e. not "meta").
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance, positional-only
The object in which to compare.
Returns
-------
bool
`True` if Parameters and names are the same, `False` otherwise.
"""
if other.__class__ is not self.__class__:
return NotImplemented # allows other.__eq__
eq = (
# non-Parameter checks: name
self.name == other.name
# check all parameters in 'other' match those in 'self' and 'other'
# has no extra parameters (latter part should never happen b/c same
# class) TODO! element-wise when there are array cosmologies
and set(self.__all_parameters__) == set(other.__all_parameters__)
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__all_parameters__
)
)
return eq
# ---------------------------------------------------------------
def __repr__(self):
namelead = f"{self.__class__.__qualname__}("
if self.name is not None:
namelead += f'name="{self.name}", '
# nicely formatted parameters
fmtps = (f"{k}={getattr(self, k)}" for k in self.__parameters__)
return namelead + ", ".join(fmtps) + ")"
def __astropy_table__(self, cls, copy, **kwargs):
"""Return a `~astropy.table.Table` of type ``cls``.
Parameters
----------
cls : type
Astropy ``Table`` class or subclass.
copy : bool
Ignored.
**kwargs : dict, optional
Additional keyword arguments. Passed to ``self.to_format()``.
See ``Cosmology.to_format.help("astropy.table")`` for allowed kwargs.
Returns
-------
`astropy.table.Table` or subclass instance
Instance of type ``cls``.
"""
return self.to_format("astropy.table", cls=cls, **kwargs)
class FlatCosmologyMixin(metaclass=abc.ABCMeta):
"""
Mixin class for flat cosmologies. Do NOT instantiate directly.
Note that all instances of ``FlatCosmologyMixin`` are flat, but not all
flat cosmologies are instances of ``FlatCosmologyMixin``. For example,
``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
__all_parameters__: tuple[str, ...]
__parameters__: tuple[str, ...]
def __init_subclass__(cls: type[_FlatCosmoT]) -> None:
super().__init_subclass__()
# Determine the non-flat class.
# This will raise a TypeError if the MRO is inconsistent.
cls.__nonflatclass__
# ===============================================================
@classmethod # TODO! make metaclass-method
def _get_nonflat_cls(
cls, kls: type[_CosmoT] | None = None
) -> type[Cosmology] | None:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
inheritance. This is similar to the error normally raised by Python
for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass.
"""
_kls = cls if kls is None else kls
# Find non-flat classes
nonflat: set[type[Cosmology]]
nonflat = {
b
for b in _kls.__bases__
if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)
}
if not nonflat: # e.g. subclassing FlatLambdaCDM
nonflat = {
k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None
}
if len(nonflat) > 1:
raise TypeError(
"cannot create a consistent non-flat class resolution order "
f"for {_kls} with bases {nonflat} at the same inheritance level."
)
if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
return None
return nonflat.pop()
__nonflatclass__ = classproperty(
_get_nonflat_cls, lazy=True, doc="Return the corresponding non-flat class."
)
# ===============================================================
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
@abc.abstractmethod
def nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool = False, **kwargs):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
"""
if to_nonflat:
return self.nonflat.clone(meta=meta, **kwargs)
return super().clone(meta=meta, **kwargs)
# ===============================================================
def __equiv__(self, other):
"""flat-|Cosmology| equivalence.
Use `astropy.cosmology.funcs.cosmology_equal` with
``allow_equivalent=True`` for actual checks!
Parameters
----------
other : `~astropy.cosmology.Cosmology` subclass instance
The object to which to compare for equivalence.
Returns
-------
bool or `NotImplemented`
`True` if ``other`` is of the same class / non-flat class (e.g.
|FlatLambdaCDM| and |LambdaCDM|) and has matching parameters and
parameter values.
`False` if ``other`` is of the same class but has different
parameters.
`NotImplemented` otherwise.
"""
if isinstance(other, FlatCosmologyMixin):
return super().__equiv__(other) # super gets from Cosmology
# Check if `other` is the non-flat version of this class. This makes the
# assumption that any further subclass of a flat cosmology keeps the same
# physics.
if not issubclass(other.__class__, self.__nonflatclass__):
return NotImplemented
# Check that the parameters are equivalent: all parameters in ``other``
# match those in ``self``, and ``other`` has no extra parameters.
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
# equal
and all(
np.all(getattr(self, k) == getattr(other, k))
for k in self.__parameters__
)
# flatness check
and other.is_flat
)
return params_eq
# -----------------------------------------------------------------------------
def __getattr__(attr):
from . import flrw
if hasattr(flrw, attr) and attr not in ("__path__",):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
f"`astropy.cosmology.core.{attr}` has been moved (since v5.0) and "
f"should be imported as ``from astropy.cosmology import {attr}``."
" In future this will raise an exception.",
AstropyDeprecationWarning,
)
return getattr(flrw, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
| bsd-3-clause | 8ccaa83149f40ed50558e97b0dde3a4f | 35.143791 | 88 | 0.57509 | 4.277703 | false | false | false | false |
astropy/astropy | astropy/io/misc/asdf/tags/coordinates/frames.py | 3 | 4873 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import warnings
from asdf import tagged
import astropy.coordinates
import astropy.units as u
from astropy.coordinates import ICRS, Angle, Latitude, Longitude
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.types import AstropyType
from astropy.units import Quantity
__all__ = ["CoordType"]
SCHEMA_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"data",
"schemas",
"astropy.org",
"astropy",
)
)
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, "coordinates", "frames", "*.yaml")
files = glob.glob(search)
names = []
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split("-")
# Skip baseframe because we cannot directly save / load it.
# Skip icrs because we have an explicit tag for it because there are
# two versions.
if frame not in ["baseframe", "icrs"]:
names.append(frame)
return names
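# Illustrative check (a minimal sketch): the frame name is the part of the
# schema file name before the version suffix.
#
#     >>> fname = "galactic-1.0.0.yaml"   # hypothetical schema file name
#     >>> fname.split("-")[0]
#     'galactic'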
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind("/") + 1 :]
tag = tag[: tag.rfind("-")]
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
frame = cls._tag_to_frame(node._tag)
data = node.get("data", None)
if data is not None:
return frame(node["data"], **node["frame_attributes"])
return frame(**node["frame_attributes"])
@classmethod
def to_tree_tagged(cls, frame, ctx):
warnings.warn(create_asdf_deprecation_warning())
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError(
"Can only save frames that are registered with the "
"transformation graph."
)
node = {}
if frame.has_data:
node["data"] = frame.data
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node["frame_attributes"] = frame_attributes
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert u.allclose(new.data.lon, old.data.lon)
assert u.allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ["astropy"]
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ["astropy.coordinates.ICRS"]
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ["astropy"]
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = Angle(node["ra"]["wrap_angle"])
ra = Longitude(
node["ra"]["value"], unit=node["ra"]["unit"], wrap_angle=wrap_angle
)
dec = Latitude(node["dec"]["value"], unit=node["dec"]["unit"])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node["ra"] = {
"value": frame.ra.value,
"unit": frame.ra.unit.to_string(),
"wrap_angle": wrap_angle,
}
node["dec"] = {"value": frame.dec.value, "unit": frame.dec.unit.to_string()}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert u.allclose(new.ra, old.ra)
assert u.allclose(new.dec, old.dec)
| bsd-3-clause | 859be89bec225afa352188d01819bd9c | 28.005952 | 84 | 0.605171 | 3.748462 | false | false | false | false |
astropy/astropy | astropy/io/fits/card.py | 3 | 50925 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from . import conf
from .util import _is_int, _str_to_num, _words_group, translate
from .verify import VerifyError, VerifyWarning, _ErrList, _Verify
__all__ = ["Card", "Undefined"]
FIX_FP_TABLE = str.maketrans("de", "DE")
FIX_FP_TABLE2 = str.maketrans("dD", "eE")
CARD_LENGTH = 80
BLANK_CARD = " " * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = "= " # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = "=" # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
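# Illustrative sketch (doctest-style, not part of the original module): the
# NFSC form tolerates spacing and lower-case exponent letters that the strict
# FSC form rejects:
#
#   >>> import re
#   >>> bool(re.match(Card._numr_FSC + "$", "-1.0D05"))      # doctest: +SKIP
#   True
#   >>> bool(re.match(Card._numr_FSC + "$", "- 1.0 d 05"))   # doctest: +SKIP
#   False
#   >>> bool(Card._number_NFSC_RE.match("- 1.0 d 05"))       # doctest: +SKIP
#   True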
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment maybe an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = r"(?P<keyword>{}): +(?P<val>{})".format(
_rvkc_field_specifier_s, _numr_FSC
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
r"(?P<keyword>{})\.(?P<field_specifier>{})$".format(
_rvkc_identifier, _rvkc_field_specifier_s
)
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
"Keyword name {!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.".format(keyword),
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
"Floating point {!r} values are not allowed in FITS headers.".format(
value
)
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
"characters; {!r} contains characters not representable in "
"ASCII or non-printable characters.".format(value)
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ""
return ""
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
"ASCII characters; {!r} contains characters not "
"representable in ASCII or non-printable characters.".format(
comment
)
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
# Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError(
"The field-specifier may not be blank in record-valued keyword cards."
)
elif not self.field_specifier:
raise AttributeError(
"Cannot coerce cards to be record-valued "
"keyword cards by setting the "
"field_specifier attribute"
)
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
# The keyword need also be updated
keyword = self._keyword.split(".", 1)[0]
self._keyword = ".".join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode("latin1")
card._image = _pad(image)
card._verified = False
return card
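# Usage sketch (illustrative):
#
#   >>> c = Card.fromstring("SIMPLE  =                    T / conforms to FITS")
#   >>> c.keyword, c.value, c.comment                 # doctest: +SKIP
#   ('SIMPLE', True, 'conforms to FITS')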
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return ".".join(
(match.group("keyword").strip().upper(), match.group("field_specifier"))
)
elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
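# For example (illustrative): the base keyword is uppercased while a
# field-specifier keeps its original case:
#
#   >>> Card.normalize_keyword("naxis")               # doctest: +SKIP
#   'NAXIS'
#   >>> Card.normalize_keyword("dp1.axis.1")          # doctest: +SKIP
#   'DP1.axis.1'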
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
If one argument is given, that argument is treated as a full card image
and parsed as such. If two arguments are given, the first is treated
as the card keyword (including the field-specifier if the card is
intended as a RVKC), and the second as the card value OR the first value
can be the base keyword, and the second value the 'field-specifier:
value' string.
If the check passes the ._keyword, ._value, and .field_specifier
keywords are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(
match.group("keyword"), match.group("field_specifier"), None, value
)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(": ") > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(
keyword, match.group("keyword"), value, match.group("val")
)
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN :]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(": ") < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(
keyword,
match.group("keyword"),
match.group("rawval"),
match.group("val"),
)
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = ".".join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (
keyword_upper == "HIERARCH"
and self._image[8] == " "
and HIERARCH_VALUE_INDICATOR in self._image
):
# This is valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
"The following header keyword is invalid or follows an "
"unrecognized non-standard convention:\n{}".format(self._image),
AstropyUserWarning,
)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError(
"Unparsable card ({}), fix it first with .verify('fix').".format(
self.keyword
)
)
if m.group("bool") is not None:
value = m.group("bool") == "T"
elif m.group("strg") is not None:
value = re.sub("''", "'", m.group("strg"))
elif m.group("numr") is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group("numr"))
digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
if numr.group("sign") is None:
sign = ""
else:
sign = numr.group("sign")
value = _str_to_num(sign + digt)
elif m.group("cplx") is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
if real.group("sign") is None:
rsign = ""
else:
rsign = real.group("sign")
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
if imag.group("sign") is None:
isign = ""
else:
isign = imag.group("sign")
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group("valu")
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparsable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ""
valuecomment = self._split()[1]
m = self._value_NFSC_RE.match(valuecomment)
comment = ""
if m is not None:
# Don't combine this if statement with the one above, because
# we only want the elif case to run if this was not a valid
# card at all
if m.group("comm"):
comment = m.group("comm").rstrip()
elif "/" in valuecomment:
# The value in this FITS file was not in a valid/known format. In
# this case the best we can do is guess that everything after the
# first / was meant to be the comment
comment = valuecomment.split("/", 1)[1].strip()
return comment
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
# Split cards with CONTINUE cards or commentary keywords with long
# values
if len(self._image) > self.length:
values = []
comments = []
keyword = None
for card in self._itersubcards():
kw, vc = card._split()
if keyword is None:
keyword = kw
if keyword in self._commentary_keywords:
values.append(vc)
continue
# Should match a string followed by a comment; if not it
# might be an invalid Card, so we just take it verbatim
m = self._strg_comment_RE.match(vc)
if not m:
return kw, vc
value = m.group("strg") or ""
value = value.rstrip().replace("''", "'")
if value and value[-1] == "&":
value = value[:-1]
values.append(value)
comment = m.group("comm")
if comment:
comments.append(comment.rstrip())
if keyword in self._commentary_keywords:
valuecomment = "".join(values)
else:
# CONTINUE card
valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
return keyword, valuecomment
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(" ", 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != "HIERARCH ":
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = ".".join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split("/", 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group("numr") is not None:
numr = self._number_NFSC_RE.match(m.group("numr"))
value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
if numr.group("sign") is not None:
value = numr.group("sign") + value
elif m.group("cplx") is not None:
real = self._number_NFSC_RE.match(m.group("real"))
rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
if real.group("sign") is not None:
rdigt = real.group("sign") + rdigt
imag = self._number_NFSC_RE.match(m.group("imag"))
idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
if imag.group("sign") is not None:
idigt = imag.group("sign") + idigt
value = f"({rdigt}, {idigt})"
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
return "{:{len}}".format(
self.keyword.split(".", 1)[0], len=KEYWORD_LENGTH
)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (
self._valuestring
and not self._valuemodified
and isinstance(self.value, float_types)
):
# Keep the existing formatting for float/complex numbers
value = f"{self._valuestring:>20}"
elif self.field_specifier:
value = _format_value(self._value).strip()
value = f"'{self.field_specifier}: {value}'"
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ""
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ""
# put all parts together
output = "".join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
# guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if keywordvalue_length > self.length and keyword.startswith("HIERARCH"):
if keywordvalue_length == self.length + 1 and keyword[-1] == " ":
output = "".join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError(
"The header keyword {!r} with its value is too long".format(
self.keyword
)
)
if len(output) <= self.length:
output = f"{output:80}"
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if isinstance(self.value, str) and len(value) > (self.length - 10):
output = self._format_long_image()
else:
warnings.warn(
"Card is too long, comment will be truncated.", VerifyWarning
)
output = output[: Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = "{:{len}}= ".format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = "CONTINUE "
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append(f"{headstr + value:80}")
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append(f"{comment:80}")
return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
will render the card as multiple consecutive commentary card of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
def _verify(self, option="warn"):
errs = []
fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return _ErrList(errs)
# verify the equal sign position
if self.keyword not in self._commentary_keywords and (
self._image
and self._image[:9].upper() != "HIERARCH "
and self._image.find("=") != 8
):
errs.append(
dict(
err_text=(
"Card {!r} is not FITS standard (equal sign not "
"at column 8).".format(self.keyword)
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(
dict(
err_text=f"Card keyword {keyword!r} is not upper case.",
fix_text=fix_text,
fix=self._fix_keyword,
)
)
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split(".", 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(
dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False)
)
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(
dict(
err_text=(
"Unprintable string {!r}; commentary cards may "
"only contain printable ASCII characters".format(
valuecomment
)
),
fixable=False,
)
)
else:
if not self._valuemodified:
m = self._value_FSC_RE.match(valuecomment)
# If the value of a card was replaced before the card was ever
# even verified, the new value can be considered valid, so we
# don't bother verifying the old value. See
# https://github.com/astropy/astropy/issues/5408
if m is None:
errs.append(
dict(
err_text=(
f"Card {self.keyword!r} is not FITS standard "
f"(invalid value string: {valuecomment!r})."
),
fix_text=fix_text,
fix=self._fix_value,
)
)
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group("comm")
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(
dict(
err_text=(
f"Unprintable string {comment!r}; header "
"comments may only contain printable "
"ASCII characters"
),
fixable=False,
)
)
errs = _ErrList([self.run_option(option, **err) for err in errs])
self._verified = True
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
This can also support the case where a HISTORY or COMMENT card has a
long value that is stored internally as multiple concatenated card
images.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx : idx + Card.length])
if idx > 0 and card.keyword.upper() not in self._special_keywords:
raise VerifyError(
"Long card images must have CONTINUE cards after "
"the first card or have commentary keywords like "
"HISTORY or COMMENT."
)
if not isinstance(card.value, str):
raise VerifyError("CONTINUE cards must have string values.")
yield card
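# Illustrative sketch: a 160-character image made of a string-valued card
# plus one CONTINUE card yields two subcards:
#
#   >>> c = Card.fromstring(
#   ...     "STRKEY  = 'a&'" + " " * 66 + "CONTINUE  'b'" + " " * 67
#   ... )
#   >>> len(list(c._itersubcards()))                  # doctest: +SKIP
#   2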
def _int_or_float(s):
"""
Converts a string to an int if possible, otherwise to a float.
If the string is neither an int nor a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
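# For example (illustrative):
#
#   >>> _int_or_float("42"), _int_or_float("4.2")     # doctest: +SKIP
#   (42, 4.2)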
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
# string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == "":
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = f"'{exp_val_str:8}'"
return f"{val_str:20}"
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return f"{repr(value)[0]:>20}" # T or F
elif _is_int(value):
return f"{value:>20d}"
elif isinstance(value, (float, np.floating)):
return f"{_format_float(value):>20}"
elif isinstance(value, (complex, np.complexfloating)):
val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
return f"{val_str:>20}"
elif isinstance(value, Undefined):
return ""
else:
return ""
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = f"{value:.16G}"
if "." not in value_str and "E" not in value_str:
value_str += ".0"
elif "E" in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split("E")
if exponent[0] in ("+", "-"):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ""
value_str = f"{significand}E{sign}{int(exponent):02d}"
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find("E")
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
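# For example (illustrative):
#
#   >>> _format_float(3.0)                            # doctest: +SKIP
#   '3.0'
#   >>> _format_float(1e-45)                          # doctest: +SKIP
#   '1E-45'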
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + " " * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + " " * (Card.length - strlen)
| bsd-3-clause | cfcab311bc3ac51eb76412cc55b24ca5 | 35.84877 | 88 | 0.537614 | 4.341802 | false | false | false | false |
astropy/astropy | astropy/visualization/scripts/tests/test_fits2bitmap.py | 3 | 2470 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib.image as mpimg
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestFits2Bitmap:
def setup_class(self):
self.filename = "test.fits"
self.array = np.arange(16384).reshape((128, 128))
@pytest.mark.openfiles_ignore
def test_function(self, tmp_path):
filename = tmp_path / self.filename
fits.writeto(filename, self.array)
fits2bitmap(filename)
@pytest.mark.openfiles_ignore
def test_script(self, tmp_path):
filename = str(tmp_path / self.filename)
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
@pytest.mark.openfiles_ignore
def test_exten_num(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", "1"])
@pytest.mark.openfiles_ignore
def test_exten_name(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
extname = "SCI"
hdu2 = fits.ImageHDU(self.array)
hdu2.header["EXTNAME"] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", extname])
@pytest.mark.parametrize("file_exten", [".gz", ".bz2"])
def test_compressed_fits(self, tmp_path, file_exten):
filename = str(tmp_path / f"test.fits{file_exten}")
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
@pytest.mark.openfiles_ignore
def test_orientation(self, tmp_path):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = str(tmp_path / self.filename)
out_filename = "fits2bitmap_test.png"
out_filename = str(tmp_path / out_filename)
data = np.zeros((32, 32))
data[0:16, :] = 1.0
fits.writeto(filename, data)
main([filename, "-e", "0", "-o", out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
| bsd-3-clause | 91cf34a3b1883b56bf601faf561947ff | 31.933333 | 75 | 0.625101 | 3.355978 | false | true | false | false |
astropy/astropy | astropy/cosmology/io/tests/test_html.py | 3 | 10462 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
import astropy.units as u
from astropy.cosmology.io.html import _FORMAT_TABLE, read_html_table, write_html_table
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable, Table, vstack
from astropy.units.decorators import NoneType
from astropy.utils.compat.optional_deps import HAS_BS4
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteHTMLTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.html"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_html_table_bad_index.html"
write(fp, format="ascii.html")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.html")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.html")
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_html_table_failed_cls.html"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.html", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_html_table_cls.html"
write(fp, format="ascii.html", cls=tbl_cls)
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_table_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.html -> cosmology."""
fp = tmp_path / "test_readwrite_html_table_instance.html"
# ------------
# To Table
write(fp, format="ascii.html")
# some checks on the saved file
tbl = QTable.read(fp)
# assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ # metadata read not implemented
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.html", overwrite=True)
# tests are different if the last argument is a **kwarg
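# (kind == 4 corresponds to inspect.Parameter.VAR_KEYWORD)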
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.html")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
# assert "mismatching" not in got.meta # metadata read not implemented
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.html")
# unless mismatched are moved to meta
got = read(fp, format="ascii.html", move_to_meta=True)
assert got == cosmo
# assert got.meta["mismatching"] == "will error" # metadata read not implemented
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.html", overwrite=True)
got = read(fp, format="ascii.html")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
# tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] #
# metadata read not implemented
got = read(fp, format="ascii.html")
assert got == cosmo
got = read(fp)
assert got == cosmo
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
# cosmology name is still stored in first slot
for column_name in converted_tbl.colnames[1:]:
assert column_name in _FORMAT_TABLE.keys()
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
@pytest.mark.parametrize("latex_names", [True, False])
def test_readwrite_html_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, latex_names, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_html_subclass_partial_info.html"
# test write
write(fp, format="ascii.html", latex_names=latex_names)
# partial information
tbl = QTable.read(fp)
# tbl.meta.pop("cosmology", None) # metadata not implemented
cname = "$$T_{0}$$" if latex_names else "Tcmb0"
del tbl[cname] # format is not converted to original units
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.html")
got2 = read(fp, format="ascii.html", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.html", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
# assert got.meta == cosmo.meta # metadata read not implemented
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_mutlirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
fp = tmp_path / "test_readwrite_html_mutlirow.html"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
table = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
cosmo_cls = type(cosmo)
assert cosmo_cls is not NoneType
for n, col in zip(table.colnames, table.itercols()):
if n == "cosmology":
continue
param = getattr(cosmo_cls, n)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(n, (col << param.unit).value, copy=False)
table.write(fp, format="ascii.html")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.html")
# unless the index argument is provided
got = cosmo_cls.read(fp, index=1, format="ascii.html")
# got = read(fp, index=1, format="ascii.html")
assert got == cosmo
# the index can be a string
got = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
table.add_index("name")
got2 = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got2 == cosmo
class TestReadWriteHTML(ReadWriteDirectTestBase, ReadWriteHTMLTestMixin):
"""
Directly test ``read/write_html``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.html")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_html_table, "write": write_html_table}
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_direct_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
for column_name in tbl.colnames[2:]:
# for now, Cosmology as metadata and name is stored in first 2 slots
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
for column_name in converted_tbl.colnames[1:]:
# for now now, metadata is still stored in first slot
assert column_name in _FORMAT_TABLE.keys()
| bsd-3-clause | e822d65d6611554a9ed10d2e104e11ca | 38.084291 | 97 | 0.603422 | 3.851988 | false | true | false | false |
scikit-learn/scikit-learn | sklearn/svm/_base.py | 9 | 42504 | import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
# mypy error: error: Module 'sklearn.svm' has no attribute '_libsvm'
# (and same for other imports)
from . import _libsvm as libsvm # type: ignore
from . import _liblinear as liblinear # type: ignore
from . import _libsvm_sparse as libsvm_sparse # type: ignore
from ..base import BaseEstimator, ClassifierMixin
from ..preprocessing import LabelEncoder
from ..utils.multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state
from ..utils import column_or_1d
from ..utils import compute_class_weight
from ..utils.metaestimators import available_if
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, _check_large_sparse
from ..utils.validation import _num_samples
from ..utils.validation import _check_sample_weight, check_consistent_length
from ..utils.multiclass import check_classification_targets
from ..utils._param_validation import Interval, StrOptions
from ..exceptions import ConvergenceWarning
from ..exceptions import NotFittedError
LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi-class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class2:
sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
return coef
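# Shape sketch (illustrative): for n_class classes, `coef` holds
# n_class * (n_class - 1) / 2 vectors, one per (class1, class2) pair, each of
# length n_features -- e.g. 3 classes with 5 features stack into a (3, 5)
# primal coefficient matrix.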
class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):
"""Base class for estimators that use libsvm as backing library.
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
_parameter_constraints: dict = {
"kernel": [
StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}),
callable,
],
"degree": [Interval(Integral, 0, None, closed="left")],
"gamma": [
StrOptions({"scale", "auto"}),
Interval(Real, 0.0, None, closed="left"),
],
"coef0": [Interval(Real, None, None, closed="neither")],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"C": [Interval(Real, 0.0, None, closed="neither")],
"nu": [Interval(Real, 0.0, 1.0, closed="right")],
"epsilon": [Interval(Real, 0.0, None, closed="left")],
"shrinking": ["boolean"],
"probability": ["boolean"],
"cache_size": [Interval(Real, 0, None, closed="neither")],
"class_weight": [StrOptions({"balanced"}), dict, None],
"verbose": ["verbose"],
"max_iter": [Interval(Integral, -1, None, closed="left")],
"random_state": ["random_state"],
}
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(
self,
kernel,
degree,
gamma,
coef0,
tol,
C,
nu,
epsilon,
shrinking,
probability,
cache_size,
class_weight,
verbose,
max_iter,
random_state,
):
if self._impl not in LIBSVM_IMPL:
raise ValueError(
"impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl)
)
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
def _more_tags(self):
# Used by cross_val_score.
return {"pairwise": self.kernel == "precomputed"}
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like of shape (n_samples,)
Target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
self._validate_params()
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
if callable(self.kernel):
check_consistent_length(X, y)
else:
X, y = self._validate_data(
X,
y,
dtype=np.float64,
order="C",
accept_sparse="csr",
accept_large_sparse=False,
)
y = self._validate_targets(y)
sample_weight = np.asarray(
[] if sample_weight is None else sample_weight, dtype=np.float64
)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
n_samples = _num_samples(X)
if solver_type != 2 and n_samples != y.shape[0]:
raise ValueError(
"X and y have incompatible shapes.\n"
+ "X has %s samples, but y has %s." % (n_samples, y.shape[0])
)
if self.kernel == "precomputed" and n_samples != X.shape[1]:
raise ValueError(
"Precomputed matrix must be a square matrix."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
raise ValueError(
"sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape)
)
kernel = "precomputed" if callable(self.kernel) else self.kernel
if kernel == "precomputed":
# unused but needs to be a float for cython code that ignores
# it anyway
self._gamma = 0.0
elif isinstance(self.gamma, str):
if self.gamma == "scale":
# var = E[X^2] - E[X]^2 if sparse
X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
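# e.g. standardized data (unit variance) with n_features = 20 gives
# gamma = 1 / 20, coinciding with gamma="auto" in that special case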
elif self.gamma == "auto":
self._gamma = 1.0 / X.shape[1]
elif isinstance(self.gamma, Real):
self._gamma = self.gamma
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose:
print("[LibSVM]", end="")
seed = rnd.randint(np.iinfo("i").max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_
# internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
intercept_finiteness = np.isfinite(self._intercept_).all()
dual_coef_finiteness = np.isfinite(dual_coef).all()
if not (intercept_finiteness and dual_coef_finiteness):
raise ValueError(
"The dual coefficients or intercepts are not finite. "
"The input data may contain large values and need to be"
"preprocessed."
)
# Since, in the case of SVC and NuSVC, the number of models optimized by
# libSVM could be greater than one (depending on the input), `n_iter_`
# stores an ndarray.
# For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of
# models optimized by libSVM is always one, so `n_iter_` stores an
# integer.
if self._impl in ["c_svc", "nu_svc"]:
self.n_iter_ = self._num_iter
else:
self.n_iter_ = self._num_iter.item()
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
return column_or_1d(y, warn=True).astype(np.float64, copy=False)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn(
"Solver terminated early (max_iter=%i)."
" Consider pre-processing your data with"
" StandardScaler or MinMaxScaler."
% self.max_iter,
ConvergenceWarning,
)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
(
self.support_,
self.support_vectors_,
self._n_support,
self.dual_coef_,
self.intercept_,
self._probA,
self._probB,
self.fit_status_,
self._num_iter,
) = libsvm.fit(
X,
y,
svm_type=solver_type,
sample_weight=sample_weight,
# TODO(1.4): Replace "_class_weight" with "class_weight_"
class_weight=getattr(self, "_class_weight", np.empty(0)),
kernel=kernel,
C=self.C,
nu=self.nu,
probability=self.probability,
degree=self.degree,
shrinking=self.shrinking,
tol=self.tol,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
epsilon=self.epsilon,
max_iter=self.max_iter,
random_seed=random_seed,
)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
(
self.support_,
self.support_vectors_,
dual_coef_data,
self.intercept_,
self._n_support,
self._probA,
self._probB,
self.fit_status_,
self._num_iter,
) = libsvm_sparse.libsvm_sparse_train(
X.shape[1],
X.data,
X.indices,
X.indptr,
y,
solver_type,
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
# TODO(1.4): Replace "_class_weight" with "class_weight_"
getattr(self, "_class_weight", np.empty(0)),
sample_weight,
self.nu,
self.cache_size,
self.epsilon,
int(self.shrinking),
int(self.probability),
self.max_iter,
random_seed,
)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
if not n_SV:
self.dual_coef_ = sp.csr_matrix([])
else:
dual_coef_indptr = np.arange(
0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV)
)
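# Illustrative layout (assumed values, not from the source): with
# n_class=2 and n_SV=3, dual_coef_indices is [0, 1, 2, 0, 1, 2] and
# dual_coef_indptr is [0, 3, 6], i.e. one CSR row of three dual
# coefficients per class.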
def predict(self, X):
"""Perform regression on samples in X.
For a one-class model, +1 (inlier) or -1 (outlier) is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted values.
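Examples
--------
An illustrative sketch with the public OneClassSVM subclass (toy data):
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0.0], [0.1], [0.2], [10.0]])
>>> preds = OneClassSVM(nu=0.5).fit(X).predict(X)
>>> set(np.unique(preds)) <= {-1, 1}
True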
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order="C", accept_large_sparse=False)
kernel = self.kernel
if callable(self.kernel):
kernel = "precomputed"
if X.shape[1] != self.shape_fit_[0]:
raise ValueError(
"X.shape[1] = %d should be equal to %d, "
"the number of samples at training time"
% (X.shape[1], self.shape_fit_[0])
)
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=svm_type,
kernel=kernel,
degree=self.degree,
coef0=self.coef0,
gamma=self._gamma,
cache_size=self.cache_size,
)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
C,
# TODO(1.4): Replace "_class_weight" with "class_weight_"
getattr(self, "_class_weight", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of a precomputed kernel given as a function, we
# have to explicitly compute the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order="C")
return X
def _decision_function(self, X):
"""Evaluates the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
dec : array-like of shape (n_samples, n_classes * (n_classes - 1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False)
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
return libsvm.decision_function(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel,
degree=self.degree,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
kernel = self.kernel
if hasattr(kernel, "__call__"):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
# TODO(1.4): Replace "_class_weight" with "class_weight_"
getattr(self, "_class_weight", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _validate_for_predict(self, X):
check_is_fitted(self)
if not callable(self.kernel):
X = self._validate_data(
X,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
reset=False,
)
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__
)
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError(
"X.shape[1] = %d should be equal to %d, "
"the number of samples at training time"
% (X.shape[1], self.shape_fit_[0])
)
# Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975
# Check that _n_support is consistent with support_vectors
sv = self.support_vectors_
if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]:
raise ValueError(
f"The internal representation of {self.__class__.__name__} was altered"
)
return X
@property
def coef_(self):
"""Weights assigned to the features when `kernel="linear"`.
Returns
-------
ndarray of shape (1, n_features) for regressors and binary
classifiers, or (n_classes * (n_classes - 1) / 2, n_features)
for multi-class classifiers.
"""
if self.kernel != "linear":
raise AttributeError("coef_ is only available when using a linear kernel")
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrix do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
@property
def n_support_(self):
"""Number of support vectors for each class."""
try:
check_is_fitted(self)
except NotFittedError:
raise AttributeError
svm_type = LIBSVM_IMPL.index(self._impl)
if svm_type in (0, 1):
return self._n_support
else:
# SVR and OneClass
# _n_support has size 2, we make it size 1
return np.array([self._n_support[0]])
class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
"""ABC for LibSVM-based classifiers."""
_parameter_constraints: dict = {
**BaseLibSVM._parameter_constraints,
"decision_function_shape": [StrOptions({"ovr", "ovo"})],
"break_ties": ["boolean"],
}
for unused_param in ["epsilon", "nu"]:
_parameter_constraints.pop(unused_param)
@abstractmethod
def __init__(
self,
kernel,
degree,
gamma,
coef0,
tol,
C,
nu,
shrinking,
probability,
cache_size,
class_weight,
verbose,
max_iter,
decision_function_shape,
random_state,
break_ties,
):
self.decision_function_shape = decision_function_shape
self.break_ties = break_ties
super().__init__(
kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
C=C,
nu=nu,
epsilon=0.0,
shrinking=shrinking,
probability=probability,
cache_size=cache_size,
class_weight=class_weight,
verbose=verbose,
max_iter=max_iter,
random_state=random_state,
)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
check_classification_targets(y)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d class"
% len(cls)
)
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order="C")
def decision_function(self, X):
"""Evaluate the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
dec : ndarray of shape (n_samples, n_classes * (n_classes - 1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes).
Notes
-----
If decision_function_shape='ovo', the function values are proportional
to the distance of the samples X to the separating hyperplane. If the
exact distances are required, divide the function values by the norm of
the weight vector (``coef_``). See also `this question
<https://stats.stackexchange.com/questions/14876/
interpreting-distance-from-hyperplane-in-svm>`_ for further details.
If decision_function_shape='ovr', the decision function is a monotonic
transformation of ovo decision function.
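Examples
--------
A hedged sketch on a made-up 3-class problem; note that for 3 classes
the ovo and ovr output widths coincide (3 * 2 / 2 == 3):
>>> import numpy as np
>>> from sklearn.svm import SVC
>>> X = np.array([[0.0], [1.0], [4.0], [5.0], [8.0], [9.0]])
>>> y = np.array([0, 0, 1, 1, 2, 2])
>>> SVC(decision_function_shape="ovo").fit(X, y).decision_function(X).shape
(6, 3)
>>> SVC(decision_function_shape="ovr").fit(X, y).decision_function(X).shape
(6, 3)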
"""
dec = self._decision_function(X)
if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
Class labels for samples in X.
"""
check_is_fitted(self)
if self.break_ties and self.decision_function_shape == "ovo":
raise ValueError(
"break_ties must be False when decision_function_shape is 'ovo'"
)
if (
self.break_ties
and self.decision_function_shape == "ovr"
and len(self.classes_) > 2
):
y = np.argmax(self.decision_function(X), axis=1)
else:
y = super().predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError(
"predict_proba is not available when probability=False"
)
if self._impl not in ("c_svc", "nu_svc"):
raise AttributeError("predict_proba only implemented for SVC and NuSVC")
return True
@available_if(_check_proba)
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with the attribute `probability` set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
T : ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
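Examples
--------
An illustrative sketch on made-up data (`probability=True` is required):
>>> import numpy as np
>>> from sklearn.svm import SVC
>>> X = np.linspace(0, 9, 10).reshape(-1, 1)
>>> y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> proba = SVC(probability=True, random_state=0).fit(X, y).predict_proba(X)
>>> proba.shape
(10, 2)
>>> bool(np.allclose(proba.sum(axis=1), 1.0))
True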
"""
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError(
"predict_proba is not available when fitted with probability=False"
)
pred_proba = (
self._sparse_predict_proba if self._sparse else self._dense_predict_proba
)
return pred_proba(X)
@available_if(_check_proba)
def predict_log_proba(self, X):
"""Compute log probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with the attribute `probability` set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
T : ndarray of shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=svm_type,
kernel=kernel,
degree=self.degree,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
# TODO(1.4): Replace "_class_weight" with "class_weight_"
getattr(self, "_class_weight", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(
self.dual_coef_, self._n_support, self.support_vectors_
)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
@property
def probA_(self):
"""Parameter learned in Platt scaling when `probability=True`.
Returns
-------
ndarray of shape (n_classes * (n_classes - 1) / 2)
"""
return self._probA
@property
def probB_(self):
"""Parameter learned in Platt scaling when `probability=True`.
Returns
-------
ndarray of shape (n_classes * (n_classes - 1) / 2)
"""
return self._probB
# TODO(1.4): Remove
@property
def _class_weight(self):
"""Weights per class"""
# Class weights are defined for classifiers during
# fit.
return self.class_weight_
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
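For example (illustrative), penalty="l2" with loss="squared_hinge" maps
to solver 1 in the dual and solver 2 in the primal:
>>> _get_liblinear_solver_type("ovr", "l2", "squared_hinge", True)
1
>>> _get_liblinear_solver_type("ovr", "l2", "squared_hinge", False)
2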
"""
# nested dicts containing level 1: available loss functions,
# level2: available penalties for the given loss function,
# level3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
"logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
"hinge": {"l2": {True: 3}},
"squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
"epsilon_insensitive": {"l2": {True: 13}},
"squared_epsilon_insensitive": {"l2": {False: 11, True: 12}},
"crammer_singer": 4,
}
if multi_class == "crammer_singer":
return _solver_type_dict[multi_class]
elif multi_class != "ovr":
raise ValueError(
"`multi_class` must be one of `ovr`, `crammer_singer`, got %r" % multi_class
)
_solver_pen = _solver_type_dict.get(loss, None)
if _solver_pen is None:
error_string = "loss='%s' is not supported" % loss
else:
_solver_dual = _solver_pen.get(penalty, None)
if _solver_dual is None:
error_string = (
"The combination of penalty='%s' and loss='%s' is not supported"
% (penalty, loss)
)
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = (
"The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s" % (penalty, loss, dual)
)
else:
return solver_num
raise ValueError(
"Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r"
% (error_string, penalty, loss, dual)
)
def _fit_liblinear(
X,
y,
C,
fit_intercept,
intercept_scaling,
class_weight,
penalty,
dual,
verbose,
max_iter,
tol,
random_state=None,
multi_class="ovr",
loss="logistic_regression",
epsilon=0.1,
sample_weight=None,
):
"""Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X
C : float
Inverse of the regularization strength. The lower the C, the
stronger the penalization.
fit_intercept : bool
Whether or not to fit the intercept, that is, to add an intercept
term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept, treating it like any
other term in the feature vector. To reduce the impact of this
regularization on the intercept, one should increase intercept_scaling,
such that the feature vector becomes [x, intercept_scaling].
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
multi_class : {'ovr', 'crammer_singer'}, default='ovr'
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
because it is consistent, it is seldom used in practice, rarely leads
to better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : {'logistic_regression', 'hinge', 'squared_hinge', \
'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
default='logistic_regression'
The loss function used to fit the model.
epsilon : float, default=0.1
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
sample_weight : array-like of shape (n_samples,), default=None
Weights assigned to each sample.
Returns
-------
coef_ : ndarray of shape (n_classes, n_features)
The coefficient vector obtained by minimizing the objective function.
intercept_ : ndarray of shape (n_classes,) or float
The intercept term added to the decision function; 0.0 when
fit_intercept is False.
n_iter_ : array of int
Number of iterations run for each class.
"""
if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError(
"This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r"
% classes_[0]
)
class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y)
else:
class_weight_ = np.empty(0, dtype=np.float64)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print("[LibLinear]", end="")
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError(
"Intercept scaling is %r but needs to be greater "
"than 0. To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling
)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# Liblinear doesn't support 64bit sparse matrix indices yet
if sp.issparse(X):
_check_large_sparse(X)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
y_ind = np.require(y_ind, requirements="W")
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X,
y_ind,
sp.isspmatrix(X),
solver_type,
tol,
bias,
C,
class_weight_,
max_iter,
rnd.randint(np.iinfo("i").max),
epsilon,
sample_weight,
)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_max = max(n_iter_)
if n_iter_max >= max_iter:
warnings.warn(
"Liblinear failed to converge, increase the number of iterations.",
ConvergenceWarning,
)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.0
return coef_, intercept_, n_iter_
| bsd-3-clause | 9299d30d812ccc0005715fa9cfd2a244 | 32.840764 | 88 | 0.559594 | 4.071654 | false | false | false | false |
scikit-learn/scikit-learn | examples/release_highlights/plot_release_highlights_1_2_0.py | 1 | 3558 | # flake8: noqa
"""
=======================================
Release Highlights for scikit-learn 1.2
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.2! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <changes_1_2>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# Pandas output with `set_output` API
# -----------------------------------
# scikit-learn's transformers now support pandas output with the `set_output` API.
# To learn more about the `set_output` API see the example:
# :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` and
# this `video, pandas DataFrame output for scikit-learn transformers
# (some examples) <https://youtu.be/5bCg8VfX2x8>`__.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
from sklearn.compose import ColumnTransformer
X, y = load_iris(as_frame=True, return_X_y=True)
sepal_cols = ["sepal length (cm)", "sepal width (cm)"]
petal_cols = ["petal length (cm)", "petal width (cm)"]
preprocessor = ColumnTransformer(
[
("scaler", StandardScaler(), sepal_cols),
("kbin", KBinsDiscretizer(encode="ordinal"), petal_cols),
],
verbose_feature_names_out=False,
).set_output(transform="pandas")
X_out = preprocessor.fit_transform(X)
X_out.sample(n=5, random_state=0)
# %%
# Interaction constraints in Histogram-based Gradient Boosting Trees
# ------------------------------------------------------------------
# :class:`~ensemble.HistGradientBoostingRegressor` and
# :class:`~ensemble.HistGradientBoostingClassifier` now support interaction constraints
# with the `interaction_cst` parameter. For details, see the
# :ref:`User Guide <interaction_cst_hgbt>`. In the following example, features are not
# allowed to interact.
from sklearn.datasets import load_diabetes
from sklearn.ensemble import HistGradientBoostingRegressor
X, y = load_diabetes(return_X_y=True, as_frame=True)
hist_no_interact = HistGradientBoostingRegressor(
interaction_cst=[[i] for i in range(X.shape[1])], random_state=0
)
hist_no_interact.fit(X, y)
# %%
# Faster parser in :func:`~datasets.fetch_openml`
# -----------------------------------------------
# :func:`~datasets.fetch_openml` now supports a new `"pandas"` parser that is
# more memory and CPU efficient. In v1.4, the default will change to
# `parser="auto"` which will automatically use the `"pandas"` parser for dense
# data and `"liac-arff"` for sparse data.
from sklearn.datasets import fetch_openml
X, y = fetch_openml(
"titanic", version=1, as_frame=True, return_X_y=True, parser="pandas"
)
X.head()
# %%
# Experimental Array API support in :class:`~discriminant_analysis.LinearDiscriminantAnalysis`
# --------------------------------------------------------------------------------------------
# Experimental support for the `Array API <https://data-apis.org/array-api/latest/>`_
# specification was added to :class:`~discriminant_analysis.LinearDiscriminantAnalysis`.
# The estimator can now run on any Array API compliant library, such as
# `CuPy <https://docs.cupy.dev/en/stable/overview.html>`__, a GPU-accelerated array
# library. For details, see the :ref:`User Guide <array_api>`.
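# A minimal illustrative sketch: fit on NumPy data as usual. With
# `sklearn.config_context(array_api_dispatch=True)` and Array API inputs
# (e.g. CuPy arrays, assuming the optional dependencies are installed),
# the same code can run on the GPU-backed library.
from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
X, y = make_classification(n_samples=100, random_state=0)
X_trans = LinearDiscriminantAnalysis().fit_transform(X, y)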
| bsd-3-clause | 8d7bf09be36755f38e94874d9bf73edd | 37.673913 | 94 | 0.671726 | 3.572289 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/naive_bayes.py | 8 | 56302 | """
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Real, Integral
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative
from .utils.validation import _check_sample_weight
from .utils._param_validation import Interval, Hidden, StrOptions
__all__ = [
"BernoulliNB",
"GaussianNB",
"MultinomialNB",
"ComplementNB",
"CategoricalNB",
]
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_samples, n_classes).
Public methods predict, predict_proba, predict_log_proba, and
predict_joint_log_proba pass the input through _check_X before handing it
over to _joint_log_likelihood. The term "joint log likelihood" is used
interchangeably with "joint log probability".
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks.
Only used in predict* methods.
"""
def predict_joint_log_proba(self, X):
"""Return joint log probability estimates for the test vector X.
For each row x of X and class y, the joint log probability is given by
``log P(x, y) = log P(y) + log P(x|y),``
where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is
the class-conditional probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Returns the joint log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
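Examples
--------
An illustrative sketch with GaussianNB on made-up data:
>>> import numpy as np
>>> from sklearn.naive_bayes import GaussianNB
>>> X = np.array([[-2.0], [-1.0], [1.0], [2.0]])
>>> y = np.array([0, 0, 1, 1])
>>> GaussianNB().fit(X, y).predict_joint_log_proba(X).shape
(4, 2)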
"""
check_is_fitted(self)
X = self._check_X(X)
return self._joint_log_likelihood(X)
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB).
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
_parameter_constraints: dict = {
"priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
}
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
y = self._validate_data(y=y)
return self._partial_fit(
X, y, np.unique(y), _refit=True, sample_weight=sample_weight
)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like of shape (n_samples, n_features)
New data points to incorporate into the running mean and variance.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
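Examples
--------
Illustrative doctest: merging a past batch (n=2, mean 0, variance 1)
with new points [2, 4] reproduces the pooled sample statistics:
>>> import numpy as np
>>> GaussianNB._update_mean_variance(
...     2, np.array([0.0]), np.array([1.0]), np.array([[2.0], [4.0]]))
(array([1.5]), array([3.25]))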
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
return self._partial_fit(
X, y, classes, _refit=False, sample_weight=sample_weight
)
def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = self._validate_data(X, y, reset=first_call)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided prior matches the number of classes
if len(priors) != n_classes:
raise ValueError("Number of priors must match number of classes.")
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError("The sum of the priors should be 1.")
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError("Priors must be non-negative.")
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError(
"The target label(s) %s in y do not exist in the initial classes %s"
% (unique_y[~unique_y_in_classes], classes)
)
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i
)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
# Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * self.var_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
_update_feature_log_prob(alpha)
_count(X, Y)
"""
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
"fit_prior": ["boolean"],
"class_prior": ["array-like", None],
"force_alpha": ["boolean", Hidden(StrOptions({"warn"}))],
}
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha="warn"):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.force_alpha = force_alpha
@abstractmethod
def _count(self, X, Y):
"""Update counts that are used to calculate probabilities.
The counts make up a sufficient statistic extracted from the data.
Accordingly, this method is called each time `fit` or `partial_fit`
update the model. `class_count_` and `feature_count_` must be updated
here along with any model specific counts.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
Y : ndarray of shape (n_samples, n_classes)
Binarized class labels.
"""
@abstractmethod
def _update_feature_log_prob(self, alpha):
"""Update feature log probabilities based on counts.
This method is called each time `fit` or `partial_fit` update the
model.
Parameters
----------
alpha : float
smoothing parameter. See :meth:`_check_alpha`.
"""
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, accept_sparse="csr", reset=False)
def _check_X_y(self, X, y, reset=True):
"""Validate X and y in fit methods."""
return self._validate_data(X, y, accept_sparse="csr", reset=reset)
def _update_class_log_prior(self, class_prior=None):
"""Update class log priors.
The class log priors are based on `class_prior`, class count or the
number of classes. This method is called each time `fit` or
`partial_fit` update the model.
"""
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
alpha = (
np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha
)
alpha_min = np.min(alpha)
if isinstance(alpha, np.ndarray):
if not alpha.shape[0] == self.n_features_in_:
raise ValueError(
"When alpha is an array, it should contains `n_features`. "
f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}."
)
# check that all alpha are positive
if alpha_min < 0:
raise ValueError("All values in alpha must be greater than 0.")
alpha_lower_bound = 1e-10
# TODO(1.4): Replace w/ deprecation of self.force_alpha
# See gh #22269
_force_alpha = self.force_alpha
if _force_alpha == "warn" and alpha_min < alpha_lower_bound:
_force_alpha = False
warnings.warn(
"The default value for `force_alpha` will change to `True` in 1.4. To"
" suppress this warning, manually set the value of `force_alpha`.",
FutureWarning,
)
if alpha_min < alpha_lower_bound and not _force_alpha:
warnings.warn(
"alpha too small will result in numeric errors, setting alpha ="
f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha"
" unchanged."
)
return np.maximum(alpha, alpha_lower_bound)
return alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
first_call = not hasattr(self, "classes_")
if first_call:
self._validate_params()
X, y = self._check_X_y(X, y, reset=first_call)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_classes = len(classes)
self._init_counters(n_classes, n_features)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial_fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
X, y = self._check_X_y(X, y)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_classes = Y.shape[1]
self._init_counters(n_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64)
def _more_tags(self):
return {"poor_score": True}
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models.
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB(force_alpha=True)
>>> clf.fit(X, y)
MultinomialNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
def __init__(
self, *, alpha=1.0, force_alpha="warn", fit_prior=True, class_prior=None
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
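# Worked example (illustrative): for one class with raw counts [2, 0]
# and alpha=1, the smoothed counts are [3, 1], so the feature log
# probabilities become log(3/4) and log(1/4).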
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
        Smoothed empirical log probability for each class. Only used in the
        edge case of a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB(force_alpha=True)
>>> clf.fit(X, y)
ComplementNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"norm": ["boolean"],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
norm=False,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.norm = norm
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
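# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# A minimal NumPy mirror of ``_update_feature_log_prob`` above: weights come
# from the *complement* of each class (the pooled counts of all other
# classes) and are negated so the usual argmax decision rule still applies.
# All names below are hypothetical.
def _demo_complement_weights():
    import numpy as np

    feature_count = np.array([[4.0, 0.0], [1.0, 5.0]])  # (n_classes, n_features)
    feature_all = feature_count.sum(axis=0)
    alpha = 1.0
    comp_count = feature_all + alpha - feature_count
    logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
    # Negation turns "smallest complement weight" into an argmax rule.
    return -logged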
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
        (set alpha=0 and force_alpha=True for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : The Complement Naive Bayes classifier
described in Rennie et al. (2003).
GaussianNB : Gaussian Naive Bayes (GaussianNB).
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB(force_alpha=True)
>>> clf.fit(X, Y)
BernoulliNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"binarize": [None, Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
binarize=0.0,
fit_prior=True,
class_prior=None,
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
self.binarize = binarize
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y, reset=True):
X, y = super()._check_X_y(X, y, reset=reset)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_features = self.feature_log_prob_.shape[1]
n_features_X = X.shape[1]
if n_features_X != n_features:
raise ValueError(
"Expected input with %d features, got %d instead"
% (n_features, n_features_X)
)
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
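# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# A hypothetical check of the rearrangement used in
# ``BernoulliNB._joint_log_likelihood`` above:
#     X @ P.T + (1 - X) @ N.T  ==  X @ (P - N).T + N.sum(axis=1)
# which avoids materialising the dense matrix ``1 - X``.
def _demo_bernoulli_identity():
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randint(2, size=(5, 3)).astype(float)
    P = np.log(rng.uniform(0.1, 0.9, size=(2, 3)))  # log P(x_i = 1 | y)
    N = np.log(1 - np.exp(P))                       # log P(x_i = 0 | y)
    direct = X @ P.T + (1 - X) @ N.T
    rearranged = X @ (P - N).T + N.sum(axis=1)
    assert np.allclose(direct, rearranged)
    return rearranged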
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features.
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
        (set alpha=0 and force_alpha=True for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
          `n_categories` for each feature.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB(force_alpha=True)
>>> clf.fit(X, y)
CategoricalNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"min_categories": [
None,
"array-like",
Interval(Integral, 1, None, closed="left"),
],
"alpha": [Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
min_categories=None,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().partial_fit(X, y, classes, sample_weight=sample_weight)
def _more_tags(self):
return {"requires_positive_X": True}
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = self._validate_data(
X, dtype="int", accept_sparse=False, force_all_finite=True, reset=False
)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y, reset=True):
X, y = self._validate_data(
X, y, dtype="int", accept_sparse=False, force_all_finite=True, reset=reset
)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_classes, 0)) for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
        # rely on max to infer n_categories; categories are encoded as 0...n-1
n_categories_X = X.max(axis=0) + 1
min_categories_ = np.array(min_categories)
if min_categories is not None:
if not np.issubdtype(min_categories_.dtype, np.signedinteger):
raise ValueError(
"'min_categories' should have integral type. Got "
f"{min_categories_.dtype} instead."
)
n_categories_ = np.maximum(n_categories_X, min_categories_, dtype=np.int64)
if n_categories_.shape != n_categories_X.shape:
raise ValueError(
f"'min_categories' should have shape ({X.shape[1]},"
") when an array-like is provided. Got"
f" {min_categories_.shape} instead."
)
return n_categories_
else:
return n_categories_X
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], "constant")
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
self.n_categories_ = self._validate_n_categories(X, self.min_categories)
for i in range(self.n_features_in_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], self.n_categories_[i] - 1
)
_update_cat_count(
X_feature, Y, self.category_count_[i], self.class_count_.shape[0]
)
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_in_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) - np.log(smoothed_class_count.reshape(-1, 1))
)
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
self._check_n_features(X, reset=False)
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_in_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll
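# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# The ``fit`` docstring above assumes the categories of each feature are
# already encoded as 0..n-1 integers; a minimal, hypothetical way to obtain
# that encoding is to chain an ``OrdinalEncoder`` in front of the classifier.
def _demo_categorical_pipeline():
    import numpy as np
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import OrdinalEncoder

    X = np.array([["red", "s"], ["blue", "m"], ["red", "m"], ["blue", "s"]])
    y = np.array([0, 1, 0, 1])
    model = make_pipeline(
        OrdinalEncoder(dtype=np.int64), CategoricalNB(force_alpha=True)
    )
    model.fit(X, y)
    return model.predict(X)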
| bsd-3-clause | 69882a49372765f87fafeff8dc433067 | 35.796078 | 92 | 0.602419 | 3.962694 | false | false | false | false |
scikit-learn/scikit-learn | examples/neighbors/plot_caching_nearest_neighbors.py | 13 | 2682 | """
=========================
Caching nearest neighbors
=========================
This example demonstrates how to precompute the k nearest neighbors before
using them in KNeighborsClassifier. KNeighborsClassifier can compute the
nearest neighbors internally, but precomputing them can have several benefits,
such as finer parameter control, caching for multiple uses, or custom
implementations.
Here we use the caching property of pipelines to cache the nearest neighbors
graph between multiple fits of KNeighborsClassifier. The first call is slow
since it computes the neighbors graph, while subsequent calls are faster as they
do not need to recompute the graph. Here the durations are small since the
dataset is small, but the gain can be more substantial when the dataset grows
larger, or when the grid of parameters to search is large.
"""
# Author: Tom Dupre la Tour
#
# License: BSD 3 clause
from tempfile import TemporaryDirectory
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsTransformer, KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_digits
from sklearn.pipeline import Pipeline
X, y = load_digits(return_X_y=True)
n_neighbors_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# The transformer computes the nearest neighbors graph using the maximum number
# of neighbors necessary in the grid search. The classifier model filters the
# nearest neighbors graph as required by its own n_neighbors parameter.
graph_model = KNeighborsTransformer(n_neighbors=max(n_neighbors_list), mode="distance")
classifier_model = KNeighborsClassifier(metric="precomputed")
# Note that we give `memory` a directory to cache the graph computation
# that will be used several times when tuning the hyperparameters of the
# classifier.
with TemporaryDirectory(prefix="sklearn_graph_cache_") as tmpdir:
full_model = Pipeline(
steps=[("graph", graph_model), ("classifier", classifier_model)], memory=tmpdir
)
param_grid = {"classifier__n_neighbors": n_neighbors_list}
grid_model = GridSearchCV(full_model, param_grid)
grid_model.fit(X, y)
# Plot the results of the grid search.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_test_score"],
yerr=grid_model.cv_results_["std_test_score"],
)
axes[0].set(xlabel="n_neighbors", title="Classification accuracy")
axes[1].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_fit_time"],
yerr=grid_model.cv_results_["std_fit_time"],
color="r",
)
axes[1].set(xlabel="n_neighbors", title="Fit time (with caching)")
fig.tight_layout()
plt.show()
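# --- Illustrative sketch (added for exposition; not part of the original
# example). A self-contained, hypothetical timing of the cache benefit: the
# second grid search below reuses the neighbors graph cached by the first
# one, so it is typically noticeably faster.
def _demo_cache_reuse():
    import time

    with TemporaryDirectory(prefix="sklearn_graph_cache_") as cache_dir:
        pipe = Pipeline(
            steps=[("graph", graph_model), ("classifier", classifier_model)],
            memory=cache_dir,
        )
        timings = []
        for _ in range(2):
            tic = time.time()
            GridSearchCV(pipe, param_grid).fit(X, y)
            timings.append(time.time() - tic)
        return timings  # the second entry is expected to be smaller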
| bsd-3-clause | b021d8548d2bcc26fbf50417b18b48b1 | 37.869565 | 87 | 0.746458 | 3.831429 | false | false | false | false |
scikit-learn/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 12 | 5306 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process model
as well as its probabilistic nature in the form of a pointwise 95% confidence
interval.
Note that `alpha` is a parameter to control the strength of the Tikhonov
regularization on the assumed training points' covariance matrix.
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
# %%
# Dataset generation
# ------------------
#
# We will start by generating a synthetic dataset. The true generative process
# is defined as :math:`f(x) = x \sin(x)`.
import numpy as np
X = np.linspace(start=0, stop=10, num=1_000).reshape(-1, 1)
y = np.squeeze(X * np.sin(X))
# %%
import matplotlib.pyplot as plt
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("True generative process")
# %%
# We will use this dataset in the next experiment to illustrate how Gaussian
# Process regression is working.
#
# Example with noise-free target
# ------------------------------
#
# In this first example, we will use the true generative process without
# adding any noise. For training the Gaussian Process regression, we will only
# select a few samples.
rng = np.random.RandomState(1)
training_indices = rng.choice(np.arange(y.size), size=6, replace=False)
X_train, y_train = X[training_indices], y[training_indices]
# %%
# Now, we fit a Gaussian process on these few training data samples. We will
# use a radial basis function (RBF) kernel and a constant parameter to fit the
# amplitude.
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
kernel = 1 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e2))
gaussian_process = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gaussian_process.fit(X_train, y_train)
gaussian_process.kernel_
# %%
# After fitting our model, we see that the hyperparameters of the kernel have
# been optimized. Now, we will use our kernel to compute the mean prediction
# of the full dataset and plot the 95% confidence interval.
mean_prediction, std_prediction = gaussian_process.predict(X, return_std=True)
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
plt.scatter(X_train, y_train, label="Observations")
plt.plot(X, mean_prediction, label="Mean prediction")
plt.fill_between(
X.ravel(),
mean_prediction - 1.96 * std_prediction,
mean_prediction + 1.96 * std_prediction,
alpha=0.5,
label=r"95% confidence interval",
)
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Gaussian process regression on noise-free dataset")
# %%
# We see that for a prediction made on a data point close to the ones from the
# training set, the 95% confidence interval has a small amplitude. Whenever a
# sample falls far from the training data, our model's prediction is less
# accurate and less precise (higher uncertainty).
#
# Example with noisy targets
# --------------------------
#
# We can repeat a similar experiment, this time adding noise to the target.
# This makes it possible to see the effect of the noise on the fitted model.
#
# We add some random Gaussian noise to the target with an arbitrary
# standard deviation.
noise_std = 0.75
y_train_noisy = y_train + rng.normal(loc=0.0, scale=noise_std, size=y_train.shape)
# %%
# We create a similar Gaussian process model. In addition to the kernel, this
# time, we specify the parameter `alpha` which can be interpreted as the
# variance of a Gaussian noise.
gaussian_process = GaussianProcessRegressor(
kernel=kernel, alpha=noise_std**2, n_restarts_optimizer=9
)
gaussian_process.fit(X_train, y_train_noisy)
mean_prediction, std_prediction = gaussian_process.predict(X, return_std=True)
# %%
# Let's plot the mean prediction and the uncertainty region as before.
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
plt.errorbar(
X_train,
y_train_noisy,
noise_std,
linestyle="None",
color="tab:blue",
marker=".",
markersize=10,
label="Observations",
)
plt.plot(X, mean_prediction, label="Mean prediction")
plt.fill_between(
X.ravel(),
mean_prediction - 1.96 * std_prediction,
mean_prediction + 1.96 * std_prediction,
color="tab:orange",
alpha=0.5,
label=r"95% confidence interval",
)
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Gaussian process regression on a noisy dataset")
# %%
# The noise affects the predictions close to the training samples: the
# predictive uncertainty near the training samples is larger because we
# explicitly model a given level of target noise, independent of the input
# variable.
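# %%
# Illustrative addition (not part of the original example): the kernel
# hyperparameters learned on the noisy targets can be inspected directly.
print(gaussian_process.kernel_)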
| bsd-3-clause | 781aea376de21d023fc3efea56b619d1 | 33.679739 | 82 | 0.705051 | 3.477064 | false | false | false | false |
pydanny/cookiecutter-django | {{cookiecutter.project_slug}}/config/urls.py | 2 | 2353 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
{%- if cookiecutter.use_async == 'y' %}
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
{%- endif %}
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
{%- if cookiecutter.use_drf == 'y' %}
from rest_framework.authtoken.views import obtain_auth_token
{%- endif %}
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% raw %}{% url 'admin:index' %}{% endraw %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("{{ cookiecutter.project_slug }}.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
{%- if cookiecutter.use_async == 'y' %}
if settings.DEBUG:
# Static file serving when using Gunicorn + Uvicorn for local web socket development
urlpatterns += staticfiles_urlpatterns()
{%- endif %}
{% if cookiecutter.use_drf == 'y' %}
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
]
{%- endif %}
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in the browser to see what these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| bsd-3-clause | c8b5c437c6a2c6c0d8d83a1fbc6b6266 | 35.2 | 93 | 0.640034 | 3.857377 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/preprocessing/_data.py | 4 | 119368 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# Eric Chang <ericchang2017@u.northwestern.edu>
# License: BSD 3 clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from scipy import stats
from scipy import optimize
from scipy.special import boxcox
from ..base import (
BaseEstimator,
TransformerMixin,
OneToOneFeatureMixin,
ClassNamePrefixFeaturesOutMixin,
)
from ..utils import check_array
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import _incremental_mean_and_var, row_norms
from ..utils.sparsefuncs_fast import (
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
)
from ..utils.sparsefuncs import (
inplace_column_scale,
mean_variance_axis,
incr_mean_variance_axis,
min_max_axis,
)
from ..utils.validation import (
check_is_fitted,
check_random_state,
_check_sample_weight,
FLOAT_DTYPES,
)
from ._encoders import OneHotEncoder
BOUNDS_THRESHOLD = 1e-7
__all__ = [
"Binarizer",
"KernelCenterer",
"MinMaxScaler",
"MaxAbsScaler",
"Normalizer",
"OneHotEncoder",
"RobustScaler",
"StandardScaler",
"QuantileTransformer",
"PowerTransformer",
"add_dummy_feature",
"binarize",
"normalize",
"scale",
"robust_scale",
"maxabs_scale",
"minmax_scale",
"quantile_transform",
"power_transform",
]
def _is_constant_feature(var, mean, n_samples):
"""Detect if a feature is indistinguishable from a constant feature.
The detection is based on its computed variance and on the theoretical
error bounds of the '2 pass algorithm' for variance computation.
See "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
"""
# In scikit-learn, variance is always computed using float64 accumulators.
eps = np.finfo(np.float64).eps
upper_bound = n_samples * eps * var + (n_samples * mean * eps) ** 2
return var <= upper_bound
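# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# A hypothetical check of the bound above: an exactly constant feature is
# flagged, while a genuinely varying one is not.
def _demo_is_constant_feature():
    n = 1_000
    constant = np.full(n, 1e9)           # variance is exactly zero
    varying = np.linspace(0.0, 1.0, n)   # genuine spread
    flags = [
        _is_constant_feature(x.var(), x.mean(), n) for x in (constant, varying)
    ]
    return flags  # expected: [True, False]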
def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
"""Set scales of near constant features to 1.
The goal is to avoid division by very small or zero values.
Near constant features are detected automatically by identifying
scales close to machine precision unless they are precomputed by
the caller and passed with the `constant_mask` kwarg.
Typically for standard scaling, the scales are the standard
deviation while near constant features are better detected on the
computed variances which are closer to machine precision by
construction.
"""
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0.0:
scale = 1.0
return scale
elif isinstance(scale, np.ndarray):
if constant_mask is None:
# Detect near constant values to avoid dividing by a very small
# value that could lead to surprising results and numerical
# stability issues.
constant_mask = scale < 10 * np.finfo(scale.dtype).eps
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[constant_mask] = 1.0
return scale
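# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# A hypothetical demonstration of the guard above: the near-zero scale in the
# middle position is replaced by 1.0, so dividing by it later is a no-op.
def _demo_handle_zeros_in_scale():
    scale = np.array([2.0, 1e-17, 3.0])
    return _handle_zeros_in_scale(scale, copy=True)  # -> array([2., 1., 3.])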
def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis.
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
Axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : bool, default=True
If True, center the data before scaling.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : bool, default=True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array or a scipy.sparse CSC matrix and
        if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
StandardScaler : Performs scaling to unit variance using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.StandardScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(StandardScaler(), LogisticRegression())`.
""" # noqa
X = check_array(
X,
accept_sparse="csc",
copy=copy,
ensure_2d=False,
estimator="the scale function",
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives."
)
if axis != 0:
raise ValueError(
"Can only scale sparse matrix on axis=0, got axis=%d" % axis
)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.nanmean(X, axis)
if with_std:
scale_ = np.nanstd(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = np.nanmean(Xr, axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn(
"Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features."
)
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = np.nanmean(Xr, axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn(
"Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. "
)
Xr -= mean_2
return X
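# --- Illustrative sketch (added for exposition; not part of scikit-learn). ---
# The leakage warning in the docstring above, made concrete: scaling
# statistics must be learned on the training split only. A hypothetical
# pipeline takes care of this automatically at fit and predict time.
def _demo_scale_without_leakage():
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    from sklearn.pipeline import make_pipeline

    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 3))
    y = (X[:, 0] > 0).astype(int)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    pipe = make_pipeline(StandardScaler(), LogisticRegression())
    pipe.fit(X_train, y_train)  # scaler statistics come from X_train only
    return pipe.score(X_test, y_test)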
class MinMaxScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, e.g. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : bool, default=True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array).
clip : bool, default=False
Set to True to clip transformed values of held-out data to
provided `feature range`.
.. versionadded:: 0.24
Attributes
----------
min_ : ndarray of shape (n_features,)
Per feature adjustment for minimum. Equivalent to
``min - X.min(axis=0) * self.scale_``
scale_ : ndarray of shape (n_features,)
Per feature relative scaling of the data. Equivalent to
``(max - min) / (X.max(axis=0) - X.min(axis=0))``
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray of shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray of shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray of shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_samples_seen_ : int
The number of samples processed by the estimator.
It will be reset on new calls to fit, but increments across
``partial_fit`` calls.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
minmax_scale : Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler()
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[0. 0. ]
[0.25 0.25]
[0.5 0.5 ]
[1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[1.5 0. ]]
"""
_parameter_constraints: dict = {
"feature_range": [tuple],
"copy": ["boolean"],
"clip": ["boolean"],
}
def __init__(self, feature_range=(0, 1), *, copy=True, clip=False):
self.feature_range = feature_range
self.copy = copy
self.clip = clip
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError(
"Minimum of desired feature range must be smaller than maximum. Got %s."
% str(feature_range)
)
if sparse.issparse(X):
raise TypeError(
"MinMaxScaler does not support sparse input. "
"Consider using MaxAbsScaler instead."
)
first_pass = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
reset=first_pass,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
data_min = np.nanmin(X, axis=0)
data_max = np.nanmax(X, axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(
data_range, copy=True
)
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scale features of X according to feature_range.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data that will be transformed.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
X = self._validate_data(
X,
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
reset=False,
)
X *= self.scale_
X += self.min_
if self.clip:
np.clip(X, self.feature_range[0], self.feature_range[1], out=X)
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data that will be transformed. It cannot be sparse.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
X = check_array(
X, copy=self.copy, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
)
X -= self.min_
X /= self.scale_
return X
def _more_tags(self):
return {"allow_nan": True}
def minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):
"""Transform features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by (when ``axis=0``)::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
The transformation is calculated as (when ``axis=0``)::
X_scaled = scale * X + min - X.min(axis=0) * scale
where scale = (max - min) / (X.max(axis=0) - X.min(axis=0))
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`~sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int, default=0
Axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Returns
-------
X_tr : ndarray of shape (n_samples, n_features)
The transformed data.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.minmax_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.MinMaxScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(MinMaxScaler(), LogisticRegression())`.
See Also
--------
MinMaxScaler : Performs scaling to a given range using the Transformer
API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(
X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Standardize features by removing the mean and scaling to unit variance.
The standard score of a sample `x` is calculated as:
z = (x - u) / s
where `u` is the mean of the training samples or zero if `with_mean=False`,
and `s` is the standard deviation of the training samples or one if
`with_std=False`.
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using
:meth:`transform`.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : bool, default=True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : bool, default=True
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : bool, default=True
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray of shape (n_features,) or None
Per feature relative scaling of the data to achieve zero mean and unit
variance. Generally this is calculated using `np.sqrt(var_)`. If a
variance is zero, we can't achieve unit variance, and the data is left
as-is, giving a scaling factor of 1. `scale_` is equal to `None`
when `with_std=False`.
.. versionadded:: 0.17
*scale_*
mean_ : ndarray of shape (n_features,) or None
The mean value for each feature in the training set.
Equal to ``None`` when ``with_mean=False``.
var_ : ndarray of shape (n_features,) or None
The variance for each feature in the training set. Used to compute
`scale_`. Equal to ``None`` when ``with_std=False``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_seen_ : int or ndarray of shape (n_features,)
The number of samples processed by the estimator for each feature.
If there are no missing samples, the ``n_samples_seen`` will be an
integer, otherwise it will be an array of dtype int. If
`sample_weights` are used it will be a float (if no missing data)
or an array of dtype float that sums the weights seen so far.
Will be reset on new calls to fit, but increments across
``partial_fit`` calls.
See Also
--------
scale : Equivalent function without the estimator API.
:class:`~sklearn.decomposition.PCA` : Further removes the linear
correlation across features with 'whiten=True'.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
We use a biased estimator for the standard deviation, equivalent to
`numpy.std(x, ddof=0)`. Note that the choice of `ddof` is unlikely to
affect model performance.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler()
>>> print(scaler.mean_)
[0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[3. 3.]]
"""
_parameter_constraints: dict = {
"copy": ["boolean"],
"with_mean": ["boolean"],
"with_std": ["boolean"],
}
def __init__(self, *, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None, sample_weight=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y, sample_weight)
def partial_fit(self, X, y=None, sample_weight=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.24
parameter *sample_weight* support to StandardScaler.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_call = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
reset=first_call,
)
n_features = X.shape[1]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        # Even in the case of `with_mean=False`, we update the mean anyway.
        # This is needed for the incremental computation of the variance;
        # see incr_mean_variance_axis and _incremental_mean_and_var.
        # If n_samples_seen_ is an integer (i.e. no missing values), we need to
        # transform it to a NumPy array of shape (n_features,) as required by
        # incr_mean_variance_axis and _incremental_mean_and_var.
dtype = np.int64 if sample_weight is None else X.dtype
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = np.zeros(n_features, dtype=dtype)
elif np.size(self.n_samples_seen_) == 1:
self.n_samples_seen_ = np.repeat(self.n_samples_seen_, X.shape[1])
self.n_samples_seen_ = self.n_samples_seen_.astype(dtype, copy=False)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
sparse_constructor = (
sparse.csr_matrix if X.format == "csr" else sparse.csc_matrix
)
if self.with_std:
# First pass
if not hasattr(self, "scale_"):
self.mean_, self.var_, self.n_samples_seen_ = mean_variance_axis(
X, axis=0, weights=sample_weight, return_sum_weights=True
)
# Next passes
else:
(
self.mean_,
self.var_,
self.n_samples_seen_,
) = incr_mean_variance_axis(
X,
axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_,
weights=sample_weight,
)
# We force the mean and variance to float64 for large arrays
# See https://github.com/scikit-learn/scikit-learn/pull/12338
self.mean_ = self.mean_.astype(np.float64, copy=False)
self.var_ = self.var_.astype(np.float64, copy=False)
else:
self.mean_ = None # as with_mean must be False for sparse
self.var_ = None
weights = _check_sample_weight(sample_weight, X)
sum_weights_nan = weights @ sparse_constructor(
(np.isnan(X.data), X.indices, X.indptr), shape=X.shape
)
self.n_samples_seen_ += (np.sum(weights) - sum_weights_nan).astype(
dtype
)
else:
# First pass
if not hasattr(self, "scale_"):
self.mean_ = 0.0
if self.with_std:
self.var_ = 0.0
else:
self.var_ = None
if not self.with_mean and not self.with_std:
self.mean_ = None
self.var_ = None
self.n_samples_seen_ += X.shape[0] - np.isnan(X).sum(axis=0)
else:
self.mean_, self.var_, self.n_samples_seen_ = _incremental_mean_and_var(
X,
self.mean_,
self.var_,
self.n_samples_seen_,
sample_weight=sample_weight,
)
# for backward-compatibility, reduce n_samples_seen_ to an integer
# if the number of samples is the same for each feature (i.e. no
# missing values)
if np.ptp(self.n_samples_seen_) == 0:
self.n_samples_seen_ = self.n_samples_seen_[0]
if self.with_std:
# Extract the list of near constant features on the raw variances,
# before taking the square root.
constant_mask = _is_constant_feature(
self.var_, self.mean_, self.n_samples_seen_
)
self.scale_ = _handle_zeros_in_scale(
np.sqrt(self.var_), copy=False, constant_mask=constant_mask
)
else:
self.scale_ = None
return self
def transform(self, X, copy=None):
"""Perform standardization by centering and scaling.
Parameters
----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
copy = copy if copy is not None else self.copy
X = self._validate_data(
X,
reset=False,
accept_sparse="csr",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
copy = copy if copy is not None else self.copy
X = check_array(
X,
accept_sparse="csr",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives."
)
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
def _more_tags(self):
return {"allow_nan": True, "preserves_dtype": [np.float64, np.float32]}
class MaxAbsScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Scale each feature by its maximum absolute value.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray of shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray of shape (n_features,)
Per feature maximum absolute value.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See Also
--------
maxabs_scale : Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler()
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
"""
_parameter_constraints: dict = {"copy": ["boolean"]}
def __init__(self, *, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to a very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
first_pass = not hasattr(self, "n_samples_seen_")
X = self._validate_data(
X,
reset=first_pass,
accept_sparse=("csr", "csc"),
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.nanmax(np.abs(X), axis=0)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs, copy=True)
return self
def transform(self, X):
"""Scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be scaled.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
reset=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be transformed back.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def _more_tags(self):
return {"allow_nan": True}
def maxabs_scale(X, *, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
axis : int, default=0
Axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.MaxAbsScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`.
See Also
--------
MaxAbsScaler : Performs scaling to the [-1, 1] range using
the Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
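Examples
--------
A minimal usage sketch, mirroring the :class:`MaxAbsScaler` docstring
example (each column is divided by its maximum absolute value):
>>> from sklearn.preprocessing import maxabs_scale
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> maxabs_scale(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])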
"""
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=False,
ensure_2d=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Scale features using statistics that are robust to outliers.
This scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature by
computing the relevant statistics on the samples in the training
set. Median and interquartile range are then stored to be used on
later data using the :meth:`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : bool, default=True
If `True`, center the data before scaling.
This will cause :meth:`transform` to raise an exception when attempted
on sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : bool, default=True
If `True`, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0, \
default=(25.0, 75.0)
Quantile range used to calculate `scale_`. By default this is equal to
the IQR, i.e., `q_min` is the first quartile and `q_max` is the third
quartile.
.. versionadded:: 0.18
copy : bool, default=True
If `False`, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
unit_variance : bool, default=False
If `True`, scale data so that normally distributed features have a
variance of 1. In general, if the difference between the x-values of
`q_max` and `q_min` for a standard normal distribution is greater
than 1, the dataset will be scaled down. If less than 1, the dataset
will be scaled up.
.. versionadded:: 0.24
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
robust_scale : Equivalent function without the estimator API.
sklearn.decomposition.PCA : Further removes the linear correlation across
features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median
https://en.wikipedia.org/wiki/Interquartile_range
Examples
--------
>>> from sklearn.preprocessing import RobustScaler
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> transformer = RobustScaler().fit(X)
>>> transformer
RobustScaler()
>>> transformer.transform(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])
"""
_parameter_constraints: dict = {
"with_centering": ["boolean"],
"with_scaling": ["boolean"],
"quantile_range": [tuple],
"copy": ["boolean"],
"unit_variance": ["boolean"],
}
def __init__(
self,
*,
with_centering=True,
with_scaling=True,
quantile_range=(25.0, 75.0),
copy=True,
unit_variance=False,
):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.unit_variance = unit_variance
self.copy = copy
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the median and quantiles
used for later scaling along the features axis.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted scaler.
"""
self._validate_params()
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = self._validate_data(
X,
accept_sparse="csc",
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" % str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives."
)
self.center_ = np.nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
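# Materialize the column from its stored nonzeros, padded with
# zeros so that implicit zeros count toward the percentiles.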
column_nnz_data = X.data[
X.indptr[feature_idx] : X.indptr[feature_idx + 1]
]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(np.nanpercentile(column_data, self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
if self.unit_variance:
adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0)
self.scale_ = self.scale_ / adjust
else:
self.scale_ = None
return self
def transform(self, X):
"""Center and scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the specified axis.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = self._validate_data(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
reset=False,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The rescaled data to be transformed back.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def _more_tags(self):
return {"allow_nan": True}
def robust_scale(
X,
*,
axis=0,
with_centering=True,
with_scaling=True,
quantile_range=(25.0, 75.0),
copy=True,
unit_variance=False,
):
"""Standardize a dataset along any axis.
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to center and scale.
axis : int, default=0
Axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : bool, default=True
If `True`, center the data before scaling.
with_scaling : bool, default=True
If `True`, scale the data to the quantile range given by
`quantile_range` (by default, the interquartile range).
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0,\
default=(25.0, 75.0)
Quantile range used to calculate `scale_`. By default this is equal to
the IQR, i.e., `q_min` is the first quartile and `q_max` is the third
quartile.
.. versionadded:: 0.18
copy : bool, default=True
Set to `False` to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
unit_variance : bool, default=False
If `True`, scale data so that normally distributed features have a
variance of 1. In general, if the difference between the x-values of
`q_max` and `q_min` for a standard normal distribution is greater
than 1, the dataset will be scaled down. If less than 1, the dataset
will be scaled up.
.. versionadded:: 0.24
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
RobustScaler : Performs centering and scaling using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if they expect the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.robust_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.RobustScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(RobustScaler(), LogisticRegression())`.
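Examples
--------
A minimal sketch mirroring the :class:`RobustScaler` docstring example
(center each column to its median, then scale by its IQR):
>>> from sklearn.preprocessing import robust_scale
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> robust_scale(X)
array([[ 0. , -2. , 0. ],
[-1. , 0. , 0.4],
[ 1. , 0. , -1.6]])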
"""
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=False,
ensure_2d=False,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(
with_centering=with_centering,
with_scaling=with_scaling,
quantile_range=quantile_range,
unit_variance=unit_variance,
copy=copy,
)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
unnecessary copy.
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non-zero sample (or each non-zero
feature if axis is 0).
axis : {0, 1}, default=1
Define axis used to normalize the data along. If 1, independently
normalize each sample, otherwise (if 0) normalize each feature.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : bool, default=False
Whether to return the computed norms.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See Also
--------
Normalizer : Performs normalization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
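Examples
--------
A minimal sketch mirroring the :class:`Normalizer` docstring example
(each row is rescaled to unit l2 norm):
>>> from sklearn.preprocessing import normalize
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> normalize(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])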
"""
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = "csc"
elif axis == 1:
sparse_format = "csr"
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(
X,
accept_sparse=sparse_format,
copy=copy,
estimator="the normalize function",
dtype=FLOAT_DTYPES,
)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
inplace_csr_row_normalize_l1(X)
elif norm == "l2":
inplace_csr_row_normalize_l2(X)
elif norm == "max":
mins, maxes = min_max_axis(X, 1)
norms = np.maximum(abs(mins), maxes)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == "l1":
norms = np.abs(X).sum(axis=1)
elif norm == "l2":
norms = row_norms(X)
elif norm == "max":
norms = np.max(abs(X), axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
class Normalizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1, l2 or inf) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrices (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample. If norm='max'
is used, values will be rescaled by the maximum of the absolute
values.
copy : bool, default=True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
normalize : Equivalent function without the estimator API.
Notes
-----
This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> from sklearn.preprocessing import Normalizer
>>> X = [[4, 1, 2, 2],
... [1, 3, 9, 3],
... [5, 7, 5, 1]]
>>> transformer = Normalizer().fit(X) # fit does nothing.
>>> transformer
Normalizer()
>>> transformer.transform(X)
array([[0.8, 0.2, 0.4, 0.4],
[0.1, 0.3, 0.9, 0.3],
[0.5, 0.7, 0.5, 0.1]])
"""
_parameter_constraints: dict = {
"norm": [StrOptions({"l1", "l2", "max"})],
"copy": ["boolean"],
}
def __init__(self, norm="l2", *, copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to estimate the normalization parameters.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
self._validate_data(X, accept_sparse="csr")
return self
def transform(self, X, copy=None):
"""Scale each non zero row of X to unit norm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an unnecessary copy.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
X = self._validate_data(X, accept_sparse="csr", reset=False)
return normalize(X, norm=self.norm, axis=1, copy=copy)
def _more_tags(self):
return {"stateless": True}
def binarize(X, *, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix.
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
unnecessary copy.
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
Set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
Binarizer : Performs binarization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
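Examples
--------
A minimal sketch mirroring the :class:`Binarizer` docstring example
(values above the default threshold of 0 map to 1, the rest to 0):
>>> from sklearn.preprocessing import binarize
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> binarize(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])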
"""
X = check_array(X, accept_sparse=["csr", "csc"], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError("Cannot binarize a sparse matrix with threshold < 0")
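# Only the explicitly stored entries need thresholding; entries
# binarized to 0 are then pruned from the sparse structure.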
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Binarize data (set feature values to 0 or 1) according to a threshold.
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
Set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
binarize : Equivalent function without the estimator API.
KBinsDiscretizer : Bin continuous data into intervals.
OneHotEncoder : Encode categorical features as a one-hot numeric array.
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
Examples
--------
>>> from sklearn.preprocessing import Binarizer
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = Binarizer().fit(X) # fit does nothing.
>>> transformer
Binarizer()
>>> transformer.transform(X)
array([[1., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
"""
_parameter_constraints: dict = {
"threshold": [Real],
"copy": ["boolean"],
}
def __init__(self, *, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
self._validate_data(X, accept_sparse="csr")
return self
def transform(self, X, copy=None):
"""Binarize each element of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
unnecessary copy.
copy : bool, default=None
Copy the input X or not.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
copy = copy if copy is not None else self.copy
# TODO: This should be refactored because binarize also calls
# check_array
X = self._validate_data(X, accept_sparse=["csr", "csc"], copy=copy, reset=False)
return binarize(X, threshold=self.threshold, copy=False)
def _more_tags(self):
return {"stateless": True}
class KernelCenterer(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
r"""Center an arbitrary kernel matrix :math:`K`.
Let us define a kernel :math:`K` such that:
.. math::
K(X, Y) = \phi(X) . \phi(Y)^{T}
:math:`\phi(X)` is a function mapping of rows of :math:`X` to a
Hilbert space and :math:`K` is of shape `(n_samples, n_samples)`.
This class makes it possible to compute :math:`\tilde{K}(X, Y)` such that:
.. math::
\tilde{K}(X, Y) = \tilde{\phi}(X) . \tilde{\phi}(Y)^{T}
:math:`\tilde{\phi}(X)` is the centered mapped data in the Hilbert
space.
`KernelCenterer` centers the features without explicitly computing the
mapping :math:`\phi(\cdot)`. Working with centered kernels is sometimes
expected when dealing with algebraic computations such as eigendecomposition
for :class:`~sklearn.decomposition.KernelPCA` for instance.
Read more in the :ref:`User Guide <kernel_centering>`.
Attributes
----------
K_fit_rows_ : ndarray of shape (n_samples,)
Average of each column of kernel matrix.
K_fit_all_ : float
Average of kernel matrix.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.kernel_approximation.Nystroem : Approximate a kernel map
using a subset of the training data.
References
----------
.. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
"Nonlinear component analysis as a kernel eigenvalue problem."
Neural computation 10.5 (1998): 1299-1319.
<https://www.mlpack.org/papers/kpca.pdf>`_
Examples
--------
>>> from sklearn.preprocessing import KernelCenterer
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = [[ 1., -2., 2.],
... [ -2., 1., 3.],
... [ 4., 1., -2.]]
>>> K = pairwise_kernels(X, metric='linear')
>>> K
array([[ 9., 2., -2.],
[ 2., 14., -13.],
[ -2., -13., 21.]])
>>> transformer = KernelCenterer().fit(K)
>>> transformer
KernelCenterer()
>>> transformer.transform(K)
array([[ 5., 0., -5.],
[ 0., 14., -14.],
[ -5., -14., 19.]])
"""
def __init__(self):
# Needed for backported inspect.signature compatibility with PyPy
pass
def fit(self, K, y=None):
"""Fit KernelCenterer.
Parameters
----------
K : ndarray of shape (n_samples, n_samples)
Kernel matrix.
y : None
Ignored.
Returns
-------
self : object
Returns the instance itself.
"""
K = self._validate_data(K, dtype=FLOAT_DTYPES)
if K.shape[0] != K.shape[1]:
raise ValueError(
"Kernel matrix must be a square matrix."
" Input is a {}x{} matrix.".format(K.shape[0], K.shape[1])
)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, copy=True):
"""Center kernel matrix.
Parameters
----------
K : ndarray of shape (n_samples1, n_samples2)
Kernel matrix.
copy : bool, default=True
Set to False to perform inplace computation.
Returns
-------
K_new : ndarray of shape (n_samples1, n_samples2)
Centered kernel matrix.
"""
check_is_fitted(self)
K = self._validate_data(K, copy=copy, dtype=FLOAT_DTYPES, reset=False)
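# Double centering: subtract the fit-time column means (K_fit_rows_)
# and the row means of the current K (K_pred_cols), then add back the
# overall fit-time mean (K_fit_all_).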
K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _n_features_out(self):
"""Number of transformed output features."""
# Used by ClassNamePrefixFeaturesOutMixin. This model preserves the
# number of input features but this is not a one-to-one mapping in the
# usual sense. Hence the choice not to use OneToOneFeatureMixin to
# implement get_feature_names_out for this class.
return self.n_features_in_
def _more_tags(self):
return {"pairwise": True}
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
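# Other sparse formats (e.g. CSR): go through COO to prepend the
# dummy column, then convert back to the original class.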
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
class QuantileTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Feature values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.19
Parameters
----------
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, default=10_000
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
n_quantiles_ : int
The actual number of quantiles used to discretize the cumulative
distribution function.
quantiles_ : ndarray of shape (n_quantiles, n_features)
The values corresponding to the quantiles of reference.
references_ : ndarray of shape (n_quantiles, )
Quantiles of reference.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
quantile_transform : Equivalent function without the estimator API.
PowerTransformer : Perform mapping to a normal distribution using a power
transform.
StandardScaler : Perform standardization that is faster, but less robust
to outliers.
RobustScaler : Perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X)
array([...])
"""
_parameter_constraints: dict = {
"n_quantiles": [Interval(Integral, 1, None, closed="left")],
"output_distribution": [StrOptions({"uniform", "normal"})],
"ignore_implicit_zeros": ["boolean"],
"subsample": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
"copy": ["boolean"],
}
def __init__(
self,
*,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=10_000,
random_state=None,
copy=True,
):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn(
"'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect."
)
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(
n_samples, size=self.subsample, replace=False
)
col = col.take(subsample_idx, mode="clip")
self.quantiles_.append(np.nanpercentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
# Due to floating-point precision error in `np.nanpercentile`,
# make sure that quantiles are monotonically increasing.
# Upstream issue in numpy:
# https://github.com/numpy/numpy/issues/14685
self.quantiles_ = np.maximum.accumulate(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative. If a sparse matrix is provided,
it will be converted into a sparse ``csc_matrix``.
"""
n_samples, n_features = X.shape
references = self.references_ * 100
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx] : X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
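# Subsample the stored nonzeros proportionally so that the
# column's zero/nonzero ratio is preserved.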
column_subsample = self.subsample * len(column_nnz_data) // n_samples
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample, dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False
)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data), dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(np.nanpercentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
# due to floating-point precision error in `np.nanpercentile`,
# make sure the quantiles are monotonically increasing
# Upstream issue in numpy:
# https://github.com/numpy/numpy/issues/14685
self.quantiles_ = np.maximum.accumulate(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
if self.n_quantiles > self.subsample:
raise ValueError(
"The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles, self.subsample)
)
X = self._check_inputs(X, in_fit=True, copy=False)
n_samples = X.shape[0]
if self.n_quantiles > n_samples:
warnings.warn(
"n_quantiles (%s) is greater than the total number "
"of samples (%s). n_quantiles is set to "
"n_samples." % (self.n_quantiles, n_samples)
)
self.n_quantiles_ = max(1, min(self.n_quantiles, n_samples))
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles_, endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature."""
output_distribution = self.output_distribution
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform distribution
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.cdf(X_col)
# else output distribution is already a uniform distribution
# find index for lower and higher bounds
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
lower_bounds_idx = X_col - BOUNDS_THRESHOLD < lower_bound_x
upper_bounds_idx = X_col + BOUNDS_THRESHOLD > upper_bound_x
if output_distribution == "uniform":
lower_bounds_idx = X_col == lower_bound_x
upper_bounds_idx = X_col == upper_bound_x
isfinite_mask = ~np.isnan(X_col)
X_col_finite = X_col[isfinite_mask]
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
# If we don't do this, only one extreme of the duplicated is
# used (the upper when we do ascending, and the
# lower for descending). We take the mean of these two
X_col[isfinite_mask] = 0.5 * (
np.interp(X_col_finite, quantiles, self.references_)
- np.interp(-X_col_finite, -quantiles[::-1], -self.references_[::-1])
)
else:
X_col[isfinite_mask] = np.interp(X_col_finite, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output distribution
if not inverse:
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if output_distribution == "normal":
X_col = stats.norm.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = stats.norm.ppf(BOUNDS_THRESHOLD - np.spacing(1))
clip_max = stats.norm.ppf(1 - (BOUNDS_THRESHOLD - np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
# else output distribution is uniform and the ppf is the
# identity function so we let X_col unchanged
return X_col
def _check_inputs(self, X, in_fit, accept_sparse_negative=False, copy=False):
"""Check inputs before fit and transform."""
X = self._validate_data(
X,
reset=in_fit,
accept_sparse="csc",
copy=copy,
dtype=FLOAT_DTYPES,
force_all_finite="allow-nan",
)
# we only accept a non-negative sparse matrix when ignore_implicit_zeros
# is False and fit or transform is called.
with np.errstate(invalid="ignore"): # hide NaN comparison warnings
if (
not accept_sparse_negative
and not self.ignore_implicit_zeros
and (sparse.issparse(X) and np.any(X.data < 0))
):
raise ValueError(
"QuantileTransformer only accepts non-negative sparse matrices."
)
return X
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, default=False
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray of shape (n_samples, n_features)
Projected data.
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx], X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx], inverse
)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx], inverse
)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(X, in_fit=False, copy=self.copy)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The projected data.
"""
check_is_fitted(self)
X = self._check_inputs(
X, in_fit=False, accept_sparse_negative=True, copy=self.copy
)
return self._transform(X, inverse=True)
def _more_tags(self):
return {"allow_nan": True}
def quantile_transform(
X,
*,
axis=0,
n_quantiles=1000,
output_distribution="uniform",
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=True,
):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently. First an
estimate of the cumulative distribution function of a feature is
used to map the original values to a uniform distribution. The obtained
values are then mapped to the desired output distribution using the
associated quantile function. Feature values of new/unseen data that fall
below or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to transform.
axis : int, default=0
Axis used to compute the quantiles along. If 0,
transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, default=1000 or n_samples
Number of quantiles to be computed. It corresponds to the number
of landmarks used to discretize the cumulative distribution function.
If n_quantiles is larger than the number of samples, n_quantiles is set
to the number of samples as a larger number of quantiles does not give
a better approximation of the cumulative distribution function
estimator.
output_distribution : {'uniform', 'normal'}, default='uniform'
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, default=False
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, default=1e5
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, default=None
Determines random number generation for subsampling and smoothing
noise.
Please see ``subsample`` for more details.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
copy : bool, default=True
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array). If True, a copy of `X` is transformed,
leaving the original `X` unchanged.
.. versionchanged:: 0.23
The default value of `copy` changed from False to True in 0.23.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
QuantileTransformer : Performs quantile-based scaling using the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
power_transform : Maps data to a normal distribution using a
power transformation.
scale : Performs standardization that is faster, but less robust
to outliers.
robust_scale : Performs robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.quantile_transform` unless
you know what you are doing. A common mistake is to apply it
to the entire data *before* splitting into training and
test sets. This will bias the model evaluation because
information would have leaked from the test set to the
training set.
In general, we recommend using
:class:`~sklearn.preprocessing.QuantileTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(QuantileTransformer(),
LogisticRegression())`.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0, copy=True)
array([...])
"""
n = QuantileTransformer(
n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy,
)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError(
"axis should be either equal to 0 or 1. Got axis={}".format(axis)
)
class PowerTransformer(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, PowerTransformer supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
supports both positive and negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
.. versionadded:: 0.20
Parameters
----------
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Attributes
----------
lambdas_ : ndarray of float of shape (n_features,)
The parameters of the power transformation for the selected features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
power_transform : Equivalent function without the estimator API.
QuantileTransformer : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import PowerTransformer
>>> pt = PowerTransformer()
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(pt.fit(data))
PowerTransformer()
>>> print(pt.lambdas_)
[ 1.386... -3.100...]
>>> print(pt.transform(data))
[[-1.316... -0.707...]
[ 0.209... -0.707...]
[ 1.106... 1.414...]]
"""
_parameter_constraints: dict = {
"method": [StrOptions({"yeo-johnson", "box-cox"})],
"standardize": ["boolean"],
"copy": ["boolean"],
}
def __init__(self, method="yeo-johnson", *, standardize=True, copy=True):
self.method = method
self.standardize = standardize
self.copy = copy
def fit(self, X, y=None):
"""Estimate the optimal parameter lambda for each feature.
The optimal lambda parameter for minimizing skewness is estimated on
each feature independently using maximum likelihood.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
self._validate_params()
self._fit(X, y=y, force_transform=False)
return self
def fit_transform(self, X, y=None):
"""Fit `PowerTransformer` to `X`, then transform `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to estimate the optimal transformation parameters
and to be transformed using a power transformation.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
Transformed data.
"""
self._validate_params()
return self._fit(X, y, force_transform=True)
def _fit(self, X, y=None, force_transform=False):
X = self._check_input(X, in_fit=True, check_positive=True)
        if not self.copy and not force_transform:  # if called from fit()
            X = X.copy()  # force copy so that fit does not change X inplace
optim_function = {
"box-cox": self._box_cox_optimize,
"yeo-johnson": self._yeo_johnson_optimize,
}[self.method]
with np.errstate(invalid="ignore"): # hide NaN warnings
self.lambdas_ = np.array([optim_function(col) for col in X.T])
if self.standardize or force_transform:
transform_function = {
"box-cox": boxcox,
"yeo-johnson": self._yeo_johnson_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
return X
def transform(self, X):
"""Apply the power transform to each feature using the fitted lambdas.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
"""
check_is_fitted(self)
X = self._check_input(X, in_fit=False, check_positive=True, check_shape=True)
transform_function = {
"box-cox": boxcox,
"yeo-johnson": self._yeo_johnson_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
if self.standardize:
X = self._scaler.transform(X)
return X
def inverse_transform(self, X):
"""Apply the inverse power transformation using the fitted lambdas.
The inverse of the Box-Cox transformation is given by::
if lambda_ == 0:
X = exp(X_trans)
else:
X = (X_trans * lambda_ + 1) ** (1 / lambda_)
The inverse of the Yeo-Johnson transformation is given by::
if X >= 0 and lambda_ == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda_ != 0:
X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1
elif X < 0 and lambda_ != 2:
X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))
elif X < 0 and lambda_ == 2:
X = 1 - exp(-X_trans)
Parameters
----------
X : array-like of shape (n_samples, n_features)
The transformed data.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The original data.
"""
check_is_fitted(self)
X = self._check_input(X, in_fit=False, check_shape=True)
if self.standardize:
X = self._scaler.inverse_transform(X)
inv_fun = {
"box-cox": self._box_cox_inverse_tranform,
"yeo-johnson": self._yeo_johnson_inverse_transform,
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid="ignore"): # hide NaN warnings
X[:, i] = inv_fun(X[:, i], lmbda)
return X
    def _box_cox_inverse_transform(self, x, lmbda):
"""Return inverse-transformed input x following Box-Cox inverse
transform with parameter lambda.
"""
if lmbda == 0:
x_inv = np.exp(x)
else:
x_inv = (x * lmbda + 1) ** (1 / lmbda)
return x_inv
def _yeo_johnson_inverse_transform(self, x, lmbda):
"""Return inverse-transformed input x following Yeo-Johnson inverse
transform with parameter lambda.
"""
x_inv = np.zeros_like(x)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.0):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.0):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1, 1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
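# Round-trip sketch for the inverse maps above (assumed example): transform
# followed by inverse_transform should recover the input up to float error.
def _demo_power_transformer_round_trip():
    import numpy as np

    X = np.array([[-2.0], [-0.5], [0.0], [1.5], [4.0]])
    pt = PowerTransformer(method="yeo-johnson", standardize=True).fit(X)
    assert np.allclose(pt.inverse_transform(pt.transform(X)), X)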
def _yeo_johnson_transform(self, x, lmbda):
"""Return transformed input x following Yeo-Johnson transform with
parameter lambda.
"""
out = np.zeros_like(x)
pos = x >= 0 # binary mask
# when x >= 0
if abs(lmbda) < np.spacing(1.0):
out[pos] = np.log1p(x[pos])
else: # lmbda != 0
out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda
# when x < 0
if abs(lmbda - 2) > np.spacing(1.0):
out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda)
else: # lmbda == 2
out[~pos] = -np.log1p(-x[~pos])
return out
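# Limit-behavior sketch (assumed example): at lmbda == 0 the positive branch of
# the transform above reduces to log1p, and at lmbda == 2 the negative branch
# reduces to -log1p(-x); the helper is stateless, so no fit is required.
def _demo_yeo_johnson_limits():
    import numpy as np

    x = np.array([-1.5, -0.2, 0.0, 0.3, 2.0])
    pt = PowerTransformer()
    pos = x >= 0
    out = pt._yeo_johnson_transform(x, 0.0)
    assert np.allclose(out[pos], np.log1p(x[pos]))
    out2 = pt._yeo_johnson_transform(x, 2.0)
    assert np.allclose(out2[~pos], -np.log1p(-x[~pos]))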
def _box_cox_optimize(self, x):
"""Find and return optimal lambda parameter of the Box-Cox transform by
MLE, for observed data x.
        Here we use the scipy builtin, which relies on the Brent optimizer.
"""
# the computation of lambda is influenced by NaNs so we need to
# get rid of them
_, lmbda = stats.boxcox(x[~np.isnan(x)], lmbda=None)
return lmbda
def _yeo_johnson_optimize(self, x):
"""Find and return optimal lambda parameter of the Yeo-Johnson
transform by MLE, for observed data x.
        As for Box-Cox, the MLE is computed via the Brent optimizer.
"""
x_tiny = np.finfo(np.float64).tiny
def _neg_log_likelihood(lmbda):
"""Return the negative log likelihood of the observed data x as a
function of lambda."""
x_trans = self._yeo_johnson_transform(x, lmbda)
n_samples = x.shape[0]
x_trans_var = x_trans.var()
# Reject transformed data that would raise a RuntimeWarning in np.log
if x_trans_var < x_tiny:
return np.inf
log_var = np.log(x_trans_var)
loglike = -n_samples / 2 * log_var
loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()
return -loglike
# the computation of lambda is influenced by NaNs so we need to
# get rid of them
x = x[~np.isnan(x)]
# choosing bracket -2, 2 like for boxcox
return optimize.brent(_neg_log_likelihood, brack=(-2, 2))
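# MLE sanity sketch (assumed example): the lambda found by the Brent search
# above should reduce skewness on strongly skewed data.
def _demo_yeo_johnson_skewness():
    import numpy as np
    from scipy import stats

    rng = np.random.RandomState(0)
    x = rng.lognormal(size=(500, 1))  # heavily right-skewed
    xt = PowerTransformer(method="yeo-johnson").fit_transform(x)
    assert abs(stats.skew(xt.ravel())) < abs(stats.skew(x.ravel()))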
def _check_input(self, X, in_fit, check_positive=False, check_shape=False):
"""Validate the input before fit and transform.
Parameters
----------
X : array-like of shape (n_samples, n_features)
in_fit : bool
Whether or not `_check_input` is called from `fit` or other
methods, e.g. `predict`, `transform`, etc.
check_positive : bool, default=False
If True, check that all data is positive and non-zero (only if
``self.method=='box-cox'``).
check_shape : bool, default=False
If True, check that n_features matches the length of self.lambdas_
"""
X = self._validate_data(
X,
ensure_2d=True,
dtype=FLOAT_DTYPES,
copy=self.copy,
force_all_finite="allow-nan",
reset=in_fit,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
if check_positive and self.method == "box-cox" and np.nanmin(X) <= 0:
raise ValueError(
"The Box-Cox transformation can only be "
"applied to strictly positive data"
)
        if check_shape and X.shape[1] != len(self.lambdas_):
raise ValueError(
"Input data has a different number of features "
"than fitting data. Should have {n}, data has {m}".format(
n=len(self.lambdas_), m=X.shape[1]
)
)
return X
def _more_tags(self):
return {"allow_nan": True}
def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
"""Parametric, monotonic transformation to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations
that are applied to make data more Gaussian-like. This is useful for
modeling issues related to heteroscedasticity (non-constant variance),
or other situations where normality is desired.
Currently, power_transform supports the Box-Cox transform and the
Yeo-Johnson transform. The optimal parameter for stabilizing variance and
minimizing skewness is estimated through maximum likelihood.
Box-Cox requires input data to be strictly positive, while Yeo-Johnson
    supports both positive and negative data.
By default, zero-mean, unit-variance normalization is applied to the
transformed data.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to be transformed using a power transformation.
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
The power transform method. Available methods are:
- 'yeo-johnson' [1]_, works with positive and negative values
- 'box-cox' [2]_, only works with strictly positive values
.. versionchanged:: 0.23
The default value of the `method` parameter changed from
'box-cox' to 'yeo-johnson' in 0.23.
standardize : bool, default=True
Set to True to apply zero-mean, unit-variance normalization to the
transformed output.
copy : bool, default=True
Set to False to perform inplace computation during transformation.
Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
The transformed data.
See Also
--------
PowerTransformer : Equivalent transformation with the
Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
quantile_transform : Maps data to a standard normal distribution with
the parameter `output_distribution='normal'`.
Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained
in ``transform``.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to
improve normality or symmetry." Biometrika, 87(4), pp.954-959,
(2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal
of the Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import power_transform
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(power_transform(data, method='box-cox'))
[[-1.332... -0.707...]
[ 0.256... -0.707...]
[ 1.076... 1.414...]]
.. warning:: Risk of data leak.
Do not use :func:`~sklearn.preprocessing.power_transform` unless you
know what you are doing. A common mistake is to apply it to the entire
data *before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.PowerTransformer` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking, e.g.: `pipe = make_pipeline(PowerTransformer(),
LogisticRegression())`.
"""
pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
return pt.fit_transform(X)
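# Leakage-safe sketch for the warning above (assumed example): fitting inside a
# Pipeline keeps the lambdas estimated on the training fold only.
def _demo_power_transform_pipeline():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    from sklearn.pipeline import make_pipeline

    X, y = make_classification(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    pipe = make_pipeline(PowerTransformer(), LogisticRegression())
    pipe.fit(X_train, y_train)  # test data never influences the fit
    return pipe.score(X_test, y_test)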
| bsd-3-clause | 9a298c11008a34b63dedbcec748b22fa | 33.578795 | 88 | 0.592958 | 4.122466 | false | false | false | false |
scikit-learn/scikit-learn | doc/tutorial/machine_learning_map/parse_path.py | 12 | 7398 | #!/usr/local/bin/python
"""
Based on: http://wxpsvg.googlecode.com/svn/trunk/svg/pathdata.py
According to that project, this file is licensed under the LGPL
"""
try:
from pyparsing import (ParserElement, Literal, Word, CaselessLiteral,
Optional, Combine, Forward, ZeroOrMore, nums, oneOf, Group, ParseException, OneOrMore)
except ImportError:
import sys
sys.exit("pyparsing is required")
#ParserElement.enablePackrat()
def Command(char):
""" Case insensitive but case preserving"""
return CaselessPreservingLiteral(char)
def Arguments(token):
return Group(token)
class CaselessPreservingLiteral(CaselessLiteral):
""" Like CaselessLiteral, but returns the match as found
instead of as defined.
"""
def __init__( self, matchString ):
super().__init__(matchString.upper())
self.name = "'%s'" % matchString
self.errmsg = "Expected " + self.name
self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
test = instring[ loc:loc+self.matchLen ]
if test.upper() == self.match:
return loc+self.matchLen, test
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def Sequence(token):
""" A sequence of the token"""
return OneOrMore(token+maybeComma)
digit_sequence = Word(nums)
sign = oneOf("+ -")
def convertToFloat(s, loc, toks):
    try:
        return float(toks[0])
    except ValueError as e:
        # pass the full string and location so pyparsing can report the context
        raise ParseException(s, loc, "invalid float format %s" % toks[0]) from e
exponent = CaselessLiteral("e")+Optional(sign)+Word(nums)
#note that almost all these fields are optional,
#and this can match almost anything. We rely on Pythons built-in
#float() function to clear out invalid values - loosely matching like this
#speeds up parsing quite a lot
floatingPointConstant = Combine(
Optional(sign) +
Optional(Word(nums)) +
Optional(Literal(".") + Optional(Word(nums)))+
Optional(exponent)
)
floatingPointConstant.setParseAction(convertToFloat)
number = floatingPointConstant
#same as FP constant but don't allow a - sign
nonnegativeNumber = Combine(
Optional(Word(nums)) +
Optional(Literal(".") + Optional(Word(nums)))+
Optional(exponent)
)
nonnegativeNumber.setParseAction(convertToFloat)
coordinate = number
#comma or whitespace can separate values all over the place in SVG
maybeComma = Optional(Literal(',')).suppress()
coordinateSequence = Sequence(coordinate)
coordinatePair = (coordinate + maybeComma + coordinate).setParseAction(tuple)
coordinatePairSequence = Sequence(coordinatePair)
coordinatePairPair = coordinatePair + maybeComma + coordinatePair
coordinatePairPairSequence = Sequence(Group(coordinatePairPair))
coordinatePairTriple = coordinatePair + maybeComma + coordinatePair + maybeComma + coordinatePair
coordinatePairTripleSequence = Sequence(Group(coordinatePairTriple))
#commands
lineTo = Group(Command("L") + Arguments(coordinatePairSequence))
curve = Group(Command("C") + Arguments(coordinatePairSequence))
moveTo = Group(Command("M") + Arguments(coordinatePairSequence))
closePath = Group(Command("Z")).setParseAction(lambda t: ('Z', (None,)))
flag = oneOf("1 0").setParseAction(lambda t: bool(int((t[0]))))
arcRadius = (
nonnegativeNumber + maybeComma + #rx
nonnegativeNumber #ry
).setParseAction(tuple)
arcFlags = (flag + maybeComma + flag).setParseAction(tuple)
ellipticalArcArgument = Group(
arcRadius + maybeComma + #rx, ry
number + maybeComma +#rotation
arcFlags + #large-arc-flag, sweep-flag
coordinatePair #(x,y)
)
ellipticalArc = Group(Command("A") + Arguments(Sequence(ellipticalArcArgument)))
smoothQuadraticBezierCurveto = Group(Command("T") + Arguments(coordinatePairSequence))
quadraticBezierCurveto = Group(Command("Q") + Arguments(coordinatePairPairSequence))
smoothCurve = Group(Command("S") + Arguments(coordinatePairPairSequence))
#curve = Group(Command("C") + Arguments(coordinatePairTripleSequence))
horizontalLine = Group(Command("H") + Arguments(coordinateSequence))
verticalLine = Group(Command("V") + Arguments(coordinateSequence))
drawToCommand = (
lineTo | moveTo | closePath | ellipticalArc | smoothQuadraticBezierCurveto |
quadraticBezierCurveto | smoothCurve | curve | horizontalLine | verticalLine
)
#~ number.debug = True
moveToDrawToCommands = moveTo + ZeroOrMore(drawToCommand)
path = ZeroOrMore(moveToDrawToCommands)
path.keepTabs = True
def get_points(d):
commands = path.parseString(d)
points = []
currentset = None
for command in commands:
        if command[0] in ("M", "m"):
            currentset = []
            points.append(currentset)
            currentset.append(command[1][-1])
        elif command[0] in ("L", "l"):
            currentset.extend(command[1])
        elif command[0] in ("C", "c"):
            currentset.extend(command[1])
return points
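# Minimal usage sketch (assumed example; requires the pyparsing version this
# module was written against): moveTo starts a new point list, lineTo and curve
# extend it, and closePath contributes no points.
def _demo_get_points():
    pts = get_points("M 0,0 L 10,0 L 10,10 Z M 20,20 C 25,25 30,25 35,20")
    # -> [[(0.0, 0.0), (10.0, 0.0), (10.0, 10.0)],
    #     [(20.0, 20.0), (25.0, 25.0), (30.0, 25.0), (35.0, 20.0)]]
    return pts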
if __name__ == "__main__":
s = ("M 242.96145,653.59282 L 244.83646,650.1553 L 247.02397,649.8428 "
"L 247.33647,650.62405 L 245.30521,653.59282 L 242.96145,653.59282 z "
"M 252.80525,649.99905 L 258.74278,652.49906 L 260.77404,652.18656 "
"L 262.33654,648.43654 L 261.71154,645.15528 L 257.64902,644.68653 "
"L 253.74275,646.40528 L 252.80525,649.99905 z M 282.49289,659.6866 "
"L 286.08665,664.99912 L 288.43041,664.68662 L 289.52417,664.21787 "
"L 290.93042,665.46787 L 294.52419,665.31162 L 295.4617,663.90537 "
"L 292.64918,662.18661 L 290.77417,658.59284 L 288.74291,655.15533 "
"L 283.11789,657.96784 L 282.49289,659.6866 z M 302.02423,668.28039 "
"L 303.27423,666.40538 L 307.8055,667.34288 L 308.43051,666.87413 "
"L 314.36803,667.49913 L 314.05553,668.74914 L 311.55552,670.15539 "
"L 307.33675,669.84289 L 302.02423,668.28039 z M 307.1805,673.28041 "
"L 309.05551,677.03043 L 312.02427,675.93667 L 312.33677,674.37416 "
"L 310.77427,672.3429 L 307.1805,672.0304 L 307.1805,673.28041 z "
"M 313.89928,672.18665 L 316.08679,669.37414 L 320.61806,671.7179 "
"L 324.83683,672.81166 L 329.0556,675.46792 L 329.0556,677.34293 "
"L 325.61809,679.06169 L 320.93056,679.99919 L 318.5868,678.59293 "
"L 313.89928,672.18665 z M 329.99311,687.18672 L 331.55561,685.93672 "
"L 334.83688,687.49923 L 342.18066,690.93674 L 345.46193,692.968 "
"L 347.02443,695.31176 L 348.89944,699.53053 L 352.80571,702.03054 "
"L 352.49321,703.28055 L 348.74319,706.40556 L 344.68067,707.81182 "
"L 343.27442,707.18682 L 340.30565,708.90557 L 337.96189,712.03059 "
"L 335.77438,714.8431 L 334.05562,714.68685 L 330.61811,712.18684 "
"L 330.30561,707.81182 L 330.93061,705.46806 L 329.3681,699.99928 "
"L 327.33684,698.28052 L 327.18059,695.78051 L 329.3681,694.84301 "
"L 331.39936,691.87425 L 331.86811,690.93674 L 330.30561,689.21798 "
"L 329.99311,687.18672 z ")
print(path.parseString(s))
| bsd-3-clause | 5c5f8a7a7837e087521f31e815c82dc2 | 36.53125 | 97 | 0.667748 | 3.19568 | false | false | false | false |
scikit-learn/scikit-learn | examples/cluster/plot_dbscan.py | 3 | 3999 | """
===================================
Demo of DBSCAN clustering algorithm
===================================
DBSCAN (Density-Based Spatial Clustering of Applications with Noise) finds core
samples in regions of high density and expands clusters from them. This
algorithm is good for data which contains clusters of similar density.
See the :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py` example
for a demo of different clustering algorithms on 2D datasets.
"""
# %%
# Data generation
# ---------------
#
# We use :class:`~sklearn.datasets.make_blobs` to create 3 synthetic clusters.
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=750, centers=centers, cluster_std=0.4, random_state=0
)
X = StandardScaler().fit_transform(X)
# %%
# We can visualize the resulting data:
import matplotlib.pyplot as plt
plt.scatter(X[:, 0], X[:, 1])
plt.show()
# %%
# Compute DBSCAN
# --------------
#
# One can access the labels assigned by :class:`~sklearn.cluster.DBSCAN` using
# the `labels_` attribute. Noisy samples are given the label :math:`-1`.
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print("Estimated number of clusters: %d" % n_clusters_)
print("Estimated number of noise points: %d" % n_noise_)
# %%
# Clustering algorithms are fundamentally unsupervised learning methods.
# However, since :class:`~sklearn.datasets.make_blobs` gives access to the true
# labels of the synthetic clusters, it is possible to use evaluation metrics
# that leverage this "supervised" ground truth information to quantify the
# quality of the resulting clusters. Examples of such metrics are the
# homogeneity, completeness, V-measure, Rand-Index, Adjusted Rand-Index and
# Adjusted Mutual Information (AMI).
#
# If the ground truth labels are not known, evaluation can only be performed
# using the model's results themselves. In that case, the Silhouette
# Coefficient comes in handy.
#
# For more information, see the
# :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py`
# example or the :ref:`clustering_evaluation` module.
print(f"Homogeneity: {metrics.homogeneity_score(labels_true, labels):.3f}")
print(f"Completeness: {metrics.completeness_score(labels_true, labels):.3f}")
print(f"V-measure: {metrics.v_measure_score(labels_true, labels):.3f}")
print(f"Adjusted Rand Index: {metrics.adjusted_rand_score(labels_true, labels):.3f}")
print(
"Adjusted Mutual Information:"
f" {metrics.adjusted_mutual_info_score(labels_true, labels):.3f}"
)
print(f"Silhouette Coefficient: {metrics.silhouette_score(X, labels):.3f}")
# %%
# Plot results
# ------------
#
# Core samples (large dots) and non-core samples (small dots) are color-coded
# according to the assigned cluster. Samples tagged as noise are represented in
# black.
unique_labels = set(labels)
core_samples_mask = np.zeros_like(labels, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = labels == k
xy = X[class_member_mask & core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=14,
)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=6,
)
plt.title(f"Estimated number of clusters: {n_clusters_}")
plt.show()
| bsd-3-clause | 999bc10e8a391000a80644679b5b7be7 | 30.242188 | 85 | 0.684921 | 3.31592 | false | false | false | false |
scikit-learn/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 16 | 5559 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker="o")
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {"time": training_time, "error": reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times["pca"], label="PCA")
plot_results(
all_components, all_times["ipca"], label="IncrementalPCA, bsize=%i" % batch_size
)
plt.legend(loc="upper left")
plt.suptitle(
"Algorithm runtime vs. n_components\n LFW, size %i x %i"
% data.shape
)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors["pca"], label="PCA")
plot_results(
all_components,
all_errors["ipca"],
label="IncrementalPCA, bsize=%i" % batch_size,
)
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\nLFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times["pca"], label="PCA")
plot_results(all_batch_sizes, all_times["ipca"], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle(
"Algorithm runtime vs. batch_size for n_components %i\n LFW,"
" size %i x %i" % (n_features, data.shape[0], data.shape[1])
)
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors["pca"], label="PCA")
plot_results(all_batch_sizes, all_errors["ipca"], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle(
"Algorithm error vs. batch_size for n_components %i\n LFW,"
" size %i x %i" % (n_features, data.shape[0], data.shape[1])
)
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [
i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=5)
]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {
k: benchmark(est, data) for k, est in [("pca", pca), ("ipca", ipca)]
}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]["time"])
all_errors[k].append(results_dict[k]["error"])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [
i.astype(int) for i in np.linspace(data.shape[0] // 10, data.shape[0], num=10)
]
for n_components in [
i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=4)
]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = PCA(
n_components=n_components, svd_solver="randomized", random_state=1999
)
results_dict = {
k: benchmark(est, data) for k, est in [("pca", pca), ("rpca", rpca)]
}
# Create flat baselines to compare the variation over batch size
all_times["pca"].extend([results_dict["pca"]["time"]] * len(batch_sizes))
all_errors["pca"].extend([results_dict["pca"]["error"]] * len(batch_sizes))
all_times["rpca"].extend([results_dict["rpca"]["time"]] * len(batch_sizes))
all_errors["rpca"].extend([results_dict["rpca"]["error"]] * len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [("ipca", ipca)]}
all_times["ipca"].append(results_dict["ipca"]["time"])
all_errors["ipca"].append(results_dict["ipca"]["error"])
plot_batch_times(all_times, n_components, batch_sizes, data)
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=0.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause | 87e4f3bdb5f2c1cfc473e563c51ffc00 | 34.407643 | 88 | 0.626731 | 3.346779 | false | false | false | false |
scikit-learn/scikit-learn | benchmarks/bench_sparsify.py | 12 | 3356 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse import csr_matrix
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2) :]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2 :]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[: n_samples // 2], y[: n_samples // 2]
X_test, y_test = X[n_samples // 2 :], y[n_samples // 2 :]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty="l1", alpha=0.2, max_iter=2000, tol=None)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), "dense model")
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), "sparse model")
benchmark_sparse_predict()
| bsd-3-clause | c824bf714d4d165f81ecaa841507f0cd | 30.660377 | 87 | 0.575089 | 3.199237 | false | true | false | false |
scikit-learn/scikit-learn | sklearn/neighbors/_classification.py | 8 | 26187 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from numbers import Integral
import numpy as np
from ..utils.fixes import _mode
from ..utils.extmath import weighted_mode
from ..utils.validation import _is_arraylike, _num_samples
import warnings
from ._base import _get_weights
from ._base import NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin
from ..base import ClassifierMixin
from ..utils._param_validation import StrOptions
class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : {'uniform', 'distance'}, callable or None, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Doesn't affect :meth:`fit` method.
Attributes
----------
classes_ : array of shape (n_classes,)
        Class labels known to the classifier.
    effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric` parameter
        or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
        'minkowski' and the `p` parameter to 2.
effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        it will be the same as the `metric_params` parameter, but it may also
        contain the `p` parameter value if the `effective_metric_` attribute is
        set to 'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
See Also
--------
RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius.
KNeighborsRegressor: Regression based on k-nearest neighbors.
RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius.
NearestNeighbors: Unsupervised learner for implementing neighbor searches.
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y)
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[0.666... 0.333...]]
"""
_parameter_constraints: dict = {**NeighborsBase._parameter_constraints}
_parameter_constraints.pop("radius")
_parameter_constraints.update(
{"weights": [StrOptions({"uniform", "distance"}), callable, None]}
)
def __init__(
self,
n_neighbors=5,
*,
weights="uniform",
algorithm="auto",
leaf_size=30,
p=2,
metric="minkowski",
metric_params=None,
n_jobs=None,
):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.weights = weights
def fit(self, X, y):
"""Fit the k-nearest neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : KNeighborsClassifier
The fitted k-nearest neighbors classifier.
"""
self._validate_params()
return self._fit(X, y)
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
if self.weights == "uniform":
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = _mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
if self.weights == "uniform":
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(n_queries)
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
            # a plain fancy-indexed assignment cannot accumulate votes for
            # duplicate class indices, so loop over the neighbor positions
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
def _more_tags(self):
return {"multilabel": True}
class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase):
"""Classifier implementing a vote among neighbors within a given radius.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : {'uniform', 'distance'}, callable or None, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
outlier_label : {manual label, 'most_frequent'}, default=None
Label for outlier samples (samples with no neighbors in given radius).
- manual label: str or int label (should be the same type as y)
or list of manual labels if multi-output is used.
- 'most_frequent' : assign the most frequent label of y to outliers.
- None : when any outlier is detected, ValueError will be raised.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier.
effective_metric_ : str or callable
        The distance metric used. It will be the same as the `metric` parameter
        or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
        'minkowski' and the `p` parameter to 2.
effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        it will be the same as the `metric_params` parameter, but it may also
        contain the `p` parameter value if the `effective_metric_` attribute is
        set to 'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
outlier_label_ : int or array-like of shape (n_class,)
Label which is given for outlier samples (samples with no neighbors
on given radius).
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
See Also
--------
KNeighborsClassifier : Classifier implementing the k-nearest neighbors
vote.
RadiusNeighborsRegressor : Regression based on neighbors within a
fixed radius.
KNeighborsRegressor : Regression based on k-nearest neighbors.
NearestNeighbors : Unsupervised learner for implementing neighbor
searches.
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y)
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
>>> print(neigh.predict_proba([[1.0]]))
[[0.66666667 0.33333333]]
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"weights": [StrOptions({"uniform", "distance"}), callable, None],
"outlier_label": [Integral, str, "array-like", None],
}
_parameter_constraints.pop("n_neighbors")
def __init__(
self,
radius=1.0,
*,
weights="uniform",
algorithm="auto",
leaf_size=30,
p=2,
metric="minkowski",
outlier_label=None,
metric_params=None,
n_jobs=None,
):
super().__init__(
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.weights = weights
self.outlier_label = outlier_label
def fit(self, X, y):
"""Fit the radius neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : RadiusNeighborsClassifier
The fitted radius neighbors classifier.
"""
self._validate_params()
self._fit(X, y)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label is None:
outlier_label_ = None
elif self.outlier_label == "most_frequent":
outlier_label_ = []
# iterate over multi-output, get the most frequent label for each
# output.
for k, classes_k in enumerate(classes_):
label_count = np.bincount(_y[:, k])
outlier_label_.append(classes_k[label_count.argmax()])
else:
if _is_arraylike(self.outlier_label) and not isinstance(
self.outlier_label, str
):
if len(self.outlier_label) != len(classes_):
raise ValueError(
"The length of outlier_label: {} is "
"inconsistent with the output "
"length: {}".format(self.outlier_label, len(classes_))
)
outlier_label_ = self.outlier_label
else:
outlier_label_ = [self.outlier_label] * len(classes_)
for classes, label in zip(classes_, outlier_label_):
if _is_arraylike(label) and not isinstance(label, str):
# ensure the outlier label for each output is a scalar.
raise TypeError(
"The outlier_label of classes {} is "
"supposed to be a scalar, got "
"{}.".format(classes, label)
)
if np.append(classes, label).dtype != classes.dtype:
# ensure the dtype of outlier label is consistent with y.
raise TypeError(
"The dtype of outlier_label {} is "
"inconsistent with classes {} in "
"y.".format(label, classes)
)
self.outlier_label_ = outlier_label_
return self
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
probs = self.predict_proba(X)
classes_ = self.classes_
if not self.outputs_2d_:
probs = [probs]
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = probs[0].shape[0]
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, prob in enumerate(probs):
# iterate over multi-output, assign labels based on probabilities
# of each output.
max_prob_index = prob.argmax(axis=1)
y_pred[:, k] = classes_[k].take(max_prob_index)
outlier_zero_probs = (prob == 0).all(axis=1)
if outlier_zero_probs.any():
zero_prob_index = np.flatnonzero(outlier_zero_probs)
y_pred[zero_prob_index, k] = self.outlier_label_[k]
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of \
n_outputs of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
n_queries = _num_samples(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
outlier_mask = np.zeros(n_queries, dtype=bool)
outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
outliers = np.flatnonzero(outlier_mask)
inliers = np.flatnonzero(~outlier_mask)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label_ is None and outliers.size > 0:
raise ValueError(
"No neighbors found for test samples %r, "
"you can try using larger radius, "
"giving a label for outliers, "
"or considering removing them from your dataset." % outliers
)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None:
weights = weights[inliers]
probabilities = []
# iterate over multi-output, measure probabilities of the k-th output.
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
proba_inl = np.zeros((len(inliers), classes_k.size))
# samples have different size of neighbors within the same radius
if weights is None:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size)
else:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(
idx, weights[i], minlength=classes_k.size
)
proba_k[inliers, :] = proba_inl
if outliers.size > 0:
_outlier_label = self.outlier_label_[k]
label_index = np.flatnonzero(classes_k == _outlier_label)
if label_index.size == 1:
proba_k[outliers, label_index[0]] = 1.0
else:
warnings.warn(
"Outlier label {} is not in training "
"classes. All class probabilities of "
"outliers will be assigned with 0."
"".format(self.outlier_label_[k])
)
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
def _more_tags(self):
return {"multilabel": True}
| bsd-3-clause | 09cdadf4ee8dabfc02047a798a78059a | 35.72791 | 86 | 0.587811 | 4.180556 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/mixture/_gaussian_mixture.py | 9 | 29207 | """Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ._base import BaseMixture, _check_shape
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils._param_validation import StrOptions
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
"""Check the user provided 'weights'.
Parameters
----------
weights : array-like of shape (n_components,)
The proportions of components of each mixture.
n_components : int
Number of components.
Returns
-------
weights : array, shape (n_components,)
"""
weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(weights, (n_components,), "weights")
# check range
if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)):
raise ValueError(
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights), np.max(weights))
)
# check normalization
if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0):
raise ValueError(
"The parameter 'weights' should be normalized, but got sum(weights) = %.5f"
% np.sum(weights)
)
return weights
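# Validation sketch (assumed example): weights must lie in [0, 1] and sum to 1.
def _demo_check_weights():
    import numpy as np

    w = _check_weights(np.array([0.3, 0.7]), n_components=2)  # passes
    # _check_weights(np.array([0.5, 0.7]), 2) would raise: sum != 1
    return w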
def _check_means(means, n_components, n_features):
"""Validate the provided 'means'.
Parameters
----------
means : array-like of shape (n_components, n_features)
The centers of the current components.
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
means : array, (n_components, n_features)
"""
means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(means, (n_components, n_features), "means")
return means
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (
np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0)
):
raise ValueError(
"'%s precision' should be symmetric, positive-definite" % covariance_type
)
def _check_precisions_full(precisions, covariance_type):
"""Check the precision matrices are symmetric and positive-definite."""
for prec in precisions:
_check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
"""Validate user provided precisions.
Parameters
----------
precisions : array-like
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : str
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
precisions : array
"""
precisions = check_array(
precisions,
dtype=[np.float64, np.float32],
ensure_2d=False,
allow_nd=covariance_type == "full",
)
precisions_shape = {
"full": (n_components, n_features, n_features),
"tied": (n_features, n_features),
"diag": (n_components, n_features),
"spherical": (n_components,),
}
_check_shape(
precisions, precisions_shape[covariance_type], "%s precision" % covariance_type
)
_check_precisions = {
"full": _check_precisions_full,
"tied": _check_precision_matrix,
"diag": _check_precision_positivity,
"spherical": _check_precision_positivity,
}
_check_precisions[covariance_type](precisions, covariance_type)
return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[:: n_features + 1] += reg_covar
return covariances
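# Verification sketch (assumed example): with one component and uniform
# responsibilities, the weighted covariance above equals the biased empirical
# covariance (plus reg_covar on the diagonal, zero here).
def _demo_full_covariance():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    resp = np.ones((100, 1))
    nk = resp.sum(axis=0)
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    cov = _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar=0.0)
    assert np.allclose(cov[0], np.cov(X.T, bias=True))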
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[:: len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
    resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means**2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
        The variance values of each component.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data array.
resp : array-like of shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like of shape (n_components,)
        The effective number of data samples in each component.
means : array-like of shape (n_components, n_features)
The centers of the current components.
covariances : array-like
        The covariance matrices of the current components.
        The shape depends on the covariance_type.
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {
"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical,
}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
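# A toy call, illustrative only: two well-separated points with hard
# responsibilities recover the points themselves as the component means.
#
# >>> import numpy as np
# >>> X = np.array([[0.0, 0.0], [10.0, 10.0]])
# >>> resp = np.array([[1.0, 0.0], [0.0, 1.0]])
# >>> nk, means, covs = _estimate_gaussian_parameters(X, resp, 1e-6, "spherical")
# >>> np.allclose(means, X)
# True
# >>> covs.shape
# (2,)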
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
        The covariance matrices of the current components.
        The shape depends on the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
        The Cholesky decomposition of the sample precisions of the current
        components. The shape depends on the covariance_type.
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar."
)
if covariance_type == "full":
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(
cov_chol, np.eye(n_features), lower=True
).T
elif covariance_type == "tied":
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(
cov_chol, np.eye(n_features), lower=True
).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1.0 / np.sqrt(covariances)
return precisions_chol
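# For 'full', each precisions_chol[k] is the transposed inverse of the
# covariance Cholesky factor, so prec_chol @ prec_chol.T recovers inv(cov).
# A toy check, illustrative only:
#
# >>> import numpy as np
# >>> from scipy import linalg
# >>> cov = np.array([[[2.0, 0.5], [0.5, 1.0]]])  # one 'full' component
# >>> pc = _compute_precision_cholesky(cov, "full")
# >>> np.allclose(pc[0] @ pc[0].T, linalg.inv(cov[0]))
# True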
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
        The log of the determinant of the Cholesky factor of the precision
        matrix for each component.
"""
if covariance_type == "full":
n_components, _, _ = matrix_chol.shape
log_det_chol = np.sum(
np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1
)
elif covariance_type == "tied":
log_det_chol = np.sum(np.log(np.diag(matrix_chol)))
elif covariance_type == "diag":
log_det_chol = np.sum(np.log(matrix_chol), axis=1)
else:
        log_det_chol = n_features * np.log(matrix_chol)
return log_det_chol
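# The 'full' branch strides over the flattened factors to pick out their
# diagonals, so the result equals the log-determinant of each factor. A toy
# check, illustrative only:
#
# >>> import numpy as np
# >>> mc = np.array([[[2.0, 0.0], [0.0, 0.5]]])  # one lower-triangular factor
# >>> np.allclose(_compute_log_det_cholesky(mc, "full", 2),
# ...             np.log(np.linalg.det(mc[0])))
# True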
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
    # The log-determinant of the Cholesky factor of the precision matrix is
    # half of the log-determinant of the precision matrix itself.
    # In short: log det(precision_chol) = log det(precision) / 2
log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features)
if covariance_type == "full":
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == "tied":
log_prob = np.empty((n_samples, n_components))
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == "diag":
precisions = precisions_chol**2
log_prob = (
np.sum((means**2 * precisions), 1)
- 2.0 * np.dot(X, (means * precisions).T)
+ np.dot(X**2, precisions.T)
)
elif covariance_type == "spherical":
precisions = precisions_chol**2
log_prob = (
np.sum(means**2, 1) * precisions
- 2 * np.dot(X, means.T * precisions)
+ np.outer(row_norms(X, squared=True), precisions)
)
# Since we are using the precision of the Cholesky decomposition,
# `- 0.5 * log_det_precision` becomes `+ log_det_precision_chol`
return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
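# A cross-check against a reference density (toy values, illustrative only):
# with a single 'full' component the result matches
# scipy.stats.multivariate_normal.logpdf.
#
# >>> import numpy as np
# >>> from scipy.stats import multivariate_normal
# >>> X = np.array([[0.5, -0.2]])
# >>> means = np.array([[0.0, 0.0]])
# >>> cov = np.array([[[2.0, 0.3], [0.3, 1.0]]])
# >>> pc = _compute_precision_cholesky(cov, "full")
# >>> lp = _estimate_log_gaussian_prob(X, means, pc, "full")
# >>> np.allclose(lp[0, 0], multivariate_normal(means[0], cov[0]).logpdf(X[0]))
# True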
class GaussianMixture(BaseMixture):
"""Gaussian Mixture.
Representation of a Gaussian mixture model probability distribution.
    This class allows one to estimate the parameters of a Gaussian mixture
distribution.
Read more in the :ref:`User Guide <gmm>`.
.. versionadded:: 0.18
Parameters
----------
n_components : int, default=1
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
String describing the type of covariance parameters to use.
Must be one of:
- 'full': each component has its own general covariance matrix.
- 'tied': all components share the same general covariance matrix.
- 'diag': each component has its own diagonal covariance matrix.
- 'spherical': each component has its own single variance.
tol : float, default=1e-3
The convergence threshold. EM iterations will stop when the
lower bound average gain is below this threshold.
reg_covar : float, default=1e-6
Non-negative regularization added to the diagonal of covariance.
        Ensures that the covariance matrices are all positive.
max_iter : int, default=100
The number of EM iterations to perform.
n_init : int, default=1
The number of initializations to perform. The best results are kept.
init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \
default='kmeans'
The method used to initialize the weights, the means and the
precisions.
String must be one of:
- 'kmeans' : responsibilities are initialized using kmeans.
- 'k-means++' : use the k-means++ method to initialize.
- 'random' : responsibilities are initialized randomly.
- 'random_from_data' : initial means are randomly selected data points.
.. versionchanged:: v1.1
`init_params` now accepts 'random_from_data' and 'k-means++' as
initialization methods.
weights_init : array-like of shape (n_components, ), default=None
The user-provided initial weights.
If it is None, weights are initialized using the `init_params` method.
means_init : array-like of shape (n_components, n_features), default=None
        The user-provided initial means.
If it is None, means are initialized using the `init_params` method.
precisions_init : array-like, default=None
The user-provided initial precisions (inverse of the covariance
matrices).
If it is None, precisions are initialized using the 'init_params'
method.
The shape depends on 'covariance_type'::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
random_state : int, RandomState instance or None, default=None
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
In that case, 'n_init' is ignored and only a single initialization
occurs upon the first call.
See :term:`the Glossary <warm_start>`.
verbose : int, default=0
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default=10
        Number of iterations done before the next print.
Attributes
----------
weights_ : array-like of shape (n_components,)
        The weights of each mixture component.
means_ : array-like of shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussians can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
        The Cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
        Gaussians can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
        Number of steps used by the best fit of EM to reach convergence.
lower_bound_ : float
Lower bound value on the log-likelihood (of the training data with
respect to the model) of the best fit of EM.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BayesianGaussianMixture : Gaussian mixture model fit with a variational
inference.
Examples
--------
>>> import numpy as np
>>> from sklearn.mixture import GaussianMixture
>>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
>>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
>>> gm.means_
array([[10., 2.],
[ 1., 2.]])
>>> gm.predict([[0, 0], [12, 3]])
array([1, 0])
"""
_parameter_constraints: dict = {
**BaseMixture._parameter_constraints,
"covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})],
"weights_init": ["array-like", None],
"means_init": ["array-like", None],
"precisions_init": ["array-like", None],
}
def __init__(
self,
n_components=1,
*,
covariance_type="full",
tol=1e-3,
reg_covar=1e-6,
max_iter=100,
n_init=1,
init_params="kmeans",
weights_init=None,
means_init=None,
precisions_init=None,
random_state=None,
warm_start=False,
verbose=0,
verbose_interval=10,
):
super().__init__(
n_components=n_components,
tol=tol,
reg_covar=reg_covar,
max_iter=max_iter,
n_init=n_init,
init_params=init_params,
random_state=random_state,
warm_start=warm_start,
verbose=verbose,
verbose_interval=verbose_interval,
)
self.covariance_type = covariance_type
self.weights_init = weights_init
self.means_init = means_init
self.precisions_init = precisions_init
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init, self.n_components)
if self.means_init is not None:
self.means_init = _check_means(
self.means_init, self.n_components, n_features
)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(
self.precisions_init,
self.covariance_type,
self.n_components,
n_features,
)
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type
)
weights /= n_samples
self.weights_ = weights if self.weights_init is None else self.weights_init
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type
)
elif self.covariance_type == "full":
self.precisions_cholesky_ = np.array(
[
linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init
]
)
elif self.covariance_type == "tied":
self.precisions_cholesky_ = linalg.cholesky(
self.precisions_init, lower=True
)
else:
self.precisions_cholesky_ = np.sqrt(self.precisions_init)
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
            each sample in X.
"""
self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters(
X, np.exp(log_resp), self.reg_covar, self.covariance_type
)
self.weights_ /= self.weights_.sum()
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type
)
def _estimate_log_prob(self, X):
return _estimate_log_gaussian_prob(
X, self.means_, self.precisions_cholesky_, self.covariance_type
)
def _estimate_log_weights(self):
return np.log(self.weights_)
def _compute_lower_bound(self, _, log_prob_norm):
return log_prob_norm
def _get_parameters(self):
return (
self.weights_,
self.means_,
self.covariances_,
self.precisions_cholesky_,
)
def _set_parameters(self, params):
(
self.weights_,
self.means_,
self.covariances_,
self.precisions_cholesky_,
) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == "full":
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == "tied":
self.precisions_ = np.dot(
self.precisions_cholesky_, self.precisions_cholesky_.T
)
else:
self.precisions_ = self.precisions_cholesky_**2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == "full":
cov_params = self.n_components * n_features * (n_features + 1) / 2.0
elif self.covariance_type == "diag":
cov_params = self.n_components * n_features
elif self.covariance_type == "tied":
cov_params = n_features * (n_features + 1) / 2.0
elif self.covariance_type == "spherical":
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
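    # Worked count, illustrative only: for covariance_type='full' with
    # n_components=2 and n_features=3, the covariances contribute
    # 2 * 3 * 4 / 2 = 12 parameters, the means 2 * 3 = 6, and the weights
    # 2 - 1 = 1, so _n_parameters() returns 19.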
def bic(self, X):
"""Bayesian information criterion for the current model on the input X.
You can refer to this :ref:`mathematical section <aic_bic>` for more
details regarding the formulation of the BIC used.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
The input samples.
Returns
-------
bic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log(
X.shape[0]
)
def aic(self, X):
"""Akaike information criterion for the current model on the input X.
You can refer to this :ref:`mathematical section <aic_bic>` for more
details regarding the formulation of the AIC used.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
The input samples.
Returns
-------
aic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
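# A typical model-selection loop over these criteria (a sketch with toy data;
# the variable names are illustrative, not from the library):
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
# >>> bics = [GaussianMixture(n, random_state=0).fit(X).bic(X) for n in (1, 2, 3)]
# >>> best_n = 1 + int(np.argmin(bics))  # expected to favor the true 2 components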
| bsd-3-clause | 735d418ac9b4fc19ffa6704d8f5cf94f | 33.240328 | 88 | 0.603314 | 3.985128 | false | false | false | false |
scikit-learn/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 13 | 4476 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100], "gamma": [0.01, 0.1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
    # Non-nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=outer_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print(
"Average difference of {:6f} with std. dev. of {:6f}.".format(
score_difference.mean(), score_difference.std()
)
)
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
(non_nested_scores_line,) = plt.plot(non_nested_scores, color="r")
(nested_line,) = plt.plot(nested_scores, color="b")
plt.ylabel("score", fontsize="14")
plt.legend(
[non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, 0.4, 0.5, 0),
)
plt.title(
"Non-Nested and Nested Cross Validation on Iris Dataset",
x=0.5,
y=1.1,
fontsize="15",
)
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend(
[difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, 0.8, 0),
)
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause | 4f7a53c79b983b4a6b0f9472983d6d48 | 34.244094 | 79 | 0.717158 | 3.411585 | false | false | false | false |
scikit-learn/scikit-learn | examples/covariance/plot_sparse_cov.py | 12 | 5002 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphicalLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphicalLasso setting the sparsity of the model is
set by internal cross-validation in the GraphicalLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
# %%
# Generate the data
# -----------------
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=0.98, smallest_coef=0.4, largest_coef=0.7, random_state=prng
)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
# %%
# Estimate the covariance
# -----------------------
from sklearn.covariance import GraphicalLassoCV, ledoit_wolf
emp_cov = np.dot(X.T, X) / n_samples
model = GraphicalLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
# %%
# Plot the results
# ----------------
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [
("Empirical", emp_cov),
("Ledoit-Wolf", lw_cov_),
("GraphicalLassoCV", cov_),
("True", cov),
]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(
this_cov, interpolation="nearest", vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r
)
plt.xticks(())
plt.yticks(())
plt.title("%s covariance" % name)
# plot the precisions
precs = [
("Empirical", linalg.inv(emp_cov)),
("Ledoit-Wolf", lw_prec_),
("GraphicalLasso", prec_),
("True", prec),
]
vmax = 0.9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(
np.ma.masked_equal(this_prec, 0),
interpolation="nearest",
vmin=-vmax,
vmax=vmax,
cmap=plt.cm.RdBu_r,
)
plt.xticks(())
plt.yticks(())
plt.title("%s precision" % name)
if hasattr(ax, "set_facecolor"):
ax.set_facecolor(".7")
else:
ax.set_axis_bgcolor(".7")
# %%
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([0.2, 0.15, 0.75, 0.7])
plt.plot(model.cv_results_["alphas"], model.cv_results_["mean_test_score"], "o-")
plt.axvline(model.alpha_, color=".5")
plt.title("Model selection")
plt.ylabel("Cross-validation score")
plt.xlabel("alpha")
plt.show()
| bsd-3-clause | bec65f2c212030c22055880b9dbed4e0 | 30.658228 | 84 | 0.70092 | 3.414334 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/base.py | 4 | 37220 | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import warnings
from collections import defaultdict
import platform
import inspect
import re
import numpy as np
from . import __version__
from ._config import get_config
from .utils import _IS_32BIT
from .utils._set_output import _SetOutputMixin
from .utils._tags import (
_DEFAULT_TAGS,
)
from .utils.validation import check_X_y
from .utils.validation import check_array
from .utils.validation import _check_y
from .utils.validation import _num_features
from .utils.validation import _check_feature_names_in
from .utils.validation import _generate_get_feature_names_out
from .utils.validation import check_is_fitted
from .utils.validation import _get_feature_names
from .utils._estimator_html_repr import estimator_html_repr
from .utils._param_validation import validate_parameter_constraints
def clone(estimator, *, safe=True):
"""Construct a new unfitted estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It returns a new estimator
with the same parameters that has not been fitted on any data.
Parameters
----------
estimator : {list, tuple, set} of estimator instance or a single \
estimator instance
The estimator or group of estimators to be cloned.
safe : bool, default=True
If safe is False, clone will fall back to a deep copy on objects
that are not estimators.
Returns
-------
estimator : object
The deep copy of the input, an estimator if input is an estimator.
Notes
-----
If the estimator's `random_state` parameter is an integer (or if the
estimator doesn't have a `random_state` parameter), an *exact clone* is
returned: the clone and the original estimator will give the exact same
    results. Otherwise, a *statistical clone* is returned: the clone might
return different results from the original estimator. More details can be
found in :ref:`randomness`.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
if isinstance(estimator, type):
raise TypeError(
"Cannot clone object. "
+ "You should provide an instance of "
+ "scikit-learn estimator instead of a class."
)
else:
raise TypeError(
"Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn "
"estimator as it does not implement a "
"'get_params' method." % (repr(estimator), type(estimator))
)
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError(
"Cannot clone object %s, as the constructor "
"either does not set or modifies parameter %s" % (estimator, name)
)
# _sklearn_output_config is used by `set_output` to configure the output
# container of an estimator.
if hasattr(estimator, "_sklearn_output_config"):
new_object._sklearn_output_config = copy.deepcopy(
estimator._sklearn_output_config
)
return new_object
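# A quick illustration of clone semantics (illustrative only): the clone keeps
# the hyperparameters but carries no fitted state and is a distinct object.
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> est = LogisticRegression(C=0.5)
# >>> cloned = clone(est)
# >>> cloned.C
# 0.5
# >>> cloned is est
# False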
class BaseEstimator:
"""Base class for all estimators in scikit-learn.
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError(
"scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention." % (cls, init_signature)
)
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params") and not isinstance(value, type):
deep_items = value.get_params().items()
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
local_valid_params = self._get_param_names()
raise ValueError(
f"Invalid parameter {key!r} for estimator {self}. "
f"Valid parameters are: {local_valid_params!r}."
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
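    # Nested parameters use the double-underscore convention described above
    # (illustrative only):
    #
    # >>> from sklearn.pipeline import Pipeline
    # >>> from sklearn.svm import SVC
    # >>> pipe = Pipeline([("clf", SVC())])
    # >>> _ = pipe.set_params(clf__C=10)
    # >>> pipe.get_params()["clf__C"]
    # 10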
def __repr__(self, N_CHAR_MAX=700):
# N_CHAR_MAX is the (approximate) maximum number of non-blank
# characters to render. We pass it as an optional parameter to ease
# the tests.
from .utils._pprint import _EstimatorPrettyPrinter
N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences
# use ellipsis for sequences with a lot of elements
pp = _EstimatorPrettyPrinter(
compact=True,
indent=1,
indent_at_name=True,
n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,
)
repr_ = pp.pformat(self)
# Use bruteforce ellipsis when there are a lot of non-blank characters
n_nonblank = len("".join(repr_.split()))
if n_nonblank > N_CHAR_MAX:
            lim = N_CHAR_MAX // 2  # approx. number of chars to keep on both ends
regex = r"^(\s*\S){%d}" % lim
# The regex '^(\s*\S){%d}' % n
# matches from the start of the string until the nth non-blank
# character:
# - ^ matches the start of string
# - (pattern){n} matches n repetitions of pattern
# - \s*\S matches a non-blank char following zero or more blanks
left_lim = re.match(regex, repr_).end()
right_lim = re.match(regex, repr_[::-1]).end()
if "\n" in repr_[left_lim:-right_lim]:
# The left side and right side aren't on the same line.
# To avoid weird cuts, e.g.:
# categoric...ore',
# we need to start the right side with an appropriate newline
# character so that it renders properly as:
# categoric...
# handle_unknown='ignore',
# so we add [^\n]*\n which matches until the next \n
regex += r"[^\n]*\n"
right_lim = re.match(regex, repr_[::-1]).end()
ellipsis = "..."
if left_lim + len(ellipsis) < len(repr_) - right_lim:
# Only add ellipsis if it results in a shorter repr
repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:]
return repr_
def __getstate__(self):
try:
state = super().__getstate__()
except AttributeError:
state = self.__dict__.copy()
if type(self).__module__.startswith("sklearn."):
return dict(state.items(), _sklearn_version=__version__)
else:
return state
def __setstate__(self, state):
if type(self).__module__.startswith("sklearn."):
pickle_version = state.pop("_sklearn_version", "pre-0.18")
if pickle_version != __version__:
warnings.warn(
"Trying to unpickle estimator {0} from version {1} when "
"using version {2}. This might lead to breaking code or "
"invalid results. Use at your own risk. "
"For more info please refer to:\n"
"https://scikit-learn.org/stable/model_persistence.html"
"#security-maintainability-limitations".format(
self.__class__.__name__, pickle_version, __version__
),
UserWarning,
)
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
def _more_tags(self):
return _DEFAULT_TAGS
def _get_tags(self):
collected_tags = {}
for base_class in reversed(inspect.getmro(self.__class__)):
if hasattr(base_class, "_more_tags"):
# need the if because mixins might not have _more_tags
# but might do redundant work in estimators
# (i.e. calling more tags on BaseEstimator multiple times)
more_tags = base_class._more_tags(self)
collected_tags.update(more_tags)
return collected_tags
def _check_n_features(self, X, reset):
"""Set the `n_features_in_` attribute, or check against it.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
reset : bool
If True, the `n_features_in_` attribute is set to `X.shape[1]`.
If False and the attribute exists, then check that it is equal to
`X.shape[1]`. If False and the attribute does *not* exist, then
the check is skipped.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
try:
n_features = _num_features(X)
except TypeError as e:
if not reset and hasattr(self, "n_features_in_"):
raise ValueError(
"X does not contain any features, but "
f"{self.__class__.__name__} is expecting "
f"{self.n_features_in_} features"
) from e
# If the number of features is not defined and reset=True,
# then we skip this check
return
if reset:
self.n_features_in_ = n_features
return
if not hasattr(self, "n_features_in_"):
# Skip this check if the expected number of expected input features
# was not recorded by calling fit first. This is typically the case
# for stateless transformers.
return
if n_features != self.n_features_in_:
raise ValueError(
f"X has {n_features} features, but {self.__class__.__name__} "
f"is expecting {self.n_features_in_} features as input."
)
def _check_feature_names(self, X, *, reset):
"""Set or check the `feature_names_in_` attribute.
.. versionadded:: 1.0
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
The input samples.
reset : bool
Whether to reset the `feature_names_in_` attribute.
If False, the input will be checked for consistency with
feature names of data provided when reset was last True.
.. note::
It is recommended to call `reset=True` in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
"""
if reset:
feature_names_in = _get_feature_names(X)
if feature_names_in is not None:
self.feature_names_in_ = feature_names_in
elif hasattr(self, "feature_names_in_"):
# Delete the attribute when the estimator is fitted on a new dataset
# that has no feature names.
delattr(self, "feature_names_in_")
return
fitted_feature_names = getattr(self, "feature_names_in_", None)
X_feature_names = _get_feature_names(X)
if fitted_feature_names is None and X_feature_names is None:
# no feature names seen in fit and in X
return
if X_feature_names is not None and fitted_feature_names is None:
warnings.warn(
f"X has feature names, but {self.__class__.__name__} was fitted without"
" feature names"
)
return
if X_feature_names is None and fitted_feature_names is not None:
warnings.warn(
"X does not have valid feature names, but"
f" {self.__class__.__name__} was fitted with feature names"
)
return
# validate the feature names against the `feature_names_in_` attribute
if len(fitted_feature_names) != len(X_feature_names) or np.any(
fitted_feature_names != X_feature_names
):
message = (
"The feature names should match those that were passed during fit.\n"
)
fitted_feature_names_set = set(fitted_feature_names)
X_feature_names_set = set(X_feature_names)
unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)
missing_names = sorted(fitted_feature_names_set - X_feature_names_set)
def add_names(names):
output = ""
max_n_names = 5
for i, name in enumerate(names):
if i >= max_n_names:
output += "- ...\n"
break
output += f"- {name}\n"
return output
if unexpected_names:
message += "Feature names unseen at fit time:\n"
message += add_names(unexpected_names)
if missing_names:
message += "Feature names seen at fit time, yet now missing:\n"
message += add_names(missing_names)
if not missing_names and not unexpected_names:
message += (
"Feature names must be in the same order as they were in fit.\n"
)
raise ValueError(message)
def _validate_data(
self,
X="no_validation",
y="no_validation",
reset=True,
validate_separately=False,
**check_params,
):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
                (n_samples, n_features), default='no_validation'
The input samples.
If `'no_validation'`, no validation is performed on `X`. This is
useful for meta-estimator which can delegate input validation to
their underlying estimator(s). In that case `y` must be passed and
the only accepted `check_params` are `multi_output` and
`y_numeric`.
y : array-like of shape (n_samples,), default='no_validation'
The targets.
- If `None`, `check_array` is called on `X`. If the estimator's
requires_y tag is True, then an error will be raised.
- If `'no_validation'`, `check_array` is called on `X` and the
estimator's requires_y tag is ignored. This is a default
placeholder and is never meant to be explicitly set. In that case
`X` must be passed.
- Otherwise, only `y` with `_check_y` or both `X` and `y` are
checked with either `check_array` or `check_X_y` depending on
`validate_separately`.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
            If False, call check_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
`estimator=self` is automatically added to these dicts to generate
more informative error message in case of invalid input data.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
`estimator=self` is automatically added to these params to generate
more informative error message in case of invalid input data.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if both `X` and `y` are
validated.
"""
self._check_feature_names(X, reset=reset)
if y is None and self._get_tags()["requires_y"]:
raise ValueError(
f"This {self.__class__.__name__} estimator "
"requires y to be passed, but the target y is None."
)
no_val_X = isinstance(X, str) and X == "no_validation"
        no_val_y = y is None or (isinstance(y, str) and y == "no_validation")
default_check_params = {"estimator": self}
check_params = {**default_check_params, **check_params}
if no_val_X and no_val_y:
raise ValueError("Validation should be done on X, y or both.")
elif not no_val_X and no_val_y:
X = check_array(X, input_name="X", **check_params)
out = X
elif no_val_X and not no_val_y:
y = _check_y(y, **check_params)
out = y
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
if "estimator" not in check_X_params:
check_X_params = {**default_check_params, **check_X_params}
X = check_array(X, input_name="X", **check_X_params)
if "estimator" not in check_y_params:
check_y_params = {**default_check_params, **check_y_params}
y = check_array(y, input_name="y", **check_y_params)
else:
X, y = check_X_y(X, y, **check_params)
out = X, y
if not no_val_X and check_params.get("ensure_2d", True):
self._check_n_features(X, reset=reset)
return out
def _validate_params(self):
"""Validate types and values of constructor parameters
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
"""
validate_parameter_constraints(
self._parameter_constraints,
self.get_params(deep=False),
caller_name=self.__class__.__name__,
)
@property
def _repr_html_(self):
"""HTML representation of estimator.
This is redundant with the logic of `_repr_mimebundle_`. The latter
        should be favored in the long term, `_repr_html_` is only
        implemented for consumers who do not interpret `_repr_mimebundle_`.
"""
if get_config()["display"] != "diagram":
raise AttributeError(
"_repr_html_ is only defined when the "
"'display' configuration option is set to "
"'diagram'"
)
return self._repr_html_inner
def _repr_html_inner(self):
"""This function is returned by the @property `_repr_html_` to make
        `hasattr(estimator, "_repr_html_")` return `True` or `False` depending
on `get_config()["display"]`.
"""
return estimator_html_repr(self)
def _repr_mimebundle_(self, **kwargs):
"""Mime bundle used by jupyter kernels to display estimator"""
output = {"text/plain": repr(self)}
if get_config()["display"] == "diagram":
output["text/html"] = estimator_html_repr(self)
return output
class ClassifierMixin:
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` wrt. `y`.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
def _more_tags(self):
return {"requires_y": True}
class RegressorMixin:
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination of the prediction.
The coefficient of determination :math:`R^2` is defined as
:math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
        sum of squares ``((y_true - y_pred) ** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` wrt. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
        with the default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
"""
from .metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
def _more_tags(self):
return {"requires_y": True}
class ClusterMixin:
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""
Perform clustering on `X` and returns cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
def _more_tags(self):
return {"preserves_dtype": []}
class BiclusterMixin:
"""Mixin class for all bicluster estimators in scikit-learn."""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse="csr")
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
class TransformerMixin(_SetOutputMixin):
"""Mixin class for all transformers in scikit-learn.
If :term:`get_feature_names_out` is defined, then `BaseEstimator` will
automatically wrap `transform` and `fit_transform` to follow the `set_output`
API. See the :ref:`developer_api_set_output` for details.
:class:`base.OneToOneFeatureMixin` and
:class:`base.ClassNamePrefixFeaturesOutMixin` are helpful mixins for
defining :term:`get_feature_names_out`.
"""
def fit_transform(self, X, y=None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
X_new : ndarray array of shape (n_samples, n_features_new)
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
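# The default implementation above is equivalent to calling fit and then
# transform (illustrative only):
#
# >>> import numpy as np
# >>> from sklearn.preprocessing import StandardScaler
# >>> X = np.array([[0.0], [2.0]])
# >>> np.allclose(StandardScaler().fit_transform(X),
# ...             StandardScaler().fit(X).transform(X))
# True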
class OneToOneFeatureMixin:
"""Provides `get_feature_names_out` for simple transformers.
This mixin assumes there's a 1-to-1 correspondence between input features
and output features, such as :class:`~preprocessing.StandardScaler`.
"""
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Same as input features.
"""
return _check_feature_names_in(self, input_features)
class ClassNamePrefixFeaturesOutMixin:
"""Mixin class for transformers that generate their own names by prefixing.
This mixin is useful when the transformer needs to generate its own feature
names out, such as :class:`~decomposition.PCA`. For example, if
:class:`~decomposition.PCA` outputs 3 features, then the generated feature
names out are: `["pca0", "pca1", "pca2"]`.
This mixin assumes that a `_n_features_out` attribute is defined when the
transformer is fitted. `_n_features_out` is the number of output features
    that the transformer will return in `transform` or `fit_transform`.
"""
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
        The feature names out will be prefixed by the lowercased class name. For
example, if the transformer outputs 3 features, then the feature names
out are: `["class_name0", "class_name1", "class_name2"]`.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "_n_features_out")
return _generate_get_feature_names_out(
self, self._n_features_out, input_features=input_features
)
class DensityMixin:
"""Mixin class for all density estimators in scikit-learn."""
_estimator_type = "DensityEstimator"
def score(self, X, y=None):
"""Return the score of the model on the data `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
score : float
"""
pass
class OutlierMixin:
"""Mixin class for all outlier detection estimators in scikit-learn."""
_estimator_type = "outlier_detector"
def fit_predict(self, X, y=None):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
"""
        # override for transductive outlier detectors like LocalOutlierFactor
return self.fit(X).predict(X)
class MetaEstimatorMixin:
    """Mixin class for all meta estimators in scikit-learn."""
    _required_parameters = ["estimator"]
class MultiOutputMixin:
"""Mixin to mark estimators that support multioutput."""
def _more_tags(self):
return {"multioutput": True}
class _UnstableArchMixin:
"""Mark estimators that are non-determinstic on 32bit or PowerPC"""
def _more_tags(self):
return {
"non_deterministic": (
_IS_32BIT or platform.machine().startswith(("ppc", "powerpc"))
)
}
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
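    Examples
    --------
    A minimal illustration:
    >>> from sklearn.svm import SVC
    >>> from sklearn.linear_model import LinearRegression
    >>> is_classifier(SVC())
    True
    >>> is_classifier(LinearRegression())
    False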
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
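    Examples
    --------
    A minimal illustration:
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.svm import SVC
    >>> is_regressor(LinearRegression())
    True
    >>> is_regressor(SVC())
    False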
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
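    Examples
    --------
    A minimal illustration, assuming :class:`~sklearn.ensemble.IsolationForest`:
    >>> from sklearn.ensemble import IsolationForest
    >>> from sklearn.svm import SVC
    >>> is_outlier_detector(IsolationForest())
    True
    >>> is_outlier_detector(SVC())
    False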
"""
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
| bsd-3-clause | c18298990493e5064d718d78cdac9860 | 34.857418 | 88 | 0.575605 | 4.393296 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/inspection/_plot/decision_boundary.py | 2 | 13204 | from functools import reduce
import numpy as np
from ...preprocessing import LabelEncoder
from ...utils import check_matplotlib_support
from ...utils import _safe_indexing
from ...base import is_regressor
from ...utils.validation import check_is_fitted, _is_arraylike_not_scalar
def _check_boundary_response_method(estimator, response_method):
"""Return prediction method from the `response_method` for decision boundary.
Parameters
----------
estimator : object
Fitted estimator to check.
response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}
Specifies whether to use :term:`predict_proba`,
:term:`decision_function`, :term:`predict` as the target response.
If set to 'auto', the response method is tried in the following order:
:term:`decision_function`, :term:`predict_proba`, :term:`predict`.
Returns
-------
prediction_method: callable
Prediction method of estimator.
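    Examples
    --------
    A minimal sketch of the 'auto' ordering for a binary classifier (here
    :class:`~sklearn.linear_model.LogisticRegression`, which exposes
    `decision_function`, so that method is picked first):
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = LogisticRegression().fit(X, y)
    >>> _check_boundary_response_method(clf, "auto").__name__
    'decision_function'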
"""
has_classes = hasattr(estimator, "classes_")
if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):
msg = "Multi-label and multi-output multi-class classifiers are not supported"
raise ValueError(msg)
if has_classes and len(estimator.classes_) > 2:
if response_method not in {"auto", "predict"}:
msg = (
"Multiclass classifiers are only supported when response_method is"
" 'predict' or 'auto'"
)
raise ValueError(msg)
methods_list = ["predict"]
elif response_method == "auto":
methods_list = ["decision_function", "predict_proba", "predict"]
else:
methods_list = [response_method]
prediction_method = [getattr(estimator, method, None) for method in methods_list]
prediction_method = reduce(lambda x, y: x or y, prediction_method)
if prediction_method is None:
raise ValueError(
f"{estimator.__class__.__name__} has none of the following attributes: "
f"{', '.join(methods_list)}."
)
return prediction_method
class DecisionBoundaryDisplay:
"""Decisions boundary visualization.
It is recommended to use
:func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
attributes.
Read more in the :ref:`User Guide <visualizations>`.
.. versionadded:: 1.1
Parameters
----------
xx0 : ndarray of shape (grid_resolution, grid_resolution)
First output of :func:`meshgrid <numpy.meshgrid>`.
xx1 : ndarray of shape (grid_resolution, grid_resolution)
Second output of :func:`meshgrid <numpy.meshgrid>`.
response : ndarray of shape (grid_resolution, grid_resolution)
Values of the response function.
xlabel : str, default=None
Default label to place on x axis.
ylabel : str, default=None
Default label to place on y axis.
Attributes
----------
surface_ : matplotlib `QuadContourSet` or `QuadMesh`
If `plot_method` is 'contour' or 'contourf', `surface_` is a
:class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
`plot_method` is 'pcolormesh', `surface_` is a
:class:`QuadMesh <matplotlib.collections.QuadMesh>`.
    ax_ : matplotlib Axes
        Axes with the decision boundary.
    figure_ : matplotlib Figure
        Figure containing the decision boundary.
See Also
--------
DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> from sklearn.tree import DecisionTreeClassifier
>>> iris = load_iris()
>>> feature_1, feature_2 = np.meshgrid(
... np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
... np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
... )
>>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
>>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
>>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
>>> display = DecisionBoundaryDisplay(
... xx0=feature_1, xx1=feature_2, response=y_pred
... )
>>> display.plot()
<...>
>>> display.ax_.scatter(
... iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black"
... )
<...>
>>> plt.show()
"""
def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
self.xx0 = xx0
self.xx1 = xx1
self.response = response
self.xlabel = xlabel
self.ylabel = ylabel
def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
"""Plot visualization.
Parameters
----------
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
xlabel : str, default=None
Overwrite the x-axis label.
ylabel : str, default=None
Overwrite the y-axis label.
**kwargs : dict
Additional keyword arguments to be passed to the `plot_method`.
Returns
-------
display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores computed values.
"""
check_matplotlib_support("DecisionBoundaryDisplay.plot")
import matplotlib.pyplot as plt # noqa
if plot_method not in ("contourf", "contour", "pcolormesh"):
raise ValueError(
"plot_method must be 'contourf', 'contour', or 'pcolormesh'"
)
if ax is None:
_, ax = plt.subplots()
plot_func = getattr(ax, plot_method)
self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)
if xlabel is not None or not ax.get_xlabel():
xlabel = self.xlabel if xlabel is None else xlabel
ax.set_xlabel(xlabel)
if ylabel is not None or not ax.get_ylabel():
ylabel = self.ylabel if ylabel is None else ylabel
ax.set_ylabel(ylabel)
self.ax_ = ax
self.figure_ = ax.figure
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
*,
grid_resolution=100,
eps=1.0,
plot_method="contourf",
response_method="auto",
xlabel=None,
ylabel=None,
ax=None,
**kwargs,
):
"""Plot decision boundary given an estimator.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : object
Trained estimator used to plot the decision boundary.
X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
Input data that should be only 2-dimensional.
grid_resolution : int, default=100
Number of grid points to use for plotting decision boundary.
Higher values will make the plot look nicer but be slower to
render.
eps : float, default=1.0
Extends the minimum and maximum values of X for evaluating the
response function.
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
response_method : {'auto', 'predict_proba', 'decision_function', \
'predict'}, default='auto'
Specifies whether to use :term:`predict_proba`,
:term:`decision_function`, :term:`predict` as the target response.
If set to 'auto', the response method is tried in the following order:
:term:`decision_function`, :term:`predict_proba`, :term:`predict`.
For multiclass problems, :term:`predict` is selected when
`response_method="auto"`.
xlabel : str, default=None
The label used for the x-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ylabel : str, default=None
The label used for the y-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Additional keyword arguments to be passed to the
`plot_method`.
Returns
-------
display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores the result.
See Also
--------
DecisionBoundaryDisplay : Decision boundary visualization.
ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
given an estimator, the data, and the label.
ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
given the true and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> iris = load_iris()
>>> X = iris.data[:, :2]
>>> classifier = LogisticRegression().fit(X, iris.target)
>>> disp = DecisionBoundaryDisplay.from_estimator(
... classifier, X, response_method="predict",
... xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
... alpha=0.5,
... )
>>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
check_is_fitted(estimator)
if not grid_resolution > 1:
raise ValueError(
"grid_resolution must be greater than 1. Got"
f" {grid_resolution} instead."
)
if not eps >= 0:
raise ValueError(
f"eps must be greater than or equal to 0. Got {eps} instead."
)
possible_plot_methods = ("contourf", "contour", "pcolormesh")
if plot_method not in possible_plot_methods:
available_methods = ", ".join(possible_plot_methods)
raise ValueError(
f"plot_method must be one of {available_methods}. "
f"Got {plot_method} instead."
)
x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)
x0_min, x0_max = x0.min() - eps, x0.max() + eps
x1_min, x1_max = x1.min() - eps, x1.max() + eps
xx0, xx1 = np.meshgrid(
np.linspace(x0_min, x0_max, grid_resolution),
np.linspace(x1_min, x1_max, grid_resolution),
)
if hasattr(X, "iloc"):
# we need to preserve the feature names and therefore get an empty dataframe
X_grid = X.iloc[[], :].copy()
X_grid.iloc[:, 0] = xx0.ravel()
X_grid.iloc[:, 1] = xx1.ravel()
else:
X_grid = np.c_[xx0.ravel(), xx1.ravel()]
pred_func = _check_boundary_response_method(estimator, response_method)
response = pred_func(X_grid)
# convert classes predictions into integers
if pred_func.__name__ == "predict" and hasattr(estimator, "classes_"):
encoder = LabelEncoder()
encoder.classes_ = estimator.classes_
response = encoder.transform(response)
if response.ndim != 1:
if is_regressor(estimator):
raise ValueError("Multi-output regressors are not supported")
# TODO: Support pos_label
response = response[:, 1]
if xlabel is None:
xlabel = X.columns[0] if hasattr(X, "columns") else ""
if ylabel is None:
ylabel = X.columns[1] if hasattr(X, "columns") else ""
display = DecisionBoundaryDisplay(
xx0=xx0,
xx1=xx1,
response=response.reshape(xx0.shape),
xlabel=xlabel,
ylabel=ylabel,
)
return display.plot(ax=ax, plot_method=plot_method, **kwargs)
| bsd-3-clause | 4427f261d0e7ed39d4eaabc081b952ed | 35.175342 | 88 | 0.592018 | 4.219879 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/impute/_iterative.py | 1 | 34743 | from time import time
from collections import namedtuple
from numbers import Integral, Real
import warnings
from scipy import stats
import numpy as np
from ..base import clone
from ..exceptions import ConvergenceWarning
from ..preprocessing import normalize
from ..utils import (
check_array,
check_random_state,
is_scalar_nan,
_safe_assign,
_safe_indexing,
)
from ..utils.validation import FLOAT_DTYPES, check_is_fitted
from ..utils.validation import _check_feature_names_in
from ..utils._mask import _get_mask
from ..utils._param_validation import HasMethods, Interval, StrOptions
from ._base import _BaseImputer
from ._base import SimpleImputer
from ._base import _check_inputs_dtype
_ImputerTriplet = namedtuple(
"_ImputerTriplet", ["feat_idx", "neighbor_feat_idx", "estimator"]
)
def _assign_where(X1, X2, cond):
"""Assign X2 to X1 where cond is True.
Parameters
----------
X1 : ndarray or dataframe of shape (n_samples, n_features)
Data.
X2 : ndarray of shape (n_samples, n_features)
Data to be assigned.
cond : ndarray of shape (n_samples, n_features)
Boolean mask to assign data.
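    Examples
    --------
    A minimal sketch with plain ndarrays (the dataframe branch behaves
    analogously via ``DataFrame.mask``):
    >>> import numpy as np
    >>> X1 = np.array([[1.0, 2.0], [3.0, 4.0]])
    >>> X2 = np.zeros_like(X1)
    >>> _assign_where(X1, X2, cond=np.array([[True, False], [False, True]]))
    >>> X1
    array([[0., 2.],
           [3., 0.]])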
"""
if hasattr(X1, "mask"): # pandas dataframes
X1.mask(cond=cond, other=X2, inplace=True)
else: # ndarrays
X1[cond] = X2[cond]
class IterativeImputer(_BaseImputer):
"""Multivariate imputer that estimates each feature from all the others.
A strategy for imputing missing values by modeling each feature with
missing values as a function of other features in a round-robin fashion.
Read more in the :ref:`User Guide <iterative_imputer>`.
.. versionadded:: 0.21
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import `enable_iterative_imputer`::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_iterative_imputer # noqa
>>> # now you can import normally from sklearn.impute
>>> from sklearn.impute import IterativeImputer
Parameters
----------
estimator : estimator object, default=BayesianRidge()
The estimator to use at each step of the round-robin imputation.
If `sample_posterior=True`, the estimator must support
`return_std` in its `predict` method.
missing_values : int or np.nan, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
sample_posterior : bool, default=False
Whether to sample from the (Gaussian) predictive posterior of the
fitted estimator for each imputation. Estimator must support
`return_std` in its `predict` method if set to `True`. Set to
`True` if using `IterativeImputer` for multiple imputations.
max_iter : int, default=10
Maximum number of imputation rounds to perform before returning the
imputations computed during the final round. A round is a single
imputation of each feature with missing values. The stopping criterion
is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`,
where `X_t` is `X` at iteration `t`. Note that early stopping is only
applied if `sample_posterior=False`.
tol : float, default=1e-3
Tolerance of the stopping condition.
n_nearest_features : int, default=None
Number of other features to use to estimate the missing values of
each feature column. Nearness between features is measured using
the absolute correlation coefficient between each feature pair (after
initial imputation). To ensure coverage of features throughout the
imputation process, the neighbor features are not necessarily nearest,
but are drawn with probability proportional to correlation for each
imputed target feature. Can provide significant speed-up when the
number of features is huge. If `None`, all features will be used.
initial_strategy : {'mean', 'median', 'most_frequent', 'constant'}, \
default='mean'
Which strategy to use to initialize the missing values. Same as the
`strategy` parameter in :class:`~sklearn.impute.SimpleImputer`.
imputation_order : {'ascending', 'descending', 'roman', 'arabic', \
'random'}, default='ascending'
The order in which the features will be imputed. Possible values:
- `'ascending'`: From features with fewest missing values to most.
- `'descending'`: From features with most missing values to fewest.
- `'roman'`: Left to right.
- `'arabic'`: Right to left.
- `'random'`: A random order for each round.
skip_complete : bool, default=False
If `True` then features with missing values during :meth:`transform`
which did not have any missing values during :meth:`fit` will be
imputed with the initial imputation method only. Set to `True` if you
have many features with no missing values at both :meth:`fit` and
:meth:`transform` time to save compute.
min_value : float or array-like of shape (n_features,), default=-np.inf
Minimum possible imputed value. Broadcast to shape `(n_features,)` if
scalar. If array-like, expects shape `(n_features,)`, one min value for
each feature. The default is `-np.inf`.
.. versionchanged:: 0.23
Added support for array-like.
max_value : float or array-like of shape (n_features,), default=np.inf
Maximum possible imputed value. Broadcast to shape `(n_features,)` if
scalar. If array-like, expects shape `(n_features,)`, one max value for
each feature. The default is `np.inf`.
.. versionchanged:: 0.23
Added support for array-like.
verbose : int, default=0
Verbosity flag, controls the debug messages that are issued
as functions are evaluated. The higher, the more verbose. Can be 0, 1,
or 2.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use. Randomizes
selection of estimator features if `n_nearest_features` is not `None`,
the `imputation_order` if `random`, and the sampling from posterior if
`sample_posterior=True`. Use an integer for determinism.
See :term:`the Glossary <random_state>`.
add_indicator : bool, default=False
If `True`, a :class:`MissingIndicator` transform will stack onto output
of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on
the missing indicator even if there are missing values at
transform/test time.
keep_empty_features : bool, default=False
If True, features that consist exclusively of missing values when
`fit` is called are returned in results when `transform` is called.
The imputed value is always `0` except when
`initial_strategy="constant"` in which case `fill_value` will be
used instead.
.. versionadded:: 1.2
Attributes
----------
initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer`
Imputer used to initialize the missing values.
imputation_sequence_ : list of tuples
Each tuple has `(feat_idx, neighbor_feat_idx, estimator)`, where
`feat_idx` is the current feature to be imputed,
`neighbor_feat_idx` is the array of other features used to impute the
current feature, and `estimator` is the trained estimator used for
the imputation. Length is `self.n_features_with_missing_ *
self.n_iter_`.
n_iter_ : int
Number of iteration rounds that occurred. Will be less than
`self.max_iter` if early stopping criterion was reached.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_features_with_missing_ : int
Number of features with missing values.
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
`None` if `add_indicator=False`.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
number generator or by `np.random`.
See Also
--------
SimpleImputer : Univariate imputer for completing missing values
with simple strategies.
KNNImputer : Multivariate imputer that estimates missing features using
nearest samples.
Notes
-----
To support imputation in inductive mode we store each feature's estimator
during the :meth:`fit` phase, and predict without refitting (in order)
during the :meth:`transform` phase.
Features which contain all missing values at :meth:`fit` are discarded upon
:meth:`transform`.
Using defaults, the imputer scales in :math:`\\mathcal{O}(knp^3\\min(n,p))`
where :math:`k` = `max_iter`, :math:`n` the number of samples and
:math:`p` the number of features. It thus becomes prohibitively costly when
the number of features increases. Setting
`n_nearest_features << n_features`, `skip_complete=True` or increasing `tol`
can help to reduce its computational cost.
Depending on the nature of missing values, simple imputers can be
preferable in a prediction context.
References
----------
.. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
Multivariate Imputation by Chained Equations in R". Journal of
Statistical Software 45: 1-67.
<https://www.jstatsoft.org/article/view/v045i03>`_
.. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in
Multivariate Data Suitable for use with an Electronic Computer".
Journal of the Royal Statistical Society 22(2): 302-306.
<https://www.jstor.org/stable/2984099>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.experimental import enable_iterative_imputer
>>> from sklearn.impute import IterativeImputer
>>> imp_mean = IterativeImputer(random_state=0)
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
IterativeImputer(random_state=0)
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> imp_mean.transform(X)
array([[ 6.9584..., 2. , 3. ],
[ 4. , 2.6000..., 6. ],
[10. , 4.9999..., 9. ]])
"""
_parameter_constraints: dict = {
**_BaseImputer._parameter_constraints,
"estimator": [None, HasMethods(["fit", "predict"])],
"sample_posterior": ["boolean"],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"tol": [Interval(Real, 0, None, closed="left")],
"n_nearest_features": [None, Interval(Integral, 1, None, closed="left")],
"initial_strategy": [
StrOptions({"mean", "median", "most_frequent", "constant"})
],
"imputation_order": [
StrOptions({"ascending", "descending", "roman", "arabic", "random"})
],
"skip_complete": ["boolean"],
"min_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
"max_value": [None, Interval(Real, None, None, closed="both"), "array-like"],
"verbose": ["verbose"],
"random_state": ["random_state"],
}
def __init__(
self,
estimator=None,
*,
missing_values=np.nan,
sample_posterior=False,
max_iter=10,
tol=1e-3,
n_nearest_features=None,
initial_strategy="mean",
imputation_order="ascending",
skip_complete=False,
min_value=-np.inf,
max_value=np.inf,
verbose=0,
random_state=None,
add_indicator=False,
keep_empty_features=False,
):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator,
keep_empty_features=keep_empty_features,
)
self.estimator = estimator
self.sample_posterior = sample_posterior
self.max_iter = max_iter
self.tol = tol
self.n_nearest_features = n_nearest_features
self.initial_strategy = initial_strategy
self.imputation_order = imputation_order
self.skip_complete = skip_complete
self.min_value = min_value
self.max_value = max_value
self.verbose = verbose
self.random_state = random_state
def _impute_one_feature(
self,
X_filled,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
):
"""Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The `estimator` must
        support `return_std=True` in its `predict` method when
        `sample_posterior=True`.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing `feat_idx`.
estimator : object
The estimator to use at this step of the round-robin imputation.
If `sample_posterior=True`, the estimator must support
`return_std` in its `predict` method.
If None, it will be cloned from self._estimator.
fit_mode : boolean, default=True
Whether to fit and predict with the estimator or just predict.
Returns
-------
X_filled : ndarray
Input data with `X_filled[missing_row_mask, feat_idx]` updated.
estimator : estimator with sklearn API
The fitted estimator used to impute
`X_filled[missing_row_mask, feat_idx]`.
"""
if estimator is None and fit_mode is False:
raise ValueError(
"If fit_mode is False, then an already-fitted "
"estimator should be passed in."
)
if estimator is None:
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if fit_mode:
X_train = _safe_indexing(
_safe_indexing(X_filled, neighbor_feat_idx, axis=1),
~missing_row_mask,
axis=0,
)
y_train = _safe_indexing(
_safe_indexing(X_filled, feat_idx, axis=1),
~missing_row_mask,
axis=0,
)
estimator.fit(X_train, y_train)
# if no missing values, don't predict
if np.sum(missing_row_mask) == 0:
return X_filled, estimator
# get posterior samples if there is at least one missing value
X_test = _safe_indexing(
_safe_indexing(X_filled, neighbor_feat_idx, axis=1),
missing_row_mask,
axis=0,
)
if self.sample_posterior:
mus, sigmas = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
# two types of problems: (1) non-positive sigmas
# (2) mus outside legal range of min_value and max_value
# (results in inf sample)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[feat_idx]
imputed_values[mus_too_low] = self._min_value[feat_idx]
mus_too_high = mus > self._max_value[feat_idx]
imputed_values[mus_too_high] = self._max_value[feat_idx]
# the rest can be sampled without statistical issues
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[feat_idx] - mus) / sigmas
b = (self._max_value[feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(
random_state=self.random_state_
)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(
imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
)
# update the feature
_safe_assign(
X_filled,
imputed_values,
row_indexer=missing_row_mask,
column_indexer=feat_idx,
)
return X_filled, estimator
def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
"""Get a list of other features to predict `feat_idx`.
        If `self.n_nearest_features` is strictly less than the total
number of features, then use a probability proportional to the absolute
correlation between `feat_idx` and each other feature to randomly
choose a subsample of the other features (without replacement).
Parameters
----------
n_features : int
Number of features in `X`.
feat_idx : int
Index of the feature currently being imputed.
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X`. The diagonal has been zeroed
out and each feature has been normalized to sum to 1. Can be None.
Returns
-------
neighbor_feat_idx : array-like
The features to use to impute `feat_idx`.
"""
if self.n_nearest_features is not None and self.n_nearest_features < n_features:
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(
np.arange(n_features), self.n_nearest_features, replace=False, p=p
)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
return neighbor_feat_idx
def _get_ordered_idx(self, mask_missing_values):
"""Decide in what order we will update the features.
As a homage to the MICE R package, we will have 4 main options of
how to order the updates, and use a random order if anything else
is specified.
Also, this function skips features which have no missing values.
Parameters
----------
mask_missing_values : array-like, shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features.
Returns
-------
ordered_idx : ndarray, shape (n_features,)
The order in which to impute the features.
"""
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == "roman":
ordered_idx = missing_values_idx
elif self.imputation_order == "arabic":
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == "ascending":
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
elif self.imputation_order == "descending":
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
elif self.imputation_order == "random":
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
return ordered_idx
def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
"""Get absolute correlation matrix between features.
Parameters
----------
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
tolerance : float, default=1e-6
`abs_corr_mat` can have nans, which will be replaced
with `tolerance`.
Returns
-------
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X` at the beginning of the
current round. The diagonal has been zeroed out and each feature's
absolute correlations with all others have been normalized to sum
to 1.
"""
n_features = X_filled.shape[1]
if self.n_nearest_features is None or self.n_nearest_features >= n_features:
return None
with np.errstate(invalid="ignore"):
# if a feature in the neighborhood has only a single value
# (e.g., categorical feature), the std. dev. will be null and
# np.corrcoef will raise a warning due to a division by zero
abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
# np.corrcoef is not defined for features with zero std
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
# ensures exploration, i.e. at least some probability of sampling
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
# features are not their own neighbors
np.fill_diagonal(abs_corr_mat, 0)
# needs to sum to 1 for np.random.choice sampling
abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
return abs_corr_mat
def _initial_imputation(self, X, in_fit=False):
"""Perform initial imputation for input `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
in_fit : bool, default=False
Whether function is called in :meth:`fit`.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
X_filled : ndarray of shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray of shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features,
masked by non-missing features.
X_missing_mask : ndarray, shape (n_samples, n_features)
Input data's mask matrix indicating missing datapoints, where
`n_samples` is the number of samples and `n_features` is the
number of features.
"""
if is_scalar_nan(self.missing_values):
force_all_finite = "allow-nan"
else:
force_all_finite = True
X = self._validate_data(
X,
dtype=FLOAT_DTYPES,
order="F",
reset=in_fit,
force_all_finite=force_all_finite,
)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(
missing_values=self.missing_values,
strategy=self.initial_strategy,
keep_empty_features=self.keep_empty_features,
)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(
np.logical_not(np.isnan(self.initial_imputer_.statistics_))
)
if not self.keep_empty_features:
# drop empty features
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
else:
# mark empty features as not missing and keep the original
# imputation
mask_missing_values[:, valid_mask] = True
Xt = X
return Xt, X_filled, mask_missing_values, X_missing_mask
@staticmethod
def _validate_limit(limit, limit_type, n_features):
"""Validate the limits (min/max) of the feature values.
Converts scalar min/max limits to vectors of shape `(n_features,)`.
Parameters
----------
limit: scalar or array-like
            The user-specified limit (i.e., min_value or max_value).
limit_type: {'max', 'min'}
Type of limit to validate.
n_features: int
Number of features in the dataset.
Returns
-------
limit: ndarray, shape(n_features,)
Array of limits, one for each feature.
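        Examples
        --------
        A minimal sketch of the scalar broadcast:
        >>> IterativeImputer._validate_limit(3, "max", 2)
        array([3, 3])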
"""
limit_bound = np.inf if limit_type == "max" else -np.inf
limit = limit_bound if limit is None else limit
if np.isscalar(limit):
limit = np.full(n_features, limit)
limit = check_array(limit, force_all_finite=False, copy=False, ensure_2d=False)
if not limit.shape[0] == n_features:
raise ValueError(
f"'{limit_type}_value' should be of "
f"shape ({n_features},) when an array-like "
f"is provided. Got {limit.shape}, instead."
)
return limit
def fit_transform(self, X, y=None):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
self._validate_params()
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=True
)
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
        # Edge case: a single feature. We return the initial imputation.
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError("One (or more) features have min_value >= max_value.")
# order in which to impute
# note this is probably too slow for large feature data (d > 100000)
# and a better way would be good.
# see: https://goo.gl/KyCNwj and subsequent comments
ordered_idx = self._get_ordered_idx(mask_missing_values)
self.n_features_with_missing_ = len(ordered_idx)
abs_corr_mat = self._get_abs_corr_mat(Xt)
n_samples, n_features = Xt.shape
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == "random":
ordered_idx = self._get_ordered_idx(mask_missing_values)
for feat_idx in ordered_idx:
neighbor_feat_idx = self._get_neighbor_feat_idx(
n_features, feat_idx, abs_corr_mat
)
Xt, estimator = self._impute_one_feature(
Xt,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
)
estimator_triplet = _ImputerTriplet(
feat_idx, neighbor_feat_idx, estimator
)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (self.n_iter_, self.max_iter, time() - start_t)
)
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print(
"[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
inf_norm, normalized_tol
)
)
if inf_norm < normalized_tol:
if self.verbose > 0:
print("[IterativeImputer] Early stopping criterion reached.")
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn(
"[IterativeImputer] Early stopping criterion not reached.",
ConvergenceWarning,
)
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
def transform(self, X):
"""Impute all missing values in `X`.
        Note that this is stochastic: if `random_state` is not fixed,
        repeated calls or permuted input will yield different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self)
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=False
)
X_indicator = super()._transform_indicator(complete_mask)
if self.n_iter_ == 0 or np.all(mask_missing_values):
return super()._concatenate_indicator(Xt, X_indicator)
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
i_rnd = 0
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
for it, estimator_triplet in enumerate(self.imputation_sequence_):
Xt, _ = self._impute_one_feature(
Xt,
mask_missing_values,
estimator_triplet.feat_idx,
estimator_triplet.neighbor_feat_idx,
estimator=estimator_triplet.estimator,
fit_mode=False,
)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (i_rnd + 1, self.n_iter_, time() - start_t)
)
i_rnd += 1
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
def fit(self, X, y=None):
"""Fit the imputer on `X` and return self.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
self.fit_transform(X)
return self
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
input_features = _check_feature_names_in(self, input_features)
names = self.initial_imputer_.get_feature_names_out(input_features)
return self._concatenate_indicator_feature_names_out(names, input_features)
| bsd-3-clause | d245a91518fb1d7006864f849f761ab1 | 38.08099 | 88 | 0.594336 | 4.081649 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/model_selection/_split.py | 8 | 95127 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# Leandro Hermida <hermidal@cs.umd.edu>
# Rodion Martynov <marrodion@gmail.com>
# License: BSD 3 clause
from collections.abc import Iterable
from collections import defaultdict
import warnings
from itertools import chain, combinations
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
from inspect import signature
import numpy as np
from scipy.special import comb
from ..utils import indexable, check_random_state, _safe_indexing
from ..utils import _approximate_mode
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
__all__ = [
"BaseCrossValidator",
"KFold",
"GroupKFold",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"RepeatedStratifiedKFold",
"RepeatedKFold",
"ShuffleSplit",
"GroupShuffleSplit",
"StratifiedKFold",
"StratifiedGroupKFold",
"StratifiedShuffleSplit",
"PredefinedSplit",
"train_test_split",
"check_cv",
]
class BaseCrossValidator(metaclass=ABCMeta):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <leave_one_out>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for i, (train_index, test_index) in enumerate(loo.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1]
Test: index=[0]
Fold 1:
Train: index=[0]
Test: index=[1]
See Also
--------
LeaveOneGroupOut : For splitting the data according to explicit,
domain-specific stratification of the dataset.
GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= 1:
raise ValueError(
"Cannot perform LeaveOneOut with n_samples={}.".format(n_samples)
)
return range(n_samples)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
    number of samples, this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <leave_p_out>`.
Parameters
----------
p : int
Size of the test sets. Must be strictly less than the number of
samples.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for i, (train_index, test_index) in enumerate(lpo.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[2 3]
Test: index=[0 1]
Fold 1:
Train: index=[1 3]
Test: index=[0 2]
Fold 2:
Train: index=[1 2]
Test: index=[0 3]
Fold 3:
Train: index=[0 3]
Test: index=[1 2]
Fold 4:
Train: index=[0 2]
Test: index=[1 3]
Fold 5:
Train: index=[0 1]
Test: index=[2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= self.p:
raise ValueError(
"p={} must be strictly less than the number of samples={}".format(
self.p, n_samples
)
)
for combination in combinations(range(n_samples), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, *, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError(
"The number of folds must be of Integral type. "
"%s of type %s was passed." % (n_splits, type(n_splits))
)
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits)
)
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False; got {0}".format(shuffle))
if not shuffle and random_state is not None: # None is the default
raise ValueError(
"Setting a random_state has no effect since shuffle is "
"False. You should leave "
"random_state to its default (None), or set shuffle=True.",
)
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
(
"Cannot have number of splits n_splits={0} greater"
" than the number of samples: n_samples={1}."
).format(self.n_splits, n_samples)
)
for train, test in super().split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle the data before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold. Otherwise, this
parameter has no effect.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf)
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(kf.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[2 3]
Test: index=[0 1]
Fold 1:
Train: index=[0 1]
Test: index=[2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
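    For example (a quick sketch), 10 samples split into 3 folds give sizes
    4, 3 and 3:
    >>> import numpy as np
    >>> [len(test) for _, test in KFold(n_splits=3).split(np.ones(10))]
    [4, 3, 3]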
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
StratifiedKFold : Takes class information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold : K-fold iterator variant with non-overlapping groups.
RepeatedKFold : Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
fold_sizes[: n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Read more in the :ref:`User Guide <group_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
Notes
-----
Groups appear in an arbitrary order throughout the folds.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> groups = np.array([0, 0, 2, 2, 3, 3])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3], group=[2 2]
Test: index=[0 1 4 5], group=[0 0 3 3]
Fold 1:
Train: index=[0 1 4 5], group=[0 0 3 3]
Test: index=[2 3], group=[2 2]
See Also
--------
LeaveOneGroupOut : For splitting the data according to explicit
domain-specific stratification of the dataset.
StratifiedKFold : Takes class information into account to avoid building
folds with imbalanced class proportions (for binary or multiclass
classification tasks).
"""
def __init__(self, n_splits=5):
super().__init__(n_splits, shuffle=False, random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError(
"Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d." % (self.n_splits, n_groups)
)
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator.
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <stratified_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf)
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(skf.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3]
Test: index=[0 2]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
Notes
-----
The implementation is designed to:
* Generate test sets such that all contain the same distribution of
classes, or as close as possible.
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Preserve order dependencies in the dataset ordering, when
``shuffle=False``: all samples from class k in some test set were
contiguous in y, or separated in y by samples from classes other than k.
* Generate test sets where the smallest and largest differ by at most one
sample.
.. versionchanged:: 0.22
The previous implementation did not follow the last constraint.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _make_test_folds(self, X, y=None):
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ("binary", "multiclass")
if type_of_target_y not in allowed_target_types:
raise ValueError(
"Supported target types are: {}. Got {!r} instead.".format(
allowed_target_types, type_of_target_y
)
)
y = column_or_1d(y)
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
# y_inv encodes y according to lexicographic order. We invert y_idx to
# map the classes so that they are encoded by order of appearance:
# 0 represents the first label appearing in y, 1 the second, etc.
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError(
"n_splits=%d cannot be greater than the"
" number of members in each class." % (self.n_splits)
)
if self.n_splits > min_groups:
warnings.warn(
"The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (min_groups, self.n_splits),
UserWarning,
)
# Determine the optimal number of samples from each class in each fold,
        # using round robin over the sorted y. (This can be done directly from
# counts, but that code is unreadable.)
y_order = np.sort(y_encoded)
allocation = np.asarray(
[
np.bincount(y_order[i :: self.n_splits], minlength=n_classes)
for i in range(self.n_splits)
]
)
# To maintain the data order dependencies as best as possible within
# the stratification constraint, we assign samples from each class in
# blocks (and then mess that up when shuffle=True).
test_folds = np.empty(len(y), dtype="i")
for k in range(n_classes):
# since the kth column of allocation stores the number of samples
# of class k in each test set, this generates blocks of fold
# indices corresponding to the allocation for class k.
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
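# --- Illustrative sketch (not part of the library API) ----------------------
# ``StratifiedKFold._make_test_folds`` above allocates the samples of each
# class to test folds by round robin over the sorted class codes. A tiny
# hand-check with a hypothetical helper name:
def _sketch_stratified_allocation(y_encoded, n_splits):
    """Number of samples of each class placed in each test fold."""
    n_classes = int(np.max(y_encoded)) + 1
    y_order = np.sort(y_encoded)  # e.g. [0 0 0 1 1]
    return np.asarray(
        [
            np.bincount(y_order[i :: n_splits], minlength=n_classes)
            for i in range(n_splits)
        ]
    )
# _sketch_stratified_allocation(np.array([0, 1, 0, 1, 0]), n_splits=2)
# -> array([[2, 1],    fold 0 receives 2 samples of class 0, 1 of class 1
#           [1, 1]])   fold 1 receives 1 sample of each class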
class StratifiedGroupKFold(_BaseKFold):
"""Stratified K-Folds iterator variant with non-overlapping groups.
    This cross-validation object is a variation of StratifiedKFold that attempts
    to return stratified folds with non-overlapping groups. The folds are made by
    preserving the percentage of samples for each class.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The difference between :class:`~sklearn.model_selection.GroupKFold`
and :class:`~sklearn.model_selection.StratifiedGroupKFold` is that
the former attempts to create balanced folds such that the number of
distinct groups is approximately the same in each fold, whereas
StratifiedGroupKFold attempts to create folds which preserve the
percentage of samples for each class as much as possible given the
constraint of non-overlapping groups between splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
        This implementation can only shuffle groups that have approximately the
        same y distribution; no global shuffle will be performed.
random_state : int or RandomState instance, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedGroupKFold
>>> X = np.ones((17, 2))
>>> y = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> groups = np.array([1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8])
>>> sgkf = StratifiedGroupKFold(n_splits=3)
>>> sgkf.get_n_splits(X, y)
3
>>> print(sgkf)
StratifiedGroupKFold(n_splits=3, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(sgkf.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" group={groups[train_index]}")
... print(f" Test: index={test_index}")
... print(f" group={groups[test_index]}")
Fold 0:
Train: index=[ 0 1 2 3 7 8 9 10 11 15 16]
group=[1 1 2 2 4 5 5 5 5 8 8]
Test: index=[ 4 5 6 12 13 14]
group=[3 3 3 6 6 7]
Fold 1:
Train: index=[ 4 5 6 7 8 9 10 11 12 13 14]
group=[3 3 3 4 5 5 5 5 6 6 7]
Test: index=[ 0 1 2 3 15 16]
group=[1 1 2 2 8 8]
Fold 2:
Train: index=[ 0 1 2 3 4 5 6 12 13 14 15 16]
group=[1 1 2 2 3 3 3 6 6 7 8 8]
Test: index=[ 7 8 9 10 11]
group=[4 5 5 5 5]
Notes
-----
The implementation is designed to:
* Mimic the behavior of StratifiedKFold as much as possible for trivial
groups (e.g. when each group contains only one sample).
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Stratify based on samples as much as possible while keeping
non-overlapping groups constraint. That means that in some cases when
there is a small number of groups containing a large number of samples
the stratification will not be possible and the behavior will be close
to GroupKFold.
    See Also
--------
StratifiedKFold: Takes class information into account to build folds which
retain class distributions (for binary or multiclass classification
tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=5, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _iter_test_indices(self, X, y, groups):
# Implementation is based on this kaggle kernel:
# https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation
        # and is subject to the Apache 2.0 License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Changelist:
# - Refactored function to a class following scikit-learn KFold
# interface.
# - Added heuristic for assigning group to the least populated fold in
# cases when all other criteria are equal
        # - Switch from using python ``Counter`` to ``np.unique`` to get class
# distribution
# - Added scikit-learn checks for input: checking that target is binary
# or multiclass, checking passed random state, checking that number
# of splits is less than number of members in each class, checking
# that least populated class has more members than there are splits.
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ("binary", "multiclass")
if type_of_target_y not in allowed_target_types:
raise ValueError(
"Supported target types are: {}. Got {!r} instead.".format(
allowed_target_types, type_of_target_y
)
)
y = column_or_1d(y)
_, y_inv, y_cnt = np.unique(y, return_inverse=True, return_counts=True)
if np.all(self.n_splits > y_cnt):
raise ValueError(
"n_splits=%d cannot be greater than the"
" number of members in each class." % (self.n_splits)
)
n_smallest_class = np.min(y_cnt)
if self.n_splits > n_smallest_class:
warnings.warn(
"The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (n_smallest_class, self.n_splits),
UserWarning,
)
n_classes = len(y_cnt)
_, groups_inv, groups_cnt = np.unique(
groups, return_inverse=True, return_counts=True
)
y_counts_per_group = np.zeros((len(groups_cnt), n_classes))
for class_idx, group_idx in zip(y_inv, groups_inv):
y_counts_per_group[group_idx, class_idx] += 1
y_counts_per_fold = np.zeros((self.n_splits, n_classes))
groups_per_fold = defaultdict(set)
if self.shuffle:
rng.shuffle(y_counts_per_group)
# Stable sort to keep shuffled order for groups with the same
# class distribution variance
sorted_groups_idx = np.argsort(
-np.std(y_counts_per_group, axis=1), kind="mergesort"
)
for group_idx in sorted_groups_idx:
group_y_counts = y_counts_per_group[group_idx]
best_fold = self._find_best_fold(
y_counts_per_fold=y_counts_per_fold,
y_cnt=y_cnt,
group_y_counts=group_y_counts,
)
y_counts_per_fold[best_fold] += group_y_counts
groups_per_fold[best_fold].add(group_idx)
for i in range(self.n_splits):
test_indices = [
idx
for idx, group_idx in enumerate(groups_inv)
if group_idx in groups_per_fold[i]
]
yield test_indices
def _find_best_fold(self, y_counts_per_fold, y_cnt, group_y_counts):
best_fold = None
min_eval = np.inf
min_samples_in_fold = np.inf
for i in range(self.n_splits):
y_counts_per_fold[i] += group_y_counts
# Summarise the distribution over classes in each proposed fold
std_per_class = np.std(y_counts_per_fold / y_cnt.reshape(1, -1), axis=0)
y_counts_per_fold[i] -= group_y_counts
fold_eval = np.mean(std_per_class)
samples_in_fold = np.sum(y_counts_per_fold[i])
is_current_fold_better = (
fold_eval < min_eval
or np.isclose(fold_eval, min_eval)
and samples_in_fold < min_samples_in_fold
)
if is_current_fold_better:
min_eval = fold_eval
min_samples_in_fold = samples_in_fold
best_fold = i
return best_fold
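# --- Illustrative sketch (not part of the library API) ----------------------
# The heuristic in ``StratifiedGroupKFold._find_best_fold`` above scores a
# candidate fold by the mean, over classes, of the across-fold standard
# deviation of class fractions: the smaller the score, the more even the
# class balance stays after tentatively adding the group. Hypothetical
# minimal restatement:
def _sketch_fold_score(y_counts_per_fold, y_cnt):
    """Mean over classes of the across-fold std of class fractions."""
    fractions = y_counts_per_fold / y_cnt.reshape(1, -1)
    return float(np.mean(np.std(fractions, axis=0)))
# A perfectly even assignment scores 0.0:
# _sketch_fold_score(np.array([[2.0, 2.0], [2.0, 2.0]]), np.array([4.0, 4.0]))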
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than in previous splits, and
    thus shuffling within the cross-validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as the train set and the
    (k+1)th fold as the test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <time_series_split>`.
.. versionadded:: 0.18
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
max_train_size : int, default=None
Maximum size for a single training set.
test_size : int, default=None
Used to limit the size of the test set. Defaults to
``n_samples // (n_splits + 1)``, which is the maximum allowed value
with ``gap=0``.
.. versionadded:: 0.24
gap : int, default=0
Number of samples to exclude from the end of each train set before
the test set.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> tscv = TimeSeriesSplit()
>>> print(tscv)
TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0]
Test: index=[1]
Fold 1:
Train: index=[0 1]
Test: index=[2]
Fold 2:
Train: index=[0 1 2]
Test: index=[3]
Fold 3:
Train: index=[0 1 2 3]
Test: index=[4]
Fold 4:
Train: index=[0 1 2 3 4]
Test: index=[5]
>>> # Fix test_size to 2 with 12 samples
>>> X = np.random.randn(12, 2)
>>> y = np.random.randint(0, 2, 12)
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0 1 2 3 4 5]
Test: index=[6 7]
Fold 1:
Train: index=[0 1 2 3 4 5 6 7]
Test: index=[8 9]
Fold 2:
Train: index=[0 1 2 3 4 5 6 7 8 9]
Test: index=[10 11]
>>> # Add in a 2 period gap
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0 1 2 3]
Test: index=[6 7]
Fold 1:
Train: index=[0 1 2 3 4 5]
Test: index=[8 9]
Fold 2:
Train: index=[0 1 2 3 4 5 6 7]
Test: index=[10 11]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i`` th split,
with a test set of size ``n_samples//(n_splits + 1)`` by default,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_size = max_train_size
self.test_size = test_size
self.gap = gap
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
gap = self.gap
test_size = (
self.test_size if self.test_size is not None else n_samples // n_folds
)
# Make sure we have enough samples for the given split parameters
if n_folds > n_samples:
raise ValueError(
f"Cannot have number of folds={n_folds} greater"
f" than the number of samples={n_samples}."
)
if n_samples - gap - (test_size * n_splits) <= 0:
raise ValueError(
f"Too many splits={n_splits} for number of samples"
f"={n_samples} with test_size={test_size} and gap={gap}."
)
indices = np.arange(n_samples)
test_starts = range(n_samples - n_splits * test_size, n_samples, test_size)
for test_start in test_starts:
train_end = test_start - gap
if self.max_train_size and self.max_train_size < train_end:
yield (
indices[train_end - self.max_train_size : train_end],
indices[test_start : test_start + test_size],
)
else:
yield (
indices[:train_end],
indices[test_start : test_start + test_size],
)
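# --- Illustrative sketch (not part of the library API) ----------------------
# The index arithmetic of ``TimeSeriesSplit.split`` above in one place, with
# hypothetical names: split k trains on [0, test_start - gap) (possibly
# truncated by ``max_train_size``) and tests on
# [test_start, test_start + test_size).
def _sketch_time_series_windows(n_samples, n_splits, test_size, gap=0):
    """Yield (train_end, test_start, test_end) for each split."""
    first_test = n_samples - n_splits * test_size
    for test_start in range(first_test, n_samples, test_size):
        yield test_start - gap, test_start, test_start + test_size
# list(_sketch_time_series_windows(12, n_splits=3, test_size=2, gap=2))
# -> [(4, 6, 8), (6, 8, 10), (8, 10, 12)], matching the gapped doctest above.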
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
    Provides train/test indices to split data such that each training set is
    composed of all samples except those belonging to one specific group.
    Arbitrary domain-specific group information is provided as an array of
    integers that encodes the group of each sample.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <leave_one_group_out>`.
Notes
-----
Splits are ordered according to the index of the group left out. The first
split has testing set consisting of the group whose index in `groups` is
lowest, and so on.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> logo.get_n_splits(groups=groups) # 'groups' is always required
2
>>> print(logo)
LeaveOneGroupOut()
>>> for i, (train_index, test_index) in enumerate(logo.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3], group=[2 2]
Test: index=[0 1], group=[1 1]
Fold 1:
Train: index=[0 1], group=[1 1]
Test: index=[2 3], group=[2 2]
    See Also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(
groups, input_name="groups", copy=True, ensure_2d=False, dtype=None
)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups
)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
return len(np.unique(groups))
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
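# --- Illustrative sketch (not part of the library API) ----------------------
# ``LeaveOneGroupOut._iter_test_masks`` above boils down to one boolean mask
# per unique group, emitted in sorted group order. Hypothetical restatement:
def _sketch_logo_masks(groups):
    """One boolean test mask per unique group."""
    groups = np.asarray(groups)
    return [groups == g for g in np.unique(groups)]
# _sketch_logo_masks([1, 1, 2]) -> [array([ True,  True, False]),
#                                   array([False, False,  True])]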
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups, while the latter uses samples
    that are all assigned to the same group.
Read more in the :ref:`User Guide <leave_p_groups_out>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> lpgo.get_n_splits(groups=groups) # 'groups' is always required
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2], group=[3]
Test: index=[0 1], group=[1 2]
Fold 1:
Train: index=[1], group=[2]
Test: index=[0 2], group=[1 3]
Fold 2:
Train: index=[0], group=[1]
Test: index=[1 2], group=[2 3]
See Also
--------
GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(
groups, input_name="groups", copy=True, ensure_2d=False, dtype=None
)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1)
)
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=bool)
            for label in unique_groups[np.array(indices)]:
                test_index[groups == label] = True
yield test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
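# --- Illustrative sketch (not part of the library API) ----------------------
# ``LeavePGroupsOut.get_n_splits`` above is simply "number of unique groups
# choose p", computed with the same ``comb`` helper the class uses (imported
# at the top of this module). Hypothetical hand-check:
def _sketch_lpgo_n_splits(groups, p):
    """Number of LeavePGroupsOut splits: C(n_unique_groups, p)."""
    return int(comb(len(np.unique(groups)), p, exact=True))
# _sketch_lpgo_n_splits([1, 2, 3], p=2) -> 3, matching the three folds in
# the LeavePGroupsOut(n_groups=2) doctest above.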
class _RepeatedSplits(metaclass=ABCMeta):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Passes `random_state` to the arbitrary repeating cross validator.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
**cvargs : additional params
        Constructor parameters for cv. Must not contain random_state
        or shuffle.
"""
def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, numbers.Integral):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
if any(key in cvargs for key in ("random_state", "shuffle")):
raise ValueError("cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
def __repr__(self):
return _build_repr(self)
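# --- Illustrative sketch (not part of the library API) ----------------------
# ``_RepeatedSplits.split`` above deliberately passes one shared RandomState
# to every repetition, so each repetition consumes fresh random draws: the
# partitions differ across repeats, yet the whole sequence is reproducible
# from a single seed. Hypothetical minimal restatement:
def _sketch_repeated_splits(cv_cls, X, y=None, n_repeats=2, seed=0, **cvargs):
    rng = check_random_state(seed)  # one RNG shared by all repetitions
    for _ in range(n_repeats):
        cv = cv_cls(random_state=rng, shuffle=True, **cvargs)
        yield from cv.split(X, y)
# e.g. list(_sketch_repeated_splits(KFold, np.arange(4).reshape(4, 1),
#                                   n_splits=2)) yields 4 (train, test) pairs.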
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <repeated_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Controls the randomness of each repeated cross-validation instance.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> rkf.get_n_splits(X, y)
4
>>> print(rkf)
RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124)
>>> for i, (train_index, test_index) in enumerate(rkf.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
...
Fold 0:
Train: index=[0 1]
Test: index=[2 3]
Fold 1:
Train: index=[2 3]
Test: index=[0 1]
Fold 2:
Train: index=[1 2]
Test: index=[0 3]
Fold 3:
Train: index=[0 3]
Test: index=[1 2]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits
)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <repeated_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Controls the generation of the random states for each repetition.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> rskf.get_n_splits(X, y)
4
>>> print(rskf)
RepeatedStratifiedKFold(n_repeats=2, n_splits=2, random_state=36851234)
>>> for i, (train_index, test_index) in enumerate(rskf.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
...
Fold 0:
Train: index=[1 2]
Test: index=[0 3]
Fold 1:
Train: index=[0 3]
Test: index=[1 2]
Fold 2:
Train: index=[1 3]
Test: index=[0 2]
Fold 3:
Train: index=[0 2]
Test: index=[1 3]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
RepeatedKFold : Repeats K-Fold n times.
"""
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
StratifiedKFold,
n_repeats=n_repeats,
random_state=random_state,
n_splits=n_splits,
)
class BaseShuffleSplit(metaclass=ABCMeta):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._default_test_size = 0.1
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <ShuffleSplit>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
5
>>> print(rs)
ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0 4]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2 5]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4 0]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1 0]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1 0]
Test: index=[2 4]
>>> # Specify train and test size
>>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25,
... random_state=0)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1]
Test: index=[2 4]
"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.1
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(
n_samples,
self.test_size,
self.train_size,
default_test_size=self._default_test_size,
)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test : (n_test + n_train)]
yield ind_train, ind_test
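# --- Illustrative sketch (not part of the library API) ----------------------
# Each ``ShuffleSplit`` iteration above is a single random permutation cut in
# two: the first ``n_test`` positions become the test set and the next
# ``n_train`` positions the train set; any remainder belongs to neither.
# Hypothetical restatement:
def _sketch_shuffle_once(n_samples, n_test, n_train, seed=0):
    rng = check_random_state(seed)
    permutation = rng.permutation(n_samples)
    return permutation[n_test : n_test + n_train], permutation[:n_test]
# With n_samples=6, n_test=2, n_train=3, one sample falls in neither set,
# which is why ``train_size + test_size`` need not cover the whole data.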
class GroupShuffleSplit(ShuffleSplit):
"""Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Read more in the :ref:`User Guide <group_shuffle_split>`.
Parameters
----------
n_splits : int, default=5
Number of re-shuffling & splitting iterations.
    test_size : float or int, default=None
        If float, should be between 0.0 and 1.0 and represent the proportion
        of groups to include in the test split (rounded up). If int,
        represents the absolute number of test groups. If None, the value is
        set to the complement of the train size. If ``train_size`` is also
        None, it will be set to 0.2.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupShuffleSplit
>>> X = np.ones(shape=(8, 2))
>>> y = np.ones(shape=(8, 1))
>>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3])
>>> print(groups.shape)
(8,)
>>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
>>> gss.get_n_splits()
2
>>> print(gss)
GroupShuffleSplit(n_splits=2, random_state=42, test_size=None, train_size=0.7)
>>> for i, (train_index, test_index) in enumerate(gss.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3 4 5 6 7], group=[2 2 2 3 3 3]
Test: index=[0 1], group=[1 1]
Fold 1:
Train: index=[0 1 5 6 7], group=[1 1 3 3 3]
Test: index=[2 3 4], group=[2 2 2]
See Also
--------
ShuffleSplit : Shuffles samples to create independent test/train sets.
LeavePGroupsOut : Train set leaves out all possible subsets of `p` groups.
"""
def __init__(
self, n_splits=5, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.2
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super()._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
return super().split(X, y, groups)
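# --- Illustrative sketch (not part of the library API) ----------------------
# ``GroupShuffleSplit._iter_indices`` above shuffles *group labels* and then
# maps the selected groups back to sample indices with ``np.in1d``. A
# hand-check of that inversion step, with hypothetical names:
def _sketch_groups_to_samples(groups, chosen_group_positions):
    """Sample indices whose group sits at the given unique-group positions."""
    _, group_indices = np.unique(groups, return_inverse=True)
    return np.flatnonzero(np.in1d(group_indices, chosen_group_positions))
# _sketch_groups_to_samples([1, 1, 2, 2, 2], [1]) -> array([2, 3, 4]),
# i.e. every sample belonging to the second unique group (label 2).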
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <stratified_shuffle_split>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
5
>>> print(sss)
StratifiedShuffleSplit(n_splits=5, random_state=0, ...)
>>> for i, (train_index, test_index) in enumerate(sss.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[5 2 3]
Test: index=[4 1 0]
Fold 1:
Train: index=[5 1 4]
Test: index=[0 2 3]
Fold 2:
Train: index=[5 0 2]
Test: index=[4 3 1]
Fold 3:
Train: index=[4 1 0]
Test: index=[2 3 5]
Fold 4:
Train: index=[0 5 1]
Test: index=[3 4 2]
"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.1
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(
n_samples,
self.test_size,
self.train_size,
default_test_size=self._default_test_size,
)
if y.ndim == 2:
# for multi-label y, map each distinct row to a string repr
# using join because str(row) uses an ellipsis if len(row) > 1000
y = np.array([" ".join(row.astype("str")) for row in y])
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
"The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2."
)
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or "
"equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or "
"equal to the number of classes = %d" % (n_test, n_classes)
)
# Find the sorted list of instances for each class:
# (np.unique above performs a sort, so code is O(n logn) already)
class_indices = np.split(
np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
)
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,) or (n_samples, n_labels)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
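# --- Illustrative sketch (not part of the library API) ----------------------
# For multi-label ``y``, ``StratifiedShuffleSplit._iter_indices`` above keys
# the stratification on a string made from each label row, so identical rows
# collapse into one "class". Hypothetical hand-check:
def _sketch_multilabel_rows_to_keys(y_2d):
    """Map each multi-label row to a joined string key."""
    return np.array([" ".join(row.astype("str")) for row in np.asarray(y_2d)])
# _sketch_multilabel_rows_to_keys([[0, 1], [0, 1], [1, 0]])
# -> array(['0 1', '0 1', '1 0'], dtype='<U3')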
def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None):
"""
    Validation helper to check that the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (
test_size_type == "i"
and (test_size >= n_samples or test_size <= 0)
or test_size_type == "f"
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
"test_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(test_size, n_samples)
)
if (
train_size_type == "i"
and (train_size >= n_samples or train_size <= 0)
or train_size_type == "f"
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
"train_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(train_size, n_samples)
)
if train_size is not None and train_size_type not in ("i", "f"):
raise ValueError("Invalid value for train_size: {}".format(train_size))
if test_size is not None and test_size_type not in ("i", "f"):
raise ValueError("Invalid value for test_size: {}".format(test_size))
if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1:
raise ValueError(
"The sum of test_size and train_size = {}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size.".format(train_size + test_size)
)
if test_size_type == "f":
n_test = ceil(test_size * n_samples)
elif test_size_type == "i":
n_test = float(test_size)
if train_size_type == "f":
n_train = floor(train_size * n_samples)
elif train_size_type == "i":
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
"The sum of train_size and test_size = %d, "
"should be smaller than the number of "
"samples %d. Reduce test_size and/or "
"train_size." % (n_train + n_test, n_samples)
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
"With n_samples={}, test_size={} and train_size={}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters.".format(n_samples, test_size, train_size)
)
return n_train, n_test
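# --- Illustrative sketch (not part of the library API) ----------------------
# Hand-checks for the float/int resolution in ``_validate_shuffle_split``
# above: a float size is scaled by ``n_samples`` (test rounded up, train
# rounded down), an int is taken as an absolute count, and a ``None`` side
# becomes the complement of the other. The helper name is hypothetical.
def _sketch_validate_shuffle_split_examples():
    assert _validate_shuffle_split(10, 0.25, None) == (7, 3)  # ceil(2.5) == 3
    assert _validate_shuffle_split(10, 3, 0.5) == (5, 3)      # floor(5.0) == 5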
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Provides train/test indices to split data into train/test sets using a
predefined scheme specified by the user with the ``test_fold`` parameter.
Read more in the :ref:`User Guide <predefined_split>`.
.. versionadded:: 0.16
Parameters
----------
test_fold : array-like of shape (n_samples,)
The entry ``test_fold[i]`` represents the index of the test set that
sample ``i`` belongs to. It is possible to exclude sample ``i`` from
any test set (i.e. include sample ``i`` in every training set) by
setting ``test_fold[i]`` equal to -1.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps)
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for i, (train_index, test_index) in enumerate(ps.split()):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 2 3]
Test: index=[0]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=5, y=None, *, classifier=False):
"""Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - integer, to specify the number of folds,
        - :term:`CV splitter`,
        - an iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (
classifier
and (y is not None)
and (type_of_target(y, input_name="y") in ("binary", "multiclass"))
):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, "split") or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError(
"Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv
)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
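# --- Illustrative sketch (not part of the library API) ----------------------
# The ``check_cv`` dispatch above in three hand-checks (hypothetical helper):
def _sketch_check_cv_dispatch():
    assert isinstance(check_cv(3), KFold)  # plain integer -> KFold
    assert isinstance(  # classifier + binary y -> StratifiedKFold
        check_cv(3, y=np.array([0, 1, 0, 1]), classifier=True), StratifiedKFold
    )
    assert isinstance(  # bare iterable of splits -> wrapped
        check_cv([([0], [1])]), _CVIterableWrapper
    )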
def train_test_split(
*arrays,
test_size=None,
train_size=None,
random_state=None,
shuffle=True,
stratify=None,
):
"""Split arrays or matrices into random train and test subsets.
    Quick utility that wraps input validation,
    ``next(ShuffleSplit().split(X, y))``, and application to input data in a
    single call for splitting (and optionally subsampling) data.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Read more in the :ref:`User Guide <stratification>`.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(
n_samples, test_size, train_size, default_test_size=0.25
)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for shuffle=False"
)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(
chain.from_iterable(
(_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays
)
)
# Tell nose that train_test_split is not a test.
# (Needed for external libraries that may use nose.)
# Use setattr to avoid mypy errors when monkeypatching.
setattr(train_test_split, "__test__", False)
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
        The offset in characters to add at the beginning of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
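
    Examples
    --------
    A small illustrative call (keys are sorted in the output):

    >>> _pprint({"alpha": 0.5, "n_splits": 3})
    'alpha=0.5, n_splits=3'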
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ",\n" + (1 + offset // 2) * " "
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = "%s=%s" % (k, str(v))
else:
# use repr of the rest
this_repr = "%s=%s" % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + "..." + this_repr[-100:]
if i > 0:
if this_line_length + len(this_repr) >= 75 or "\n" in this_repr:
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(", ")
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = "".join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = "\n".join(l.rstrip(" ") for l in lines.split("\n"))
return lines
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted(
[
p.name
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
)
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", FutureWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if value is None and hasattr(self, "cvargs"):
value = self.cvargs.get(key, None)
if len(w) and w[0].category == FutureWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return "%s(%s)" % (class_name, _pprint(params, offset=len(class_name)))
def _yields_constant_splits(cv):
# Return True if calling cv.split() always returns the same splits
# We assume that if a cv doesn't have a shuffle parameter, it shuffles by
# default (e.g. ShuffleSplit). If it actually doesn't shuffle (e.g.
# LeaveOneOut), then it won't have a random_state parameter anyway, in
# which case it will default to 0, leading to output=True
shuffle = getattr(cv, "shuffle", True)
random_state = getattr(cv, "random_state", 0)
return isinstance(random_state, numbers.Integral) or not shuffle
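# Illustrative checks (with the splitters defined in this module): KFold(5)
# has shuffle=False, so _yields_constant_splits(KFold(5)) is True, while
# _yields_constant_splits(ShuffleSplit()) is False because ShuffleSplit
# always shuffles and its random_state defaults to None.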
| bsd-3-clause | 582bbb1f988ca38040937bc2380b3fee | 34.284496 | 89 | 0.592997 | 3.887336 | false | true | false | false |
scikit-learn/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 12 | 4806 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between the two datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
# %%
# Dataset based latent variables model
# ------------------------------------
import numpy as np
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[: n // 2]
Y_train = Y[: n // 2]
X_test = X[n // 2 :]
Y_test = Y[n // 2 :]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
# %%
# Canonical (symmetric) PLS
# -------------------------
#
# Transform data
# ~~~~~~~~~~~~~~
from sklearn.cross_decomposition import PLSCanonical
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# %%
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
import matplotlib.pyplot as plt
# On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.scatter(X_train_r[:, 0], Y_train_r[:, 0], label="train", marker="o", s=25)
plt.scatter(X_test_r[:, 0], Y_test_r[:, 0], label="test", marker="o", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title(
"Comp. 1: X vs Y (test corr = %.2f)"
% np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]
)
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.scatter(X_train_r[:, 1], Y_train_r[:, 1], label="train", marker="o", s=25)
plt.scatter(X_test_r[:, 1], Y_test_r[:, 1], label="test", marker="o", s=25)
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title(
"Comp. 2: X vs Y (test corr = %.2f)"
% np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]
)
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.scatter(X_train_r[:, 0], X_train_r[:, 1], label="train", marker="*", s=50)
plt.scatter(X_test_r[:, 0], X_test_r[:, 1], label="test", marker="*", s=50)
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title(
"X comp. 1 vs X comp. 2 (test corr = %.2f)"
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]
)
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.scatter(Y_train_r[:, 0], Y_train_r[:, 1], label="train", marker="*", s=50)
plt.scatter(Y_test_r[:, 0], Y_test_r[:, 1], label="test", marker="*", s=50)
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title(
"Y comp. 1 vs Y comp. 2 , (test corr = %.2f)"
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]
)
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
# %%
# PLS regression, with multivariate response, a.k.a. PLS2
# -------------------------------------------------------
from sklearn.cross_decomposition import PLSRegression
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
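# Predict on the training data (shown to illustrate the regression API; the
# result is displayed when running the example interactively)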
pls2.predict(X)
# %%
# PLS regression, with univariate response, a.k.a. PLS1
# -----------------------------------------------------
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
# %%
# CCA (PLS mode B with symmetric deflation)
# -----------------------------------------
from sklearn.cross_decomposition import CCA
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause | a1d852f868b632dc3de138a6c1e8c6ab | 27.105263 | 78 | 0.605909 | 2.709132 | false | true | false | false |
scikit-learn/scikit-learn | benchmarks/bench_plot_randomized_svd.py | 12 | 18108 | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is equal to the largest
singular value of a matrix; (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest running these
benchmarks with `enable_spectral_norm = False`, as the Frobenius norm is MUCH
faster to compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions."
<0909.4061>`
Halko, et al., (2009)
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
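# A minimal sketch of the call pattern benchmarked below (array shapes and
# parameter values are illustrative only):
#
#     from sklearn.utils.extmath import randomized_svd
#     U, s, Vt = randomized_svd(X, n_components=50, n_oversamples=10,
#                               n_iter=2, power_iteration_normalizer="auto",
#                               random_state=0)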
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets import make_low_rank_matrix, make_sparse_uncorrelated
from sklearn.datasets import (
fetch_lfw_people,
fetch_openml,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1,
)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, the tests are much slower and will crash on the largest
# datasets
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = int(4e9)
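# (e.g. a dense 20000 x 25000 float64 matrix takes 4e9 bytes; illustrative)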
# The following datasets can be downloaded manually from:
# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = [
"low rank matrix",
"lfw_people",
"olivetti_faces",
"20newsgroups",
"mnist_784",
"CIFAR",
"a3a",
"SVHN",
"uncorrelated matrix",
]
big_sparse_datasets = ["big sparse matrix", "rcv1"]
def unpickle(file_name):
with open(file_name, "rb") as fo:
return pickle.load(fo, encoding="latin1")["data"]
def handle_missing_dataset(file_folder):
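    # Returns 0 (a falsy sentinel) when the folder is absent so that callers
    # can skip the corresponding dataset; implicitly returns None otherwise.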
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == "lfw_people":
X = fetch_lfw_people().data
elif dataset_name == "20newsgroups":
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == "olivetti_faces":
X = fetch_olivetti_faces().data
elif dataset_name == "rcv1":
X = fetch_rcv1().data
elif dataset_name == "CIFAR":
if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1)) for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == "SVHN":
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)["X"]
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == "low rank matrix":
X = make_low_rank_matrix(
n_samples=500,
n_features=int(1e4),
effective_rank=100,
tail_strength=0.5,
random_state=random_state,
)
elif dataset_name == "uncorrelated matrix":
X, _ = make_sparse_uncorrelated(
n_samples=500, n_features=10000, random_state=random_state
)
elif dataset_name == "big sparse matrix":
sparsity = int(1e6)
size = int(1e6)
small_size = int(1e4)
data = np.random.normal(0, 1, int(sparsity / 10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_openml(dataset_name, parser="auto").data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ["g", "b", "y"]
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker="o", c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker="^", c="red")
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -20),
textcoords="offset points",
ha="right",
va="bottom",
)
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker="o", c="b", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -80),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
else:
plt.scatter(time[l], norm[l], label=l, marker="^", c="red", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, 30),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker="o")
plt.legend(loc="lower right", prop={"size": 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(
X, n_comps, n_iter, n_oversamples, power_iteration_normalizer="auto", method=None
):
"""
Measure time for decomposition
"""
print("... running SVD ...")
if method != "fbpca":
gc.collect()
t0 = time()
U, mu, V = randomized_svd(
X,
n_comps,
n_oversamples=n_oversamples,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
random_state=random_state,
transpose=False,
)
call_time = time() - t0
else:
gc.collect()
t0 = time()
        # fbpca uses a different convention for the oversampling parameter l
U, mu, V = fbpca.pca(
X, n_comps, raw=True, n_iter=n_iter, l=n_oversamples + n_comps
)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True, random_state=None):
    """
    Compute the norm of the matrix A, typically the difference between an
    original matrix and its low-rank reconstruction from randomized_svd.
    norm: 2 => spectral; 'fro' => Frobenius
    """
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
v0 = _init_arpack_v0(min(A.shape), random_state)
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False, v0=v0)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
if not sp.sparse.issparse(X) or (
X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY
):
# if the input is not sparse or sparse but not too big,
# U.dot(np.diag(s).dot(V)) will fit in RAM
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm="fro")
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
    cum_sq_norm = 0.0
    for batch in gen_batches(X.shape[0], batch_size):
        M = X[batch, :] - U[batch, :].dot(Vhat)
        # Frobenius norms combine through their squares:
        # ||X - U diag(s) V||_F^2 == sum over row batches of ||M||_F^2
        cum_sq_norm += norm_diff(M, norm="fro", msg=False) ** 2
    return np.sqrt(cum_sq_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for pi in power_iter:
for pm in ["none", "LU", "QR"]:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
            print("n_iter = %d on fbpca" % (pi))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method="fbpca",
)
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {
"n_samples": n_samples,
"n_features": n_features,
"tail_strength": 0.7,
"random_state": random_state,
}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for n_comp in [int(rank / 2), rank, rank * 2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(
X,
n_comp,
n_iter=pi,
n_oversamples=2,
power_iteration_normalizer="LU",
)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
        # Do not mutate the n_comps argument: a small dataset earlier in the
        # loop would otherwise shrink n_comps for all following datasets.
        dataset_n_comps = np.minimum(n_comps, np.min(X.shape))
        label = "sklearn"
        print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
        U, s, V, time = svd_timing(
            X, dataset_n_comps, n_iter=2, n_oversamples=10, method=label
        )
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
            U, s, V, time = svd_timing(
                X, dataset_n_comps, n_iter=2, n_oversamples=2, method=label
            )
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == "__main__":
random_state = check_random_state(1234)
power_iter = np.arange(0, 6)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(
" >>>>>> Benching sklearn and fbpca on %s %d x %d"
% (dataset_name, X.shape[0], X.shape[1])
)
bench_a(
X,
dataset_name,
power_iter,
n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)),
)
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| bsd-3-clause | 6cabca52493692fe6751efc02e4b5868 | 33.230624 | 88 | 0.581566 | 3.425653 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/datasets/tests/test_openml.py | 12 | 54065 | """Test the openml loader."""
import gzip
import json
import os
import re
from functools import partial
from importlib import resources
from io import BytesIO
from urllib.error import HTTPError
import numpy as np
import scipy.sparse
import pytest
import sklearn
from sklearn import config_context
from sklearn.utils import Bunch, check_pandas_support
from sklearn.utils._testing import (
SkipTest,
assert_allclose,
assert_array_equal,
fails_if_pypy,
)
from sklearn.datasets import fetch_openml as fetch_openml_orig
from sklearn.datasets._openml import (
_OPENML_PREFIX,
_open_openml_url,
_get_local_path,
_retry_with_clean_cache,
)
OPENML_TEST_DATA_MODULE = "sklearn.datasets.tests.data.openml"
# if True, urlopen will be monkey patched to only use local files
test_offline = True
class _MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {"Content-Encoding": "gzip"}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
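# Illustrative use of the mock response (hypothetical values, mirroring the
# urlopen helpers below):
#   _MockHTTPResponse(BytesIO(b"payload"), is_gzip=False).read() == b"payload"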
# Disable the disk-based cache when testing `fetch_openml`:
# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent
# with the version on openml.org. If one were to load the dataset outside of
# the tests, it may result in data that does not represent openml.org.
fetch_openml = partial(fetch_openml_orig, data_home=None)
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
# monkey patches the urlopen function. Important note: Do NOT use this
# in combination with a regular cache directory, as the files that are
# stored as cache should not be mixed up with real openml datasets
url_prefix_data_description = "https://openml.org/api/v1/json/data/"
url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
url_prefix_download_data = "https://openml.org/data/v1/"
url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"
path_suffix = ".gz"
read_fn = gzip.open
data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
def _file_name(url, suffix):
output = (
re.sub(r"\W", "-", url[len("https://openml.org/") :]) + suffix + path_suffix
)
        # Shorten the filenames for better compatibility with the
        # 260-character path limit on Windows 10
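        # e.g. (illustrative) the URL
        # "https://openml.org/api/v1/json/data/list/data_name/iris/limit/2"
        # maps to "api-v1-jdl-dn-iris-l-2.json.gz" after these replacements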
return (
output.replace("-json-data-list", "-jdl")
.replace("-json-data-features", "-jdf")
.replace("-json-data-qualities", "-jdq")
.replace("-json-data", "-jd")
.replace("-data_name", "-dn")
.replace("-download", "-dl")
.replace("-limit", "-l")
.replace("-data_version", "-dv")
.replace("-status", "-s")
.replace("-deactivated", "-dact")
.replace("-active", "-act")
)
def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
assert url.startswith(expected_prefix)
data_file_name = _file_name(url, suffix)
with resources.open_binary(data_module, data_file_name) as f:
if has_gzip_header and gzip_response:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, "rb")
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen_data_description(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_data_description,
suffix=".json",
)
def _mock_urlopen_data_features(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_data_features,
suffix=".json",
)
def _mock_urlopen_download_data(url, has_gzip_header):
return _mock_urlopen_shared(
url=url,
has_gzip_header=has_gzip_header,
expected_prefix=url_prefix_download_data,
suffix=".arff",
)
def _mock_urlopen_data_list(url, has_gzip_header):
assert url.startswith(url_prefix_data_list)
data_file_name = _file_name(url, ".json")
        # load the file itself first, to simulate an HTTP error when the mock
        # data contains one
with resources.open_binary(data_module, data_file_name) as f:
decompressed_f = read_fn(f, "rb")
decoded_s = decompressed_f.read().decode("utf-8")
json_data = json.loads(decoded_s)
if "error" in json_data:
raise HTTPError(
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=None
)
with resources.open_binary(data_module, data_file_name) as f:
if has_gzip_header:
fp = BytesIO(f.read())
return _MockHTTPResponse(fp, True)
else:
decompressed_f = read_fn(f, "rb")
fp = BytesIO(decompressed_f.read())
return _MockHTTPResponse(fp, False)
def _mock_urlopen(request, *args, **kwargs):
url = request.get_full_url()
has_gzip_header = request.get_header("Accept-encoding") == "gzip"
if url.startswith(url_prefix_data_list):
return _mock_urlopen_data_list(url, has_gzip_header)
elif url.startswith(url_prefix_data_features):
return _mock_urlopen_data_features(url, has_gzip_header)
elif url.startswith(url_prefix_download_data):
return _mock_urlopen_download_data(url, has_gzip_header)
elif url.startswith(url_prefix_data_description):
return _mock_urlopen_data_description(url, has_gzip_header)
else:
raise ValueError("Unknown mocking URL pattern: %s" % url)
# XXX: Global variable
if test_offline:
context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
###############################################################################
# Test the behaviour of `fetch_openml` depending on the input parameters.
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize(
"data_id, dataset_params, n_samples, n_features, n_targets",
[
# iris
(61, {"data_id": 61}, 150, 4, 1),
(61, {"name": "iris", "version": 1}, 150, 4, 1),
# anneal
(2, {"data_id": 2}, 11, 38, 1),
(2, {"name": "anneal", "version": 1}, 11, 38, 1),
# cpu
(561, {"data_id": 561}, 209, 7, 1),
(561, {"name": "cpu", "version": 1}, 209, 7, 1),
# emotions
(40589, {"data_id": 40589}, 13, 72, 6),
# adult-census
(1119, {"data_id": 1119}, 10, 14, 1),
(1119, {"name": "adult-census"}, 10, 14, 1),
# miceprotein
(40966, {"data_id": 40966}, 7, 77, 1),
(40966, {"name": "MiceProtein"}, 7, 77, 1),
# titanic
(40945, {"data_id": 40945}, 1309, 13, 1),
],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_as_frame_true(
monkeypatch,
data_id,
dataset_params,
n_samples,
n_features,
n_targets,
parser,
gzip_response,
):
"""Check the behaviour of `fetch_openml` with `as_frame=True`.
    Fetch by ID and/or name (depending on whether the file was previously
    cached).
"""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
bunch = fetch_openml(
as_frame=True,
cache=False,
parser=parser,
**dataset_params,
)
assert int(bunch.details["id"]) == data_id
assert isinstance(bunch, Bunch)
assert isinstance(bunch.frame, pd.DataFrame)
assert bunch.frame.shape == (n_samples, n_features + n_targets)
assert isinstance(bunch.data, pd.DataFrame)
assert bunch.data.shape == (n_samples, n_features)
if n_targets == 1:
assert isinstance(bunch.target, pd.Series)
assert bunch.target.shape == (n_samples,)
else:
assert isinstance(bunch.target, pd.DataFrame)
assert bunch.target.shape == (n_samples, n_targets)
assert bunch.categories is None
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize(
"data_id, dataset_params, n_samples, n_features, n_targets",
[
# iris
(61, {"data_id": 61}, 150, 4, 1),
(61, {"name": "iris", "version": 1}, 150, 4, 1),
# anneal
(2, {"data_id": 2}, 11, 38, 1),
(2, {"name": "anneal", "version": 1}, 11, 38, 1),
# cpu
(561, {"data_id": 561}, 209, 7, 1),
(561, {"name": "cpu", "version": 1}, 209, 7, 1),
# emotions
(40589, {"data_id": 40589}, 13, 72, 6),
# adult-census
(1119, {"data_id": 1119}, 10, 14, 1),
(1119, {"name": "adult-census"}, 10, 14, 1),
# miceprotein
(40966, {"data_id": 40966}, 7, 77, 1),
(40966, {"name": "MiceProtein"}, 7, 77, 1),
],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_as_frame_false(
monkeypatch,
data_id,
dataset_params,
n_samples,
n_features,
n_targets,
parser,
):
"""Check the behaviour of `fetch_openml` with `as_frame=False`.
    Fetch by ID and/or by name and version.
"""
pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
as_frame=False,
cache=False,
parser=parser,
**dataset_params,
)
assert int(bunch.details["id"]) == data_id
assert isinstance(bunch, Bunch)
assert bunch.frame is None
assert isinstance(bunch.data, np.ndarray)
assert bunch.data.shape == (n_samples, n_features)
assert isinstance(bunch.target, np.ndarray)
if n_targets == 1:
assert bunch.target.shape == (n_samples,)
else:
assert bunch.target.shape == (n_samples, n_targets)
assert isinstance(bunch.categories, dict)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("data_id", [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
"""Check the consistency of the LIAC-ARFF and pandas parsers."""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch_liac = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="liac-arff",
)
bunch_pandas = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="pandas",
)
# The data frames for the input features should match up to some numerical
# dtype conversions (e.g. float64 <=> Int64) due to limitations of the
# LIAC-ARFF parser.
data_liac, data_pandas = bunch_liac.data, bunch_pandas.data
def convert_numerical_dtypes(series):
pandas_series = data_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
else:
return series
data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)
# Let's also check that the .frame attributes also match
frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame
# Note that the .frame attribute is a superset of the .data attribute:
pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)
# However the remaining columns, typically the target(s), are not necessarily
# dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser.
# Therefore, extra dtype conversions are required for those columns:
def convert_numerical_and_categorical_dtypes(series):
pandas_series = frame_pandas[series.name]
if pd.api.types.is_numeric_dtype(pandas_series):
return series.astype(pandas_series.dtype)
elif pd.api.types.is_categorical_dtype(pandas_series):
            # Compare categorical features: the LIAC parser uses strings to
            # denote the categories, so we rename the categories to make them
            # comparable to those of the pandas parser. Fixing this behavior
            # in LIAC-ARFF would allow checking the consistency in the future,
            # but we do not plan to maintain LIAC-ARFF in the long term.
return series.cat.rename_categories(pandas_series.cat.categories)
else:
return series
frame_liac_with_fixed_dtypes = frame_liac.apply(
convert_numerical_and_categorical_dtypes
)
pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
"""Check the equivalence of the dataset when using `as_frame=False` and
`as_frame=True`.
"""
pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch_as_frame_true = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
bunch_as_frame_false = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
parser=parser,
)
assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data)
assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_iris_pandas(monkeypatch, parser):
"""Check fetching on a numerical only dataset with string labels."""
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
data_id = 61
data_shape = (150, 4)
target_shape = (150,)
frame_shape = (150, 5)
target_dtype = CategoricalDtype(
["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
)
data_dtypes = [np.float64] * 4
data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
target_name = "class"
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
data = bunch.data
target = bunch.target
frame = bunch.frame
assert isinstance(data, pd.DataFrame)
assert np.all(data.dtypes == data_dtypes)
assert data.shape == data_shape
assert np.all(data.columns == data_names)
assert np.all(bunch.feature_names == data_names)
assert bunch.target_names == [target_name]
assert isinstance(target, pd.Series)
assert target.dtype == target_dtype
assert target.shape == target_shape
assert target.name == target_name
assert target.index.is_unique
assert isinstance(frame, pd.DataFrame)
assert frame.shape == frame_shape
assert np.all(frame.dtypes == data_dtypes + [target_dtype])
assert frame.index.is_unique
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
"""Check that we can force the target to not be the default target."""
pd = pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
bunch_forcing_target = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
target_column=target_column,
parser=parser,
)
bunch_default = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)
if isinstance(target_column, list):
pd.testing.assert_index_equal(
bunch_forcing_target.target.columns, pd.Index(target_column)
)
assert bunch_forcing_target.data.shape == (150, 3)
else:
assert bunch_forcing_target.target.name == target_column
assert bunch_forcing_target.data.shape == (150, 4)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=True`."""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
return_X_y=False,
parser=parser,
)
X, y = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
return_X_y=True,
parser=parser,
)
pd.testing.assert_frame_equal(bunch.data, X)
if isinstance(y, pd.Series):
pd.testing.assert_series_equal(bunch.target, y)
else:
pd.testing.assert_frame_equal(bunch.target, y)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser):
"""Check the behaviour of `return_X_y=True` when `as_frame=False`."""
pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
bunch = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
return_X_y=False,
parser=parser,
)
X, y = fetch_openml(
data_id=data_id,
as_frame=False,
cache=False,
return_X_y=True,
parser=parser,
)
assert_array_equal(bunch.data, X)
assert_array_equal(bunch.target, y)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_difference_parsers(monkeypatch):
"""Check the difference between liac-arff and pandas parser."""
pytest.importorskip("pandas")
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    # When `as_frame=False`, the categories will be ordinally encoded with the
    # liac-arff parser, while this is not the case with the pandas parser.
as_frame = False
bunch_liac_arff = fetch_openml(
data_id=data_id,
as_frame=as_frame,
cache=False,
parser="liac-arff",
)
bunch_pandas = fetch_openml(
data_id=data_id,
as_frame=as_frame,
cache=False,
parser="pandas",
)
assert bunch_liac_arff.data.dtype.kind == "f"
assert bunch_pandas.data.dtype == "O"
###############################################################################
# Test the ARFF parsing on several datasets to check that the correct
# types (categories, integers, floats) are detected.
@pytest.fixture(scope="module")
def datasets_column_names():
"""Returns the columns names for each dataset."""
return {
61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"],
2: [
"family",
"product-type",
"steel",
"carbon",
"hardness",
"temper_rolling",
"condition",
"formability",
"strength",
"non-ageing",
"surface-finish",
"surface-quality",
"enamelability",
"bc",
"bf",
"bt",
"bw%2Fme",
"bl",
"m",
"chrom",
"phos",
"cbond",
"marvi",
"exptl",
"ferro",
"corr",
"blue%2Fbright%2Fvarn%2Fclean",
"lustre",
"jurofm",
"s",
"p",
"shape",
"thick",
"width",
"len",
"oil",
"bore",
"packing",
"class",
],
561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"],
40589: [
"Mean_Acc1298_Mean_Mem40_Centroid",
"Mean_Acc1298_Mean_Mem40_Rolloff",
"Mean_Acc1298_Mean_Mem40_Flux",
"Mean_Acc1298_Mean_Mem40_MFCC_0",
"Mean_Acc1298_Mean_Mem40_MFCC_1",
"Mean_Acc1298_Mean_Mem40_MFCC_2",
"Mean_Acc1298_Mean_Mem40_MFCC_3",
"Mean_Acc1298_Mean_Mem40_MFCC_4",
"Mean_Acc1298_Mean_Mem40_MFCC_5",
"Mean_Acc1298_Mean_Mem40_MFCC_6",
"Mean_Acc1298_Mean_Mem40_MFCC_7",
"Mean_Acc1298_Mean_Mem40_MFCC_8",
"Mean_Acc1298_Mean_Mem40_MFCC_9",
"Mean_Acc1298_Mean_Mem40_MFCC_10",
"Mean_Acc1298_Mean_Mem40_MFCC_11",
"Mean_Acc1298_Mean_Mem40_MFCC_12",
"Mean_Acc1298_Std_Mem40_Centroid",
"Mean_Acc1298_Std_Mem40_Rolloff",
"Mean_Acc1298_Std_Mem40_Flux",
"Mean_Acc1298_Std_Mem40_MFCC_0",
"Mean_Acc1298_Std_Mem40_MFCC_1",
"Mean_Acc1298_Std_Mem40_MFCC_2",
"Mean_Acc1298_Std_Mem40_MFCC_3",
"Mean_Acc1298_Std_Mem40_MFCC_4",
"Mean_Acc1298_Std_Mem40_MFCC_5",
"Mean_Acc1298_Std_Mem40_MFCC_6",
"Mean_Acc1298_Std_Mem40_MFCC_7",
"Mean_Acc1298_Std_Mem40_MFCC_8",
"Mean_Acc1298_Std_Mem40_MFCC_9",
"Mean_Acc1298_Std_Mem40_MFCC_10",
"Mean_Acc1298_Std_Mem40_MFCC_11",
"Mean_Acc1298_Std_Mem40_MFCC_12",
"Std_Acc1298_Mean_Mem40_Centroid",
"Std_Acc1298_Mean_Mem40_Rolloff",
"Std_Acc1298_Mean_Mem40_Flux",
"Std_Acc1298_Mean_Mem40_MFCC_0",
"Std_Acc1298_Mean_Mem40_MFCC_1",
"Std_Acc1298_Mean_Mem40_MFCC_2",
"Std_Acc1298_Mean_Mem40_MFCC_3",
"Std_Acc1298_Mean_Mem40_MFCC_4",
"Std_Acc1298_Mean_Mem40_MFCC_5",
"Std_Acc1298_Mean_Mem40_MFCC_6",
"Std_Acc1298_Mean_Mem40_MFCC_7",
"Std_Acc1298_Mean_Mem40_MFCC_8",
"Std_Acc1298_Mean_Mem40_MFCC_9",
"Std_Acc1298_Mean_Mem40_MFCC_10",
"Std_Acc1298_Mean_Mem40_MFCC_11",
"Std_Acc1298_Mean_Mem40_MFCC_12",
"Std_Acc1298_Std_Mem40_Centroid",
"Std_Acc1298_Std_Mem40_Rolloff",
"Std_Acc1298_Std_Mem40_Flux",
"Std_Acc1298_Std_Mem40_MFCC_0",
"Std_Acc1298_Std_Mem40_MFCC_1",
"Std_Acc1298_Std_Mem40_MFCC_2",
"Std_Acc1298_Std_Mem40_MFCC_3",
"Std_Acc1298_Std_Mem40_MFCC_4",
"Std_Acc1298_Std_Mem40_MFCC_5",
"Std_Acc1298_Std_Mem40_MFCC_6",
"Std_Acc1298_Std_Mem40_MFCC_7",
"Std_Acc1298_Std_Mem40_MFCC_8",
"Std_Acc1298_Std_Mem40_MFCC_9",
"Std_Acc1298_Std_Mem40_MFCC_10",
"Std_Acc1298_Std_Mem40_MFCC_11",
"Std_Acc1298_Std_Mem40_MFCC_12",
"BH_LowPeakAmp",
"BH_LowPeakBPM",
"BH_HighPeakAmp",
"BH_HighPeakBPM",
"BH_HighLowRatio",
"BHSUM1",
"BHSUM2",
"BHSUM3",
"amazed.suprised",
"happy.pleased",
"relaxing.calm",
"quiet.still",
"sad.lonely",
"angry.aggresive",
],
1119: [
"age",
"workclass",
"fnlwgt:",
"education:",
"education-num:",
"marital-status:",
"occupation:",
"relationship:",
"race:",
"sex:",
"capital-gain:",
"capital-loss:",
"hours-per-week:",
"native-country:",
"class",
],
40966: [
"DYRK1A_N",
"ITSN1_N",
"BDNF_N",
"NR1_N",
"NR2A_N",
"pAKT_N",
"pBRAF_N",
"pCAMKII_N",
"pCREB_N",
"pELK_N",
"pERK_N",
"pJNK_N",
"PKCA_N",
"pMEK_N",
"pNR1_N",
"pNR2A_N",
"pNR2B_N",
"pPKCAB_N",
"pRSK_N",
"AKT_N",
"BRAF_N",
"CAMKII_N",
"CREB_N",
"ELK_N",
"ERK_N",
"GSK3B_N",
"JNK_N",
"MEK_N",
"TRKA_N",
"RSK_N",
"APP_N",
"Bcatenin_N",
"SOD1_N",
"MTOR_N",
"P38_N",
"pMTOR_N",
"DSCR1_N",
"AMPKA_N",
"NR2B_N",
"pNUMB_N",
"RAPTOR_N",
"TIAM1_N",
"pP70S6_N",
"NUMB_N",
"P70S6_N",
"pGSK3B_N",
"pPKCG_N",
"CDK5_N",
"S6_N",
"ADARB1_N",
"AcetylH3K9_N",
"RRP1_N",
"BAX_N",
"ARC_N",
"ERBB4_N",
"nNOS_N",
"Tau_N",
"GFAP_N",
"GluR3_N",
"GluR4_N",
"IL1B_N",
"P3525_N",
"pCASP9_N",
"PSD95_N",
"SNCA_N",
"Ubiquitin_N",
"pGSK3B_Tyr216_N",
"SHH_N",
"BAD_N",
"BCL2_N",
"pS6_N",
"pCFOS_N",
"SYP_N",
"H3AcK18_N",
"EGR1_N",
"H3MeK4_N",
"CaNA_N",
"class",
],
40945: [
"pclass",
"survived",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
"boat",
"body",
"home.dest",
],
}
@pytest.fixture(scope="module")
def datasets_missing_values():
return {
61: {},
2: {
"family": 11,
"temper_rolling": 9,
"condition": 2,
"formability": 4,
"non-ageing": 10,
"surface-finish": 11,
"enamelability": 11,
"bc": 11,
"bf": 10,
"bt": 11,
"bw%2Fme": 8,
"bl": 9,
"m": 11,
"chrom": 11,
"phos": 11,
"cbond": 10,
"marvi": 11,
"exptl": 11,
"ferro": 11,
"corr": 11,
"blue%2Fbright%2Fvarn%2Fclean": 11,
"lustre": 8,
"jurofm": 11,
"s": 11,
"p": 11,
"oil": 10,
"packing": 11,
},
561: {},
40589: {},
1119: {},
40966: {"BCL2_N": 7},
40945: {
"age": 263,
"fare": 1,
"cabin": 1014,
"embarked": 2,
"boat": 823,
"body": 1188,
"home.dest": 564,
},
}
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize(
"data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints",
[
# iris dataset
(61, "liac-arff", 1, 4, 0),
(61, "pandas", 1, 4, 0),
# anneal dataset
(2, "liac-arff", 33, 6, 0),
(2, "pandas", 33, 2, 4),
# cpu dataset
(561, "liac-arff", 1, 7, 0),
(561, "pandas", 1, 0, 7),
# emotions dataset
(40589, "liac-arff", 6, 72, 0),
(40589, "pandas", 6, 69, 3),
# adult-census dataset
(1119, "liac-arff", 9, 6, 0),
(1119, "pandas", 9, 0, 6),
# miceprotein
# 1 column has only missing values with object dtype
(40966, "liac-arff", 1, 76, 0),
# with casting it will be transformed to either float or Int64
(40966, "pandas", 1, 77, 0),
# titanic
(40945, "liac-arff", 3, 5, 0),
(40945, "pandas", 3, 3, 3),
],
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_types_inference(
monkeypatch,
data_id,
parser,
expected_n_categories,
expected_n_floats,
expected_n_ints,
gzip_response,
datasets_column_names,
datasets_missing_values,
):
    """Check that `fetch_openml` infers the right number of categories,
    integers, and floats."""
pd = pytest.importorskip("pandas")
CategoricalDtype = pd.api.types.CategoricalDtype
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
bunch = fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser=parser,
)
frame = bunch.frame
n_categories = len(
[dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
)
n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"])
assert n_categories == expected_n_categories
assert n_floats == expected_n_floats
assert n_ints == expected_n_ints
assert frame.columns.tolist() == datasets_column_names[data_id]
frame_feature_to_n_nan = frame.isna().sum().to_dict()
for name, n_missing in frame_feature_to_n_nan.items():
expected_missing = datasets_missing_values[data_id].get(name, 0)
assert n_missing == expected_missing
###############################################################################
# Test some more specific behaviour
# TODO(1.4): remove this filterwarning decorator
@pytest.mark.filterwarnings("ignore:The default value of `parser` will change")
@pytest.mark.parametrize(
"params, err_msg",
[
({"parser": "unknown"}, "`parser` must be one of"),
({"as_frame": "unknown"}, "`as_frame` must be one of"),
],
)
def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg):
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
with pytest.raises(ValueError, match=err_msg):
fetch_openml(data_id=data_id, **params)
@pytest.mark.parametrize(
"params",
[
{"as_frame": True, "parser": "auto"},
{"as_frame": "auto", "parser": "auto"},
{"as_frame": False, "parser": "pandas"},
],
)
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
"""Check that we raise the proper errors when we require pandas."""
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
err_msg = "requires pandas to be installed. Alternatively, explicitely"
with pytest.raises(ImportError, match=err_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest("This test requires pandas to not be installed.")
# TODO(1.4): move this parameter option in`test_fetch_openml_requires_pandas_error`
def test_fetch_openml_requires_pandas_in_future(monkeypatch):
"""Check that we raise a warning that pandas will be required in the future."""
params = {"as_frame": False, "parser": "auto"}
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
warn_msg = (
"From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"
)
with pytest.warns(FutureWarning, match=warn_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest("This test requires pandas to not be installed.")
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
# TODO(1.4): remove this filterwarning decorator for `parser`
@pytest.mark.filterwarnings("ignore:The default value of `parser` will change")
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"parser": "pandas"},
"Sparse ARFF datasets cannot be loaded with parser='pandas'",
),
(
{"as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
(
{"parser": "pandas", "as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
],
)
def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg):
    """Check that we raise the expected error for sparse ARFF datasets when
    an incompatible set of parameters is passed.
    """
pytest.importorskip("pandas")
data_id = 292
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
with pytest.raises(ValueError, match=err_msg):
fetch_openml(
data_id=data_id,
cache=False,
**params,
)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
"data_id, data_type",
[
(61, "dataframe"), # iris dataset version 1
(292, "sparse"), # Australian dataset version 1
],
)
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
"""Check the auto mode of `fetch_openml`."""
pd = pytest.importorskip("pandas")
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
data = fetch_openml(data_id=data_id, as_frame="auto", parser="auto", cache=False)
klass = pd.DataFrame if data_type == "dataframe" else scipy.sparse.csr_matrix
assert isinstance(data.data, klass)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
"""Check that we raise a warning regarding the working memory when using
LIAC-ARFF parser."""
pytest.importorskip("pandas")
data_id = 1119
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
msg = "Could not adhere to working_memory config."
with pytest.warns(UserWarning, match=msg):
with config_context(working_memory=1e-6):
fetch_openml(
data_id=data_id,
as_frame=True,
cache=False,
parser="liac-arff",
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
"""Check that a warning is raised when multiple versions exist and no version is
requested."""
data_id = 61
data_name = "iris"
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = (
"Multiple active versions of the dataset matching the name"
" iris exist. Versions may be fundamentally different, "
"returning version 1."
)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
name=data_name,
as_frame=False,
cache=False,
parser="liac-arff",
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_no_target(monkeypatch, gzip_response):
"""Check that we can get a dataset without target."""
data_id = 61
target_column = None
expected_observations = 150
expected_features = 5
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
data = fetch_openml(
data_id=data_id,
target_column=target_column,
cache=False,
as_frame=False,
parser="liac-arff",
)
assert data.data.shape == (expected_observations, expected_features)
assert data.target is None
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_missing_values_pandas(monkeypatch, gzip_response, parser):
    """Check that missing values in categories are compatible with pandas
    categoricals."""
pytest.importorskip("pandas")
data_id = 42585
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
penguins = fetch_openml(
data_id=data_id,
cache=False,
as_frame=True,
parser=parser,
)
cat_dtype = penguins.data.dtypes["sex"]
# there are nans in the categorical
assert penguins.data["sex"].isna().any()
assert_array_equal(cat_dtype.categories, ["FEMALE", "MALE", "_"])
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize(
"dataset_params",
[
{"data_id": 40675},
{"data_id": None, "name": "glass2", "version": 1},
],
)
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params):
"""Check that we raise a warning when the dataset is inactive."""
data_id = 40675
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "Version 1 of dataset glass2 is inactive,"
with pytest.warns(UserWarning, match=msg):
glass2 = fetch_openml(
cache=False, as_frame=False, parser="liac-arff", **dataset_params
)
assert glass2.data.shape == (163, 9)
assert glass2.details["id"] == "40675"
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize(
"data_id, params, err_type, err_msg",
[
(40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"),
(
61,
{"data_id": 61, "target_column": ["sepalwidth", "class"]},
ValueError,
"Can only handle homogeneous multi-target datasets",
),
(
40945,
{"data_id": 40945, "as_frame": False},
ValueError,
"STRING attributes are not supported for array representation. Try"
" as_frame=True",
),
(
2,
{"data_id": 2, "target_column": "family", "as_frame": True},
ValueError,
"Target column 'family'",
),
(
2,
{"data_id": 2, "target_column": "family", "as_frame": False},
ValueError,
"Target column 'family'",
),
(
61,
{"data_id": 61, "target_column": "undefined"},
KeyError,
"Could not find target_column='undefined'",
),
(
61,
{"data_id": 61, "target_column": ["undefined", "class"]},
KeyError,
"Could not find target_column='undefined'",
),
],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_error(
monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser
):
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
if params.get("as_frame", True) or parser == "pandas":
pytest.importorskip("pandas")
with pytest.raises(err_type, match=err_msg):
fetch_openml(cache=False, parser=parser, **params)
@pytest.mark.parametrize(
"params, err_type, err_msg",
[
(
{"data_id": -1, "name": None, "version": "version"},
ValueError,
"Dataset data_id=-1 and version=version passed, but you can only",
),
(
{"data_id": -1, "name": "nAmE"},
ValueError,
"Dataset data_id=-1 and name=name passed, but you can only",
),
(
{"data_id": -1, "name": "nAmE", "version": "version"},
ValueError,
"Dataset data_id=-1 and name=name passed, but you can only",
),
(
{},
ValueError,
"Neither name nor data_id are provided. Please provide name or data_id.",
),
],
)
def test_fetch_openml_raises_illegal_argument(params, err_type, err_msg):
with pytest.raises(err_type, match=err_msg):
fetch_openml(**params)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
data_id = 40966
expected_row_id_msg = "target_column='{}' has flag is_row_identifier."
expected_ignore_msg = "target_column='{}' has flag is_ignore."
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
# single column test
target_col = "MouseID"
msg = expected_row_id_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=target_col,
cache=False,
as_frame=False,
parser="liac-arff",
)
target_col = "Genotype"
msg = expected_ignore_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=target_col,
cache=False,
as_frame=False,
parser="liac-arff",
)
# multi column test
target_col = "MouseID"
msg = expected_row_id_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=[target_col, "class"],
cache=False,
as_frame=False,
parser="liac-arff",
)
target_col = "Genotype"
msg = expected_ignore_msg.format(target_col)
with pytest.warns(UserWarning, match=msg):
fetch_openml(
data_id=data_id,
target_column=[target_col, "class"],
cache=False,
as_frame=False,
parser="liac-arff",
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
data_id = 1
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "OpenML registered a problem with the dataset. It might be unusable. Error:"
with pytest.warns(UserWarning, match=msg):
fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
data_id = 3
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
msg = "OpenML raised a warning on the dataset. It might be unusable. Warning:"
with pytest.warns(UserWarning, match=msg):
fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
###############################################################################
# Test cache, retry mechanisms, checksum, etc.
@pytest.mark.parametrize("gzip_response", [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
# first fill the cache
response1 = _open_openml_url(openml_path, cache_directory)
# assert file exists
location = _get_local_path(openml_path, cache_directory)
assert os.path.isfile(location)
# redownload, to utilize cache
response2 = _open_openml_url(openml_path, cache_directory)
assert response1.read() == response2.read()
@pytest.mark.parametrize("write_to_disk", [True, False])
def test_open_openml_url_unlinks_local_path(monkeypatch, tmpdir, write_to_disk):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
location = _get_local_path(openml_path, cache_directory)
def _mock_urlopen(request, *args, **kwargs):
if write_to_disk:
with open(location, "w") as f:
f.write("")
raise ValueError("Invalid request")
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
with pytest.raises(ValueError, match="Invalid request"):
_open_openml_url(openml_path, cache_directory)
assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
location = _get_local_path(openml_path, cache_directory)
os.makedirs(os.path.dirname(location))
with open(location, "w") as f:
f.write("")
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
# The first call will raise an error since location exists
if os.path.exists(location):
raise Exception("File exist!")
return 1
warn_msg = "Invalid cache, redownloading file"
with pytest.warns(RuntimeWarning, match=warn_msg):
result = _load_data()
assert result == 1
def test_retry_with_clean_cache_http_error(tmpdir):
data_id = 61
openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
@_retry_with_clean_cache(openml_path, cache_directory)
def _load_data():
raise HTTPError(
url=None, code=412, msg="Simulated mock error", hdrs=None, fp=None
)
error_msg = "Simulated mock error"
with pytest.raises(HTTPError, match=error_msg):
_load_data()
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
def _mock_urlopen_raise(request, *args, **kwargs):
raise ValueError(
"This mechanism intends to test correct cache"
"handling. As such, urlopen should never be "
"accessed. URL: %s"
% request.get_full_url()
)
data_id = 61
cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
X_fetched, y_fetched = fetch_openml(
data_id=data_id,
cache=True,
data_home=cache_directory,
return_X_y=True,
as_frame=False,
parser="liac-arff",
)
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise)
X_cached, y_cached = fetch_openml(
data_id=data_id,
cache=True,
data_home=cache_directory,
return_X_y=True,
as_frame=False,
parser="liac-arff",
)
np.testing.assert_array_equal(X_fetched, X_cached)
np.testing.assert_array_equal(y_fetched, y_cached)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize(
"as_frame, parser",
[
(True, "liac-arff"),
(False, "liac-arff"),
(True, "pandas"),
(False, "pandas"),
],
)
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir, parser):
"""Check that the checksum is working as expected."""
if as_frame or parser == "pandas":
pytest.importorskip("pandas")
data_id = 2
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
# create a temporary modified arff file
original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
original_data_file_name = "data-v1-dl-1666876.arff.gz"
corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
with resources.open_binary(
original_data_module, original_data_file_name
) as orig_file:
orig_gzip = gzip.open(orig_file, "rb")
data = bytearray(orig_gzip.read())
data[len(data) - 1] = 37
with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
modified_gzip.write(data)
# Requests are already mocked by monkey_patch_webbased_functions.
# We want to re-use that mock for all requests except file download,
# hence creating a thin mock over the original mock
mocked_openml_url = sklearn.datasets._openml.urlopen
def swap_file_mock(request, *args, **kwargs):
url = request.get_full_url()
if url.endswith("data/v1/download/1666876"):
with open(corrupt_copy_path, "rb") as f:
corrupted_data = f.read()
return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
else:
return mocked_openml_url(request)
monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)
# validate failed checksum
with pytest.raises(ValueError) as exc:
sklearn.datasets.fetch_openml(
data_id=data_id, cache=False, as_frame=as_frame, parser=parser
)
# exception message should have file-path
assert exc.match("1666876")
def test_open_openml_url_retry_on_network_error(monkeypatch):
def _mock_urlopen_network_error(request, *args, **kwargs):
raise HTTPError("", 404, "Simulated network error", None, None)
monkeypatch.setattr(
sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error
)
invalid_openml_url = "invalid-url"
with pytest.warns(
UserWarning,
match=re.escape(
"A network error occurred while downloading"
f" {_OPENML_PREFIX + invalid_openml_url}. Retrying..."
),
) as record:
with pytest.raises(HTTPError, match="Simulated network error"):
_open_openml_url(invalid_openml_url, None, delay=0)
assert len(record) == 3
###############################################################################
# Non-regression tests
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize("parser", ("liac-arff", "pandas"))
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser):
"""Check that we can load the "zoo" dataset.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/14340
"""
if parser == "pandas":
pytest.importorskip("pandas")
data_id = 62
_monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
dataset = sklearn.datasets.fetch_openml(
data_id=data_id, cache=False, as_frame=False, parser=parser
)
assert dataset is not None
# The dataset has 17 features, including 1 ignored (animal),
# so we assert that we don't have the ignored feature in the final Bunch
assert dataset["data"].shape == (101, 16)
assert "animal" not in dataset["feature_names"]
def test_fetch_openml_strip_quotes(monkeypatch):
"""Check that we strip the single quotes when used as a string delimiter.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23381
"""
pd = pytest.importorskip("pandas")
data_id = 40966
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
common_params = {"as_frame": True, "cache": False, "data_id": data_id}
mice_pandas = fetch_openml(parser="pandas", **common_params)
mice_liac_arff = fetch_openml(parser="liac-arff", **common_params)
pd.testing.assert_series_equal(mice_pandas.target, mice_liac_arff.target)
assert not mice_pandas.target.str.startswith("'").any()
assert not mice_pandas.target.str.endswith("'").any()
# similar behaviour should be observed when the column is not the target
mice_pandas = fetch_openml(parser="pandas", target_column="NUMB_N", **common_params)
mice_liac_arff = fetch_openml(
parser="liac-arff", target_column="NUMB_N", **common_params
)
pd.testing.assert_series_equal(
mice_pandas.frame["class"], mice_liac_arff.frame["class"]
)
assert not mice_pandas.frame["class"].str.startswith("'").any()
assert not mice_pandas.frame["class"].str.endswith("'").any()
###############################################################################
# Deprecation-changed parameters
# TODO(1.4): remove this test
def test_fetch_openml_deprecation_parser(monkeypatch):
"""Check that we raise a deprecation warning for parser parameter."""
pytest.importorskip("pandas")
data_id = 61
_monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)
with pytest.warns(FutureWarning, match="The default value of `parser` will change"):
sklearn.datasets.fetch_openml(data_id=data_id)
| bsd-3-clause | 8069f1655407df4ae4467731e61a144d | 32.47678 | 88 | 0.588865 | 3.434879 | false | true | false | false |
scikit-learn/scikit-learn | sklearn/multiclass.py | 9 | 36636 | """
Multiclass classification strategies
====================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
from numbers import Integral, Real
import numpy as np
import warnings
import scipy.sparse as sp
import itertools
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MultiOutputMixin
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils._param_validation import HasMethods, Interval
from .utils._tags import _safe_tags
from .utils.validation import _num_samples
from .utils.validation import check_is_fitted
from .utils.multiclass import (
_check_partial_fit_first_call,
check_classification_targets,
_ovr_decision_function,
)
from .utils.metaestimators import _safe_split, available_if
from .utils.fixes import delayed
from joblib import Parallel
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
"""Fit a single binary estimator."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn(
"Label %s is present in all training examples." % str(classes[c])
)
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y)
return estimator
def _partial_fit_binary(estimator, X, y):
"""Partially fit a single binary estimator."""
estimator.partial_fit(X, y, np.array((0, 1)))
return estimator
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
if is_regressor(estimator):
return estimator.predict(X)
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _threshold_for_binary_predict(estimator):
"""Threshold for predictions from binary estimator."""
if hasattr(estimator, "decision_function") and is_classifier(estimator):
return 0.0
else:
# predict_proba threshold
return 0.5
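# Illustrative check (sketch, not part of the library): classifiers exposing
# `decision_function` are thresholded at 0, while purely probabilistic
# classifiers fall back to the 0.5 `predict_proba` threshold.
def _demo_threshold_sketch():
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    assert _threshold_for_binary_predict(LogisticRegression()) == 0.0
    assert _threshold_for_binary_predict(DecisionTreeClassifier()) == 0.5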
class _ConstantPredictor(BaseEstimator):
def fit(self, X, y):
check_params = dict(
force_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True
)
self._validate_data(
X, y, reset=True, validate_separately=(check_params, check_params)
)
self.y_ = y
return self
def predict(self, X):
check_is_fitted(self)
self._validate_data(
X,
force_all_finite=False,
dtype=None,
accept_sparse=True,
ensure_2d=False,
reset=False,
)
return np.repeat(self.y_, _num_samples(X))
def decision_function(self, X):
check_is_fitted(self)
self._validate_data(
X,
force_all_finite=False,
dtype=None,
accept_sparse=True,
ensure_2d=False,
reset=False,
)
return np.repeat(self.y_, _num_samples(X))
def predict_proba(self, X):
check_is_fitted(self)
self._validate_data(
X,
force_all_finite=False,
dtype=None,
accept_sparse=True,
ensure_2d=False,
reset=False,
)
y_ = self.y_.astype(np.float64)
return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0)
def _estimators_has(attr):
"""Check if self.estimator or self.estimators_[0] has attr.
    If `self.estimators_[0]` has the attr, then it is safe to assume that the
    other estimators have it too. This function is used together with
    `available_if`.
"""
return lambda self: (
hasattr(self.estimator, attr)
or (hasattr(self, "estimators_") and hasattr(self.estimators_[0], attr))
)
class OneVsRestClassifier(
MultiOutputMixin, ClassifierMixin, MetaEstimatorMixin, BaseEstimator
):
"""One-vs-the-rest (OvR) multiclass strategy.
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and only one
    classifier, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
OneVsRestClassifier can also be used for multilabel classification. To use
this feature, provide an indicator matrix for the target `y` when calling
`.fit`. In other words, the target labels should be formatted as a 2D
binary (0/1) matrix, where [i, j] == 1 indicates the presence of label j
in sample i. This estimator uses the binary relevance method to perform
multilabel classification, which involves training one binary classifier
independently for each label.
Read more in the :ref:`User Guide <ovr_classification>`.
Parameters
----------
estimator : estimator object
A regressor or a classifier that implements :term:`fit`.
When a classifier is passed, :term:`decision_function` will be used
        in priority and it will fall back to :term:`predict_proba` if it is not
available.
When a regressor is passed, :term:`predict` is used.
n_jobs : int, default=None
The number of jobs to use for the computation: the `n_classes`
one-vs-rest problems are computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: 0.20
`n_jobs` default changed from 1 to None
verbose : int, default=0
The verbosity level, if non zero, progress messages are printed.
Below 50, the output is sent to stderr. Otherwise, the output is sent
to stdout. The frequency of the messages increases with the verbosity
level, reporting all iterations at 10. See :class:`joblib.Parallel` for
more details.
.. versionadded:: 1.1
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
n_classes_ : int
Number of classes.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
MultiOutputClassifier : Alternate way of extending an estimator for
multilabel classification.
sklearn.preprocessing.MultiLabelBinarizer : Transform iterable of iterables
to binary indicator matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.multiclass import OneVsRestClassifier
>>> from sklearn.svm import SVC
>>> X = np.array([
... [10, 10],
... [8, 10],
... [-5, 5.5],
... [-5.4, 5.5],
... [-20, -20],
... [-15, -20]
... ])
>>> y = np.array([0, 0, 1, 1, 2, 2])
>>> clf = OneVsRestClassifier(SVC()).fit(X, y)
>>> clf.predict([[-19, -20], [9, 9], [-5, 5]])
array([2, 0, 1])
"""
_parameter_constraints = {
"estimator": [HasMethods(["fit"])],
"n_jobs": [Integral, None],
"verbose": ["verbose"],
}
def __init__(self, estimator, *, n_jobs=None, verbose=0):
self.estimator = estimator
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
y : (sparse) array-like of shape (n_samples,) or (n_samples, n_classes)
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self : object
Instance of fitted estimator.
"""
self._validate_params()
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
# outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
self.classes_ = self.label_binarizer_.classes_
columns = (col.toarray().ravel() for col in Y.T)
        # In cases where individual estimators are very fast to train, setting
        # n_jobs > 1 can result in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_fit_binary)(
self.estimator,
X,
column,
classes=[
"not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i],
],
)
for i, column in enumerate(columns)
)
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
if hasattr(self.estimators_[0], "feature_names_in_"):
self.feature_names_in_ = self.estimators_[0].feature_names_in_
return self
@available_if(_estimators_has("partial_fit"))
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators.
        Should be used when memory is insufficient to train on all the data at
        once. Chunks of data can be passed over several iterations.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
y : (sparse) array-like of shape (n_samples,) or (n_samples, n_classes)
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self : object
Instance of partially fitted estimator.
"""
if _check_partial_fit_first_call(self, classes):
self._validate_params()
if not hasattr(self.estimator, "partial_fit"):
raise ValueError(
("Base estimator {0}, doesn't have partial_fit method").format(
self.estimator
)
)
self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)]
# A sparse LabelBinarizer, with sparse_output=True, has been
# shown to outperform or match a dense label binarizer in all
# cases and has also resulted in less or equal memory consumption
# in the fit_ovr function overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
self.label_binarizer_.fit(self.classes_)
if len(np.setdiff1d(y, self.classes_)):
raise ValueError(
(
"Mini-batch contains {0} while classes " + "must be subset of {1}"
).format(np.unique(y), self.classes_)
)
Y = self.label_binarizer_.transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_binary)(estimator, X, column)
for estimator, column in zip(self.estimators_, columns)
)
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
Returns
-------
y : (sparse) array-like of shape (n_samples,) or (n_samples, n_classes)
Predicted multi-class targets.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
if self.label_binarizer_.y_type_ == "multiclass":
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.classes_[argmaxima]
else:
thresh = _threshold_for_binary_predict(self.estimators_[0])
indices = array.array("i")
indptr = array.array("i", [0])
for e in self.estimators_:
indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix(
(data, indices, indptr), shape=(n_samples, len(self.estimators_))
)
return self.label_binarizer_.inverse_transform(indicator)
@available_if(_estimators_has("predict_proba"))
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
T : (sparse) array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
# Y[i, j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
@available_if(_estimators_has("decision_function"))
def decision_function(self, X):
"""Decision function for the OneVsRestClassifier.
Return the distance of each sample from the decision boundary for each
class. This can only be used with estimators which implement the
`decision_function` method.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
T : array-like of shape (n_samples, n_classes) or (n_samples,) for \
binary classification.
            The decision function values of the per-class binary estimators,
            stacked with one column per class.
.. versionchanged:: 0.19
output shape changed to ``(n_samples,)`` to conform to
scikit-learn conventions for binary classification.
"""
check_is_fitted(self)
if len(self.estimators_) == 1:
return self.estimators_[0].decision_function(X)
return np.array(
[est.decision_function(X).ravel() for est in self.estimators_]
).T
@property
def multilabel_(self):
"""Whether this is a multilabel classifier."""
return self.label_binarizer_.y_type_.startswith("multilabel")
@property
def n_classes_(self):
"""Number of classes."""
return len(self.classes_)
def _more_tags(self):
"""Indicate if wrapped estimator is using a precomputed Gram matrix"""
return {"pairwise": _safe_tags(self.estimator, key="pairwise")}
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, int)
y_binary[y == i] = 0
y_binary[y == j] = 1
indcond = np.arange(_num_samples(X))[cond]
return (
_fit_binary(
estimator,
_safe_split(estimator, X, None, indices=indcond)[0],
y_binary,
classes=[i, j],
),
indcond,
)
def _partial_fit_ovo_binary(estimator, X, y, i, j):
"""Partially fit a single binary estimator(one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
if len(y) != 0:
y_binary = np.zeros_like(y)
y_binary[y == j] = 1
return _partial_fit_binary(estimator, X[cond], y_binary)
return estimator
class OneVsOneClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
"""One-vs-one multiclass strategy.
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
    Since it requires fitting `n_classes * (n_classes - 1) / 2` classifiers,
    this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Read more in the :ref:`User Guide <ovo_classification>`.
Parameters
----------
estimator : estimator object
A regressor or a classifier that implements :term:`fit`.
When a classifier is passed, :term:`decision_function` will be used
        in priority and it will fall back to :term:`predict_proba` if it is not
available.
When a regressor is passed, :term:`predict` is used.
n_jobs : int, default=None
The number of jobs to use for the computation: the `n_classes * (
n_classes - 1) / 2` OVO problems are computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
estimators_ : list of ``n_classes * (n_classes - 1) / 2`` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
n_classes_ : int
Number of classes.
pairwise_indices_ : list, length = ``len(estimators_)``, or ``None``
Indices of samples used when training the estimators.
``None`` when ``estimator``'s `pairwise` tag is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
OneVsRestClassifier : One-vs-all multiclass strategy.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.multiclass import OneVsOneClassifier
>>> from sklearn.svm import LinearSVC
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, shuffle=True, random_state=0)
>>> clf = OneVsOneClassifier(
... LinearSVC(random_state=0)).fit(X_train, y_train)
>>> clf.predict(X_test[:10])
array([2, 1, 0, 2, 0, 2, 0, 1, 1, 1])
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit"])],
"n_jobs": [Integral, None],
}
def __init__(self, estimator, *, n_jobs=None):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Multi-class targets.
Returns
-------
self : object
The fitted underlying estimator.
"""
self._validate_params()
# We need to validate the data because we do a safe_indexing later.
X, y = self._validate_data(
X, y, accept_sparse=["csr", "csc"], force_all_finite=False
)
check_classification_targets(y)
self.classes_ = np.unique(y)
if len(self.classes_) == 1:
raise ValueError(
"OneVsOneClassifier can not be fit when only one class is present."
)
n_classes = self.classes_.shape[0]
estimators_indices = list(
zip(
*(
Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y, self.classes_[i], self.classes_[j]
)
for i in range(n_classes)
for j in range(i + 1, n_classes)
)
)
)
)
self.estimators_ = estimators_indices[0]
pairwise = self._get_tags()["pairwise"]
self.pairwise_indices_ = estimators_indices[1] if pairwise else None
return self
@available_if(_estimators_has("partial_fit"))
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators.
        Should be used when memory is insufficient to train on all the data at
        once. Chunks of data can be passed over several iterations, and the
        first call must include the full array of target classes.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self : object
The partially fitted underlying estimator.
"""
first_call = _check_partial_fit_first_call(self, classes)
if first_call:
self._validate_params()
self.estimators_ = [
clone(self.estimator)
for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2)
]
if len(np.setdiff1d(y, self.classes_)):
raise ValueError(
"Mini-batch contains {0} while it must be subset of {1}".format(
np.unique(y), self.classes_
)
)
X, y = self._validate_data(
X,
y,
accept_sparse=["csr", "csc"],
force_all_finite=False,
reset=first_call,
)
check_classification_targets(y)
combinations = itertools.combinations(range(self.n_classes_), 2)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_ovo_binary)(
estimator, X, y, self.classes_[i], self.classes_[j]
)
for estimator, (i, j) in zip(self.estimators_, (combinations))
)
self.pairwise_indices_ = None
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
return self
def predict(self, X):
"""Estimate the best class label for each sample in X.
        This is implemented as ``argmax(decision_function(X), axis=1)``, which
        returns the label of the class that received the most votes from the
        estimators trained on each possible class pair.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
Y = self.decision_function(X)
if self.n_classes_ == 2:
thresh = _threshold_for_binary_predict(self.estimators_[0])
return self.classes_[(Y > thresh).astype(int)]
return self.classes_[Y.argmax(axis=1)]
def decision_function(self, X):
"""Decision function for the OneVsOneClassifier.
        The decision values for the samples are computed by adding the
        normalized sum of pair-wise classification confidence levels to the
        votes, which disambiguates cases where the votes for several classes
        are equal and would otherwise lead to a tie.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
Y : array-like of shape (n_samples, n_classes) or (n_samples,)
            The per-class decision values: the pairwise votes augmented with
            normalized confidence levels.
.. versionchanged:: 0.19
output shape changed to ``(n_samples,)`` to conform to
scikit-learn conventions for binary classification.
"""
check_is_fitted(self)
X = self._validate_data(
X,
accept_sparse=True,
force_all_finite=False,
reset=False,
)
indices = self.pairwise_indices_
if indices is None:
Xs = [X] * len(self.estimators_)
else:
Xs = [X[:, idx] for idx in indices]
predictions = np.vstack(
[est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)]
).T
confidences = np.vstack(
[_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)]
).T
Y = _ovr_decision_function(predictions, confidences, len(self.classes_))
if self.n_classes_ == 2:
return Y[:, 1]
return Y
@property
def n_classes_(self):
"""Number of classes."""
return len(self.classes_)
def _more_tags(self):
"""Indicate if wrapped estimator is using a precomputed Gram matrix"""
return {"pairwise": _safe_tags(self.estimator, key="pairwise")}
class OutputCodeClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
"""(Error-Correcting) Output-Code multiclass strategy.
Output-code based strategies consist in representing each class with a
binary code (an array of 0s and 1s). At fitting time, one binary
classifier per bit in the code book is fitted. At prediction time, the
classifiers are used to project new points in the class space and the class
closest to the points is chosen. The main advantage of these strategies is
that the number of classifiers used can be controlled by the user, either
for compressing the model (0 < code_size < 1) or for making the model more
robust to errors (code_size > 1). See the documentation for more details.
Read more in the :ref:`User Guide <ecoc>`.
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit` and one of
:term:`decision_function` or :term:`predict_proba`.
code_size : float, default=1.5
Percentage of the number of classes to be used to create the code book.
A number between 0 and 1 will require fewer classifiers than
one-vs-the-rest. A number greater than 1 will require more classifiers
than one-vs-the-rest.
random_state : int, RandomState instance, default=None
The generator used to initialize the codebook.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for the computation: the multiclass problems
are computed in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
estimators_ : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes_ : ndarray of shape (n_classes,)
Array containing labels.
code_book_ : ndarray of shape (n_classes, code_size)
Binary array containing the code of each class.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
OneVsRestClassifier : One-vs-all multiclass strategy.
OneVsOneClassifier : One-vs-one multiclass strategy.
References
----------
.. [1] "Solving multiclass learning problems via error-correcting output
codes",
Dietterich T., Bakiri G.,
Journal of Artificial Intelligence Research 2,
1995.
.. [2] "The error coding method and PICTs",
James G., Hastie T.,
Journal of Computational and Graphical statistics 7,
1998.
.. [3] "The Elements of Statistical Learning",
Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
2008.
Examples
--------
>>> from sklearn.multiclass import OutputCodeClassifier
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = OutputCodeClassifier(
... estimator=RandomForestClassifier(random_state=0),
... random_state=0).fit(X, y)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
_parameter_constraints: dict = {
"estimator": [
HasMethods(["fit", "decision_function"]),
HasMethods(["fit", "predict_proba"]),
],
"code_size": [Interval(Real, 0.0, None, closed="neither")],
"random_state": ["random_state"],
"n_jobs": [Integral, None],
}
def __init__(self, estimator, *, code_size=1.5, random_state=None, n_jobs=None):
self.estimator = estimator
self.code_size = code_size
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
y : array-like of shape (n_samples,)
Multi-class targets.
Returns
-------
self : object
Returns a fitted instance of self.
"""
self._validate_params()
y = self._validate_data(X="no_validation", y=y)
random_state = check_random_state(self.random_state)
check_classification_targets(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
if n_classes == 0:
raise ValueError(
"OutputCodeClassifier can not be fit when no class is present."
)
code_size_ = int(n_classes * self.code_size)
# FIXME: there are more elaborate methods than generating the codebook
# randomly.
self.code_book_ = random_state.uniform(size=(n_classes, code_size_))
self.code_book_[self.code_book_ > 0.5] = 1
if hasattr(self.estimator, "decision_function"):
self.code_book_[self.code_book_ != 1] = -1
else:
self.code_book_[self.code_book_ != 1] = 0
classes_index = {c: i for i, c in enumerate(self.classes_)}
Y = np.array(
[self.code_book_[classes_index[y[i]]] for i in range(_num_samples(y))],
dtype=int,
)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_binary)(self.estimator, X, Y[:, i]) for i in range(Y.shape[1])
)
if hasattr(self.estimators_[0], "n_features_in_"):
self.n_features_in_ = self.estimators_[0].n_features_in_
if hasattr(self.estimators_[0], "feature_names_in_"):
self.feature_names_in_ = self.estimators_[0].feature_names_in_
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like of shape (n_samples, n_features)
Data.
Returns
-------
y : ndarray of shape (n_samples,)
Predicted multi-class targets.
"""
check_is_fitted(self)
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
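# Illustrative usage sketch (not part of the library; assumes only public
# scikit-learn APIs): the code book has one code word per class, and one
# binary estimator is fitted per code-book bit, so `code_size` directly
# controls the number of classifiers.
def _demo_ecoc_sketch():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    ecoc = OutputCodeClassifier(
        LogisticRegression(max_iter=1000), code_size=2.0, random_state=0
    ).fit(X, y)
    assert ecoc.code_book_.shape == (3, 6)  # int(n_classes * code_size) bits
    assert len(ecoc.estimators_) == 6  # one binary problem per bit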
| bsd-3-clause | 4dc9e30ec744b309dc545f94af4d780a | 34.091954 | 87 | 0.599001 | 4.120571 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/feature_extraction/image.py | 9 | 19739 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
from numbers import Integral, Number, Real
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils._param_validation import Interval
from ..base import BaseEstimator
__all__ = [
"PatchExtractor",
"extract_patches_2d",
"grid_to_graph",
"img_to_graph",
"reconstruct_from_patches_2d",
]
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
    n_z : int, default=1
        The size of the grid in the z direction.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
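# Illustrative check (sketch, numpy only): a 2x2 single-slice grid has four
# vertices connected by four edges (two "right" and two "down"; there are no
# "deep" edges when n_z=1), returned as a (2, n_edges) array of vertex pairs.
def _demo_make_edges_sketch():
    edges = _make_edges_3d(2, 2, n_z=1)
    assert edges.shape == (2, 4)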
def _compute_gradient_3d(edges, img):
_, n_y, n_z = img.shape
gradient = np.abs(
img[
edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z,
]
- img[
edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z,
]
)
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(
n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
):
"""Auxiliary function for img_to_graph and grid_to_graph"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
            mask = np.asarray(mask, dtype=bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix(
(
np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
),
(n_voxels, n_voxels),
dtype=dtype,
)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or \
(height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
The data of the returned sparse matrix. By default it is the
dtype of img.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
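# Illustrative usage sketch (not part of the library): for a 3x3 grayscale
# image the adjacency matrix covers all nine pixels, with edge weights equal
# to the absolute gradient between neighbouring pixels.
def _demo_img_to_graph_sketch():
    import numpy as np
    img = np.arange(9, dtype=float).reshape(3, 3)
    graph = img_to_graph(img)
    assert graph.shape == (9, 9)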
def grid_to_graph(
n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
):
"""Graph of the pixel-to-pixel connections.
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis.
n_y : int
Dimension in y axis.
n_z : int, default=1
Dimension in z axis.
mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=int
The data of the returned sparse matrix. By default it is int.
Returns
-------
graph : np.ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
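# Illustrative usage sketch (not part of the library): a 2x3 grid yields a
# symmetric 6x6 connectivity matrix with unit weights on the edges and the
# diagonal.
def _demo_grid_to_graph_sketch():
    import numpy as np
    graph = grid_to_graph(2, 3, return_as=np.ndarray)
    assert graph.shape == (6, 6)
    assert (graph == graph.T).all()  # connectivity is symmetric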
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
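# Illustrative check (sketch): a 4x4 image with 2x2 patches has 3 * 3 = 9
# possible positions; an integral max_patches caps that total, and a float
# in (0, 1) is interpreted as a proportion of it.
def _demo_compute_n_patches_sketch():
    assert _compute_n_patches(4, 4, 2, 2) == 9
    assert _compute_n_patches(4, 4, 2, 2, max_patches=100) == 9  # capped at total
    assert _compute_n_patches(4, 4, 2, 2, max_patches=0.5) == 4  # int(0.5 * 9)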
def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
    patch_shape : int or tuple of length arr.ndim, default=8
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of
        side length given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
        2n-dimensional array indexing patches on the first n dimensions and
        containing patches on the last n dimensions. These dimensions are
        strided views into the original array, so no data is copied. A simple
        reshape invokes a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
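# Illustrative check (sketch, numpy only): extracting 2x2 patches from a 4x4
# array with step 1 yields a strided view of shape (3, 3, 2, 2); reshaping
# the leading dimensions is what triggers the copy into an explicit list.
def _demo_extract_patches_sketch():
    import numpy as np
    arr = np.arange(16).reshape(4, 4)
    patches = _extract_patches(arr, patch_shape=2, extraction_step=1)
    assert patches.shape == (3, 3, 2, 2)
    assert patches.reshape(-1, 2, 2).shape == (9, 2, 2)  # copies the data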
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or \
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
    if p_h > i_h:
        raise ValueError(
            "Height of the patch should be less than or equal to the height"
            " of the image."
        )
    if p_w > i_w:
        raise ValueError(
            "Width of the patch should be less than or equal to the width"
            " of the image."
        )
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(
image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : ndarray of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of int (image_height, image_width) or \
(image_height, image_width, n_channels)
The size of the image that will be reconstructed.
Returns
-------
image : ndarray of shape image_size
The reconstructed image.
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i : i + p_h, j : j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
return img
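# Illustrative round trip (sketch, numpy only): extracting every 2x2 patch
# and averaging the overlaps back together reconstructs the image exactly.
def _demo_reconstruct_sketch():
    import numpy as np
    image = np.arange(16, dtype=float).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))
    rebuilt = reconstruct_from_patches_2d(patches, (4, 4))
    assert np.allclose(rebuilt, image)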
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images.
Read more in the :ref:`User Guide <image_feature_extraction>`.
.. versionadded:: 0.9
Parameters
----------
patch_size : tuple of int (patch_height, patch_width), default=None
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches per image to extract. If `max_patches` is
a float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches is not None`. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
See Also
--------
reconstruct_from_patches_2d : Reconstruct image from all of its patches.
Examples
--------
>>> from sklearn.datasets import load_sample_images
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the second image in this dataset:
>>> X = load_sample_images().images[1]
>>> print('Image shape: {}'.format(X.shape))
Image shape: (427, 640, 3)
>>> pe = image.PatchExtractor(patch_size=(2, 2))
>>> pe_fit = pe.fit(X)
>>> pe_trans = pe.transform(X)
>>> print('Patches shape: {}'.format(pe_trans.shape))
Patches shape: (545706, 2, 2)
"""
_parameter_constraints: dict = {
"patch_size": [tuple, None],
"max_patches": [
None,
Interval(Real, 0, 1, closed="neither"),
Interval(Integral, 1, None, closed="left"),
],
"random_state": ["random_state"],
}
def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
return self
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or \
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
        the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
        # use a local RNG rather than mutating the `random_state`
        # hyperparameter on the estimator instance
        rng = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
image,
patch_size,
max_patches=self.max_patches,
                random_state=rng,
)
return patches
def _more_tags(self):
return {"X_types": ["3darray"]}
| scikit-learn/scikit-learn | sklearn/datasets/_covtype.py | bsd-3-clause |
"""Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import exists, join
import os
from tempfile import TemporaryDirectory
import numpy as np
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ._base import load_descr
from ..utils import Bunch
from ._base import _pkl_filepath
from ..utils import check_random_state
# The original data can be found in:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
ARCHIVE = RemoteFileMetadata(
filename="covtype.data.gz",
url="https://ndownloader.figshare.com/files/5976039",
checksum="614360d0257557dd1792834a85a1cdebfadc3c4f30b011d56afee7ffb5b15771",
)
logger = logging.getLogger(__name__)
# Column names reference:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
FEATURE_NAMES = [
"Elevation",
"Aspect",
"Slope",
"Horizontal_Distance_To_Hydrology",
"Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am",
"Hillshade_Noon",
"Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points",
]
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)]
TARGET_NAMES = ["Cover_Type"]
def fetch_covtype(
*,
data_home=None,
download_if_missing=True,
random_state=None,
shuffle=False,
return_X_y=False,
as_frame=False,
):
"""Load the covertype dataset (classification).
Download it if necessary.
    =================   ==============
    Classes                          7
    Samples total               581012
    Dimensionality                  54
    Features                       int
    =================   ==============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
            ranging from 1 to 7.
frame : dataframe of shape (581012, 55)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
        target_names : list
The names of the target columns.
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
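
    Examples
    --------
    A minimal usage sketch; the doctest is skipped because calling the
    function downloads the dataset on first use.

    >>> from sklearn.datasets import fetch_covtype
    >>> cov = fetch_covtype()  # doctest: +SKIP
    >>> cov.data.shape  # doctest: +SKIP
    (581012, 54)
    >>> cov.target.shape  # doctest: +SKIP
    (581012,)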
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path) and exists(targets_path)
if download_if_missing and not available:
os.makedirs(covtype_dir, exist_ok=True)
# Creating temp_dir as a direct subdirectory of the target directory
# guarantees that both reside on the same filesystem, so that we can use
# os.rename to atomically move the data files to their target location.
with TemporaryDirectory(dir=covtype_dir) as temp_dir:
logger.info(f"Downloading {ARCHIVE.url}")
archive_path = _fetch_remote(ARCHIVE, dirname=temp_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=",")
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
samples_tmp_path = _pkl_filepath(temp_dir, "samples")
joblib.dump(X, samples_tmp_path, compress=9)
os.rename(samples_tmp_path, samples_path)
targets_tmp_path = _pkl_filepath(temp_dir, "targets")
joblib.dump(y, targets_tmp_path, compress=9)
os.rename(targets_tmp_path, targets_path)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
    # If the data was just downloaded, ``X`` and ``y`` are already in memory;
    # otherwise load them from the on-disk pickle cache.
    try:
        X, y
    except NameError:
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
fdescr = load_descr("covtype.rst")
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(
caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr,
)
| scikit-learn/scikit-learn | sklearn/metrics/_pairwise_distances_reduction/__init__.py | bsd-3-clause |
# Pairwise Distances Reductions
# =============================
#
# Author: Julien Jerphanion <git@jjerphan.xyz>
#
# Overview
# --------
#
# This module provides routines to compute pairwise distances between a set
# of row vectors of X and another set of row vectors of Y and apply a
# reduction on top. The canonical example is the brute-force computation
# of the top k nearest neighbors by leveraging the arg-k-min reduction.
#
# The reduction takes a matrix of pairwise distances between rows of X and Y
# as input and outputs an aggregate data-structure for each row of X. The
# aggregate values are typically smaller than the number of rows in Y, hence
# the term reduction.
#
# For computational reasons, the reductions are performed on the fly on chunks
# of rows of X and Y so as to keep intermediate data-structures in CPU cache
# and avoid unnecessary round trips of large distance arrays to RAM, which
# would otherwise severely degrade speed by making the overall processing
# memory-bound.
#
# Finally, the routines follow a generic parallelization template to process
# chunks of data with OpenMP loops (via Cython prange), either on rows of X
# or rows of Y depending on their respective sizes.
#
#
# Dispatching to specialized implementations
# ------------------------------------------
#
# Dispatchers are meant to be used in the Python code. Under the hood, a
# dispatcher must only define the logic to choose, at runtime, the correct
# dtype-specialized :class:`BaseDistancesReductionDispatcher` implementation based
# on the dtype of X and of Y.
#
#
# High-level diagram
# ------------------
#
# Legend:
#
# A ---⊳ B: A inherits from B
# A ---x B: A dispatches to B
#
#
# (base dispatcher)
# BaseDistancesReductionDispatcher
# ∆
# |
# |
# +-----------------------+----------------------+
# | |
# (dispatcher) (dispatcher)
# ArgKmin RadiusNeighbors
# | |
# | |
# | (float{32,64} implem.) |
# | BaseDistancesReduction{32,64} |
# | ∆ |
# | | |
# | | |
# | +-----------------+-----------------+ |
# | | | |
# | | | |
# x | | x
# ArgKmin{32,64} RadiusNeighbors{32,64}
# | ∆ ∆ |
# | | | |
# ======================= Specializations =============================
# | | | |
# | | | |
# x | | x
# EuclideanArgKmin{32,64} EuclideanRadiusNeighbors{32,64}
#
# For instance :class:`ArgKmin` dispatches to:
# - :class:`ArgKmin64` if X and Y are two `float64` array-likes
# - :class:`ArgKmin32` if X and Y are two `float32` array-likes
#
# In addition, if the metric parameter is set to "euclidean" or "sqeuclidean",
# then `ArgKmin{32,64}` further dispatches to `EuclideanArgKmin{32,64}`. For
# example, :class:`ArgKmin64` would dispatch to :class:`EuclideanArgKmin64`, a
# specialized subclass that optimally handles the Euclidean distance case
# using Generalized Matrix Multiplication over `float64` data (see the
# docstring of :class:`GEMMTermComputer64` for details).
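#
# A minimal usage sketch (illustrative only: this is a private module, so the
# entry point below is an assumption and may change between versions):
#
#   import numpy as np
#   from sklearn.metrics._pairwise_distances_reduction import ArgKmin
#
#   X = np.random.rand(100, 5)
#   Y = np.random.rand(50, 5)
#   # indices into the rows of Y of the 3 nearest neighbors of each row of X
#   argkmin_indices = ArgKmin.compute(X=X, Y=Y, k=3, metric="euclidean")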
from ._dispatcher import (
BaseDistancesReductionDispatcher,
ArgKmin,
RadiusNeighbors,
sqeuclidean_row_norms,
)
__all__ = [
"BaseDistancesReductionDispatcher",
"ArgKmin",
"RadiusNeighbors",
"sqeuclidean_row_norms",
]
| scikit-learn/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | bsd-3-clause |
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of label propagation (here, LabelSpreading) learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD
# %%
# We generate a dataset with two concentric circles. In addition, a label
# is associated with each sample of the dataset that is: 0 (belonging to
# the outer circle), 1 (belonging to the inner circle), and -1 (unknown).
# Here, all labels but two are tagged as unknown.
import numpy as np
from sklearn.datasets import make_circles
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = np.full(n_samples, -1.0)
labels[0] = outer
labels[-1] = inner
# %%
# Plot raw data
import matplotlib.pyplot as plt
plt.figure(figsize=(4, 4))
plt.scatter(
X[labels == outer, 0],
X[labels == outer, 1],
color="navy",
marker="s",
lw=0,
label="outer labeled",
s=10,
)
plt.scatter(
X[labels == inner, 0],
X[labels == inner, 1],
color="c",
marker="s",
lw=0,
label="inner labeled",
s=10,
)
plt.scatter(
X[labels == -1, 0],
X[labels == -1, 1],
color="darkorange",
marker=".",
label="unlabeled",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
_ = plt.title("Raw data (2 classes=outer and inner)")
# %%
#
# The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate
# a label to each sample whose label is initially unknown.
from sklearn.semi_supervised import LabelSpreading
label_spread = LabelSpreading(kernel="knn", alpha=0.8)
label_spread.fit(X, labels)
# %%
# Now, we can check which labels have been associated with each sample
# when the label was unknown.
output_labels = label_spread.transduction_
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.figure(figsize=(4, 4))
plt.scatter(
X[outer_numbers, 0],
X[outer_numbers, 1],
color="navy",
marker="s",
lw=0,
s=10,
label="outer learned",
)
plt.scatter(
X[inner_numbers, 0],
X[inner_numbers, 1],
color="c",
marker="s",
lw=0,
s=10,
label="inner learned",
)
plt.legend(scatterpoints=1, shadow=False, loc="center")
plt.title("Labels learned with Label Spreading (KNN)")
plt.show()
| scikit-learn/scikit-learn | sklearn/neural_network/tests/test_rbm.py | bsd-3-clause |
import sys
import re
import pytest
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_equal,
assert_allclose,
)
from sklearn.datasets import load_digits
from io import StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
Xdigits, _ = load_digits(return_X_y=True)
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9
)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=20, random_state=9
)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, random_state=9
)
rbm2 = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, random_state=9
)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(
rbm1.score_samples(X).mean(), rbm2.score_samples(X).mean(), decimal=0
)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# XXX: this test is very seed-dependent! It probably needs to be rewritten.
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.0], [1.0]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
    # this many iterations are needed
rbm1.fit(X)
assert_almost_equal(
rbm1.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
)
assert_almost_equal(rbm1.gibbs(X), X)
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rng = np.random.RandomState(42)
X = csc_matrix([[0.0], [1.0]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(
rbm2.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert np.all((X_sampled != X_sampled2).max(axis=1))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng)
rbm1.fit(X)
assert (rbm1.score_samples(X) < -300).all()
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under="ignore"):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.0], [1.0]])
rbm = BernoulliRBM(
n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True
)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert re.match(
r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s,
)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize(
"dtype_in, dtype_out",
[(np.float32, np.float32), (np.float64, np.float64), (int, np.float64)],
)
def test_transformer_dtypes_casting(dtype_in, dtype_out):
X = Xdigits[:100].astype(dtype_in)
rbm = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt = rbm.fit_transform(X)
# dtype_in and dtype_out should be consistent
assert Xt.dtype == dtype_out, "transform dtype: {} - original dtype: {}".format(
Xt.dtype, X.dtype
)
def test_convergence_dtype_consistency():
# float 64 transformer
X_64 = Xdigits[:100].astype(np.float64)
rbm_64 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt_64 = rbm_64.fit_transform(X_64)
# float 32 transformer
X_32 = Xdigits[:100].astype(np.float32)
rbm_32 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt_32 = rbm_32.fit_transform(X_32)
# results and attributes should be close enough in 32 bit and 64 bit
assert_allclose(Xt_64, Xt_32, rtol=1e-06, atol=0)
assert_allclose(
rbm_64.intercept_hidden_, rbm_32.intercept_hidden_, rtol=1e-06, atol=0
)
assert_allclose(
rbm_64.intercept_visible_, rbm_32.intercept_visible_, rtol=1e-05, atol=0
)
assert_allclose(rbm_64.components_, rbm_32.components_, rtol=1e-03, atol=0)
assert_allclose(rbm_64.h_samples_, rbm_32.h_samples_)
@pytest.mark.parametrize("method", ["fit", "partial_fit"])
def test_feature_names_out(method):
"""Check `get_feature_names_out` for `BernoulliRBM`."""
n_components = 10
rbm = BernoulliRBM(n_components=n_components)
getattr(rbm, method)(Xdigits)
names = rbm.get_feature_names_out()
expected_names = [f"bernoullirbm{i}" for i in range(n_components)]
assert_array_equal(expected_names, names)
| scikit-learn/scikit-learn | examples/svm/plot_linearsvc_support_vectors.py | bsd-3-clause |
"""
=====================================
Plot the support vectors in LinearSVC
=====================================
Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide
the support vectors. This example demonstrates how to obtain the support
vectors in LinearSVC.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.svm import LinearSVC
from sklearn.inspection import DecisionBoundaryDisplay
X, y = make_blobs(n_samples=40, centers=2, random_state=0)
plt.figure(figsize=(10, 5))
for i, C in enumerate([1, 100]):
# "hinge" is the standard SVM loss
clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
# obtain the support vectors through the decision function
decision_function = clf.decision_function(X)
# we can also calculate the decision function manually
# decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]
# The support vectors are the samples that lie within the margin
# boundaries, whose size is conventionally constrained to 1
support_vector_indices = np.where(np.abs(decision_function) <= 1 + 1e-15)[0]
support_vectors = X[support_vector_indices]
plt.subplot(1, 2, i + 1)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)
ax = plt.gca()
DecisionBoundaryDisplay.from_estimator(
clf,
X,
ax=ax,
grid_resolution=50,
plot_method="contour",
colors="k",
levels=[-1, 0, 1],
alpha=0.5,
linestyles=["--", "-", "--"],
)
plt.scatter(
support_vectors[:, 0],
support_vectors[:, 1],
s=100,
linewidth=1,
facecolors="none",
edgecolors="k",
)
plt.title("C=" + str(C))
plt.tight_layout()
plt.show()
| scikit-learn/scikit-learn | benchmarks/bench_random_projections.py | bsd-3-clause |
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.random_projection import (
SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim,
)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
    mu_second = 1e6  # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = datetime.now() - t_start
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = datetime.now() - t_start
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(
rng.randn(n_nonzeros),
(
rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros),
),
),
shape=(n_samples, n_features),
)
return data_coo.toarray(), data_coo.tocsr()
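
# A minimal sketch of the generator above (sizes are arbitrary): a 100 x 1000
# matrix with 500 non-zero entries, returned both as a dense ndarray and as a
# CSR sparse matrix:
#
#   X_dense, X_csr = make_sparse_random_data(100, 1000, 500, random_state=0)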
def print_row(clf_type, time_fit, time_transform):
print(
"%s | %s | %s"
% (
clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12),
)
)
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option(
"--n-times",
dest="n_times",
default=5,
type=int,
help="Benchmark results are average over n_times experiments",
)
op.add_option(
"--n-features",
dest="n_features",
default=10**4,
type=int,
help="Number of features in the benchmarks",
)
op.add_option(
"--n-components",
dest="n_components",
default="auto",
help="Size of the random subspace. ('auto' or int > 0)",
)
op.add_option(
"--ratio-nonzeros",
dest="ratio_nonzeros",
default=10**-3,
type=float,
help="Number of features in the benchmarks",
)
op.add_option(
"--n-samples",
dest="n_samples",
default=500,
type=int,
help="Number of samples in the benchmarks",
)
op.add_option(
"--random-seed",
dest="random_seed",
default=13,
type=int,
help="Seed used by the random number generators.",
)
op.add_option(
"--density",
dest="density",
default=1 / 3,
help=(
"Density used by the sparse random projection. ('auto' or float (0.0, 1.0]"
),
)
op.add_option(
"--eps",
dest="eps",
default=0.5,
type=float,
help="See the documentation of the underlying transformers.",
)
op.add_option(
"--transformers",
dest="selected_transformers",
default="GaussianRandomProjection,SparseRandomProjection",
type=str,
help=(
"Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection"
),
)
op.add_option(
"--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.",
)
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(",")
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print("Dataset statistics")
print("===========================")
print("n_samples \t= %s" % opts.n_samples)
print("n_features \t= %s" % opts.n_features)
if opts.n_components == "auto":
print(
"n_components \t= %s (auto)"
% johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)
)
else:
print("n_components \t= %s" % opts.n_components)
print("n_elements \t= %s" % (opts.n_features * opts.n_samples))
print("n_nonzeros \t= %s per feature" % n_nonzeros)
print("ratio_nonzeros \t= %s" % opts.ratio_nonzeros)
print("")
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
}
transformers["GaussianRandomProjection"] = GaussianRandomProjection(
**gaussian_matrix_params
)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = SparseRandomProjection(
**sparse_matrix_params
)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print("Benchmarks")
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(
opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed
)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in range(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name]
)
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print(
"%s \t | %s "
% (
"Arguments".ljust(16),
"Value".center(12),
)
)
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print(
"%s | %s | %s"
% ("Transformer".ljust(30), "fit".center(12), "transform".center(12))
)
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name]))
print("")
print("")
| scikit-learn/scikit-learn | sklearn/utils/_pprint.py | bsd-3-clause |
"""This module contains the _EstimatorPrettyPrinter class used in
BaseEstimator.__repr__ for pretty-printing estimators"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation;
# All Rights Reserved
# Authors: Fred L. Drake, Jr. <fdrake@acm.org> (built-in CPython pprint module)
# Nicolas Hug (scikit-learn specific changes)
# License: PSF License version 2 (see below)
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
# and the Individual or Organization ("Licensee") accessing and otherwise
# using this software ("Python") in source or binary form and its associated
# documentation.
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to
# reproduce, analyze, test, perform and/or display publicly, prepare
# derivative works, distribute, and otherwise use Python alone or in any
# derivative version, provided, however, that PSF's License Agreement and
# PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
# 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016,
# 2017, 2018 Python Software Foundation; All Rights Reserved" are retained in
# Python alone or in any derivative version prepared by Licensee.
# 3. In the event Licensee prepares a derivative work that is based on or
# incorporates Python or any part thereof, and wants to make the derivative
# work available to others as provided herein, then Licensee hereby agrees to
# include in any such work a brief summary of the changes made to Python.
# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
# NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
# NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
# MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
# PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
# INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote products
# or services of Licensee, or any third party.
# 8. By copying, installing or otherwise using Python, Licensee agrees to be
# bound by the terms and conditions of this License Agreement.
# Brief summary of changes to original code:
# - "compact" parameter is supported for dicts, not just lists or tuples
# - estimators have a custom handler, they're not just treated as objects
# - long sequences (lists, tuples, dict items) with more than N elements are
# shortened using ellipsis (', ...') at the end.
import inspect
import pprint
from collections import OrderedDict
from ..base import BaseEstimator
from .._config import get_config
from . import is_scalar_nan
class KeyValTuple(tuple):
"""Dummy class for correctly rendering key-value tuples from dicts."""
def __repr__(self):
# needed for _dispatch[tuple.__repr__] not to be overridden
return super().__repr__()
class KeyValTupleParam(KeyValTuple):
"""Dummy class for correctly rendering key-value tuples from parameters."""
pass
def _changed_params(estimator):
"""Return dict (param_name: value) of parameters that were given to
estimator with non-default values."""
params = estimator.get_params(deep=False)
init_func = getattr(estimator.__init__, "deprecated_original", estimator.__init__)
init_params = inspect.signature(init_func).parameters
init_params = {name: param.default for name, param in init_params.items()}
def has_changed(k, v):
if k not in init_params: # happens if k is part of a **kwargs
return True
if init_params[k] == inspect._empty: # k has no default value
return True
# try to avoid calling repr on nested estimators
if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:
return True
# Use repr as a last resort. It may be expensive.
if repr(v) != repr(init_params[k]) and not (
is_scalar_nan(init_params[k]) and is_scalar_nan(v)
):
return True
return False
return {k: v for k, v in params.items() if has_changed(k, v)}
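
# For example (illustrative; the default for ``C`` in LogisticRegression is
# 1.0, so only the overridden value is reported):
#
#   _changed_params(LogisticRegression(C=10))  # -> {'C': 10}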
class _EstimatorPrettyPrinter(pprint.PrettyPrinter):
"""Pretty Printer class for estimator objects.
This extends the pprint.PrettyPrinter class, because:
- we need estimators to be printed with their parameters, e.g.
Estimator(param1=value1, ...) which is not supported by default.
- the 'compact' parameter of PrettyPrinter is ignored for dicts, which
may lead to very long representations that we want to avoid.
Quick overview of pprint.PrettyPrinter (see also
https://stackoverflow.com/questions/49565047/pprint-with-hex-numbers):
- the entry point is the _format() method which calls format() (overridden
here)
- format() directly calls _safe_repr() for a first try at rendering the
object
- _safe_repr formats the whole object recursively, only calling itself,
not caring about line length or anything
- back to _format(), if the output string is too long, _format() then calls
the appropriate _pprint_TYPE() method (e.g. _pprint_list()) depending on
the type of the object. This where the line length and the compact
parameters are taken into account.
- those _pprint_TYPE() methods will internally use the format() method for
rendering the nested objects of an object (e.g. the elements of a list)
In the end, everything has to be implemented twice: in _safe_repr and in
the custom _pprint_TYPE methods. Unfortunately PrettyPrinter is really not
straightforward to extend (especially when we want a compact output), so
the code is a bit convoluted.
This class overrides:
- format() to support the changed_only parameter
- _safe_repr to support printing of estimators (for when they fit on a
single line)
- _format_dict_items so that dict are correctly 'compacted'
- _format_items so that ellipsis is used on long lists and tuples
When estimators cannot be printed on a single line, the builtin _format()
will call _pprint_estimator() because it was registered to do so (see
_dispatch[BaseEstimator.__repr__] = _pprint_estimator).
    Both _format_dict_items() and _pprint_estimator() use the
_format_params_or_dict_items() method that will format parameters and
key-value pairs respecting the compact parameter. This method needs another
subroutine _pprint_key_val_tuple() used when a parameter or a key-value
pair is too long to fit on a single line. This subroutine is called in
_format() and is registered as well in the _dispatch dict (just like
_pprint_estimator). We had to create the two classes KeyValTuple and
KeyValTupleParam for this.
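
    A rough illustration of the resulting behaviour, assuming the default
    global configuration (``print_changed_only=True``):

    >>> from sklearn.linear_model import LogisticRegression
    >>> LogisticRegression(C=10)
    LogisticRegression(C=10)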
"""
def __init__(
self,
indent=1,
width=80,
depth=None,
stream=None,
*,
compact=False,
indent_at_name=True,
n_max_elements_to_show=None,
):
super().__init__(indent, width, depth, stream, compact=compact)
self._indent_at_name = indent_at_name
if self._indent_at_name:
self._indent_per_level = 1 # ignore indent param
self._changed_only = get_config()["print_changed_only"]
# Max number of elements in a list, dict, tuple until we start using
# ellipsis. This also affects the number of arguments of an estimators
# (they are treated as dicts)
self.n_max_elements_to_show = n_max_elements_to_show
def format(self, object, context, maxlevels, level):
return _safe_repr(
object, context, maxlevels, level, changed_only=self._changed_only
)
def _pprint_estimator(self, object, stream, indent, allowance, context, level):
stream.write(object.__class__.__name__ + "(")
if self._indent_at_name:
indent += len(object.__class__.__name__)
if self._changed_only:
params = _changed_params(object)
else:
params = object.get_params(deep=False)
params = OrderedDict((name, val) for (name, val) in sorted(params.items()))
self._format_params(
params.items(), stream, indent, allowance + 1, context, level
)
stream.write(")")
def _format_dict_items(self, items, stream, indent, allowance, context, level):
return self._format_params_or_dict_items(
items, stream, indent, allowance, context, level, is_dict=True
)
def _format_params(self, items, stream, indent, allowance, context, level):
return self._format_params_or_dict_items(
items, stream, indent, allowance, context, level, is_dict=False
)
def _format_params_or_dict_items(
self, object, stream, indent, allowance, context, level, is_dict
):
"""Format dict items or parameters respecting the compact=True
parameter. For some reason, the builtin rendering of dict items doesn't
respect compact=True and will use one line per key-value if all cannot
fit in a single line.
Dict items will be rendered as <'key': value> while params will be
rendered as <key=value>. The implementation is mostly copy/pasting from
the builtin _format_items().
This also adds ellipsis if the number of items is greater than
self.n_max_elements_to_show.
"""
write = stream.write
indent += self._indent_per_level
delimnl = ",\n" + " " * indent
delim = ""
width = max_width = self._width - indent + 1
it = iter(object)
try:
next_ent = next(it)
except StopIteration:
return
last = False
n_items = 0
while not last:
if n_items == self.n_max_elements_to_show:
write(", ...")
break
n_items += 1
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
k, v = ent
krepr = self._repr(k, context, level)
vrepr = self._repr(v, context, level)
if not is_dict:
krepr = krepr.strip("'")
middle = ": " if is_dict else "="
rep = krepr + middle + vrepr
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ", "
write(rep)
continue
write(delim)
delim = delimnl
class_ = KeyValTuple if is_dict else KeyValTupleParam
self._format(
class_(ent), stream, indent, allowance if last else 1, context, level
)
def _format_items(self, items, stream, indent, allowance, context, level):
"""Format the items of an iterable (list, tuple...). Same as the
built-in _format_items, with support for ellipsis if the number of
elements is greater than self.n_max_elements_to_show.
"""
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * " ")
delimnl = ",\n" + " " * indent
delim = ""
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
n_items = 0
while not last:
if n_items == self.n_max_elements_to_show:
write(", ...")
break
n_items += 1
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ", "
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent, allowance if last else 1, context, level)
def _pprint_key_val_tuple(self, object, stream, indent, allowance, context, level):
"""Pretty printing for key-value tuples from dict or parameters."""
k, v = object
rep = self._repr(k, context, level)
if isinstance(object, KeyValTupleParam):
rep = rep.strip("'")
middle = "="
else:
middle = ": "
stream.write(rep)
stream.write(middle)
self._format(
v, stream, indent + len(rep) + len(middle), allowance, context, level
)
# Note: need to copy _dispatch to prevent instances of the builtin
# PrettyPrinter class to call methods of _EstimatorPrettyPrinter (see issue
# 12906)
# mypy error: "Type[PrettyPrinter]" has no attribute "_dispatch"
_dispatch = pprint.PrettyPrinter._dispatch.copy() # type: ignore
_dispatch[BaseEstimator.__repr__] = _pprint_estimator
_dispatch[KeyValTuple.__repr__] = _pprint_key_val_tuple
def _safe_repr(object, context, maxlevels, level, changed_only=False):
"""Same as the builtin _safe_repr, with added support for Estimator
objects."""
typ = type(object)
if typ in pprint._builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=pprint._safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(
k, context, maxlevels, level, changed_only=changed_only
)
vrepr, vreadable, vrecur = saferepr(
v, context, maxlevels, level, changed_only=changed_only
)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or (
issubclass(typ, tuple) and r is tuple.__repr__
):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(
o, context, maxlevels, level, changed_only=changed_only
)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
if issubclass(typ, BaseEstimator):
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return pprint._recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
if changed_only:
params = _changed_params(object)
else:
params = object.get_params(deep=False)
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(params.items(), key=pprint._safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(
k, context, maxlevels, level, changed_only=changed_only
)
vrepr, vreadable, vrecur = saferepr(
v, context, maxlevels, level, changed_only=changed_only
)
append("%s=%s" % (krepr.strip("'"), vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return ("%s(%s)" % (typ.__name__, ", ".join(components)), readable, recursive)
rep = repr(object)
return rep, (rep and not rep.startswith("<")), False
| scikit-learn/scikit-learn | asv_benchmarks/benchmarks/cluster.py | bsd-3-clause |
from sklearn.cluster import KMeans, MiniBatchKMeans
from .common import Benchmark, Estimator, Predictor, Transformer
from .datasets import _blobs_dataset, _20newsgroups_highdim_dataset
from .utils import neg_mean_inertia
class KMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):
"""
Benchmarks for KMeans.
"""
param_names = ["representation", "algorithm", "init"]
params = (["dense", "sparse"], ["lloyd", "elkan"], ["random", "k-means++"])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, algorithm, init = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset(n_samples=8000)
else:
data = _blobs_dataset(n_clusters=20)
return data
def make_estimator(self, params):
representation, algorithm, init = params
max_iter = 30 if representation == "sparse" else 100
estimator = KMeans(
n_clusters=20,
algorithm=algorithm,
init=init,
n_init=1,
max_iter=max_iter,
tol=0,
random_state=0,
)
return estimator
def make_scorers(self):
self.train_scorer = lambda _, __: neg_mean_inertia(
self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_
)
self.test_scorer = lambda _, __: neg_mean_inertia(
self.X_val,
self.estimator.predict(self.X_val),
self.estimator.cluster_centers_,
)
class MiniBatchKMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchKMeans.
"""
param_names = ["representation", "init"]
params = (["dense", "sparse"], ["random", "k-means++"])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, init = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset()
else:
data = _blobs_dataset(n_clusters=20)
return data
def make_estimator(self, params):
representation, init = params
max_iter = 5 if representation == "sparse" else 2
estimator = MiniBatchKMeans(
n_clusters=20,
init=init,
n_init=1,
max_iter=max_iter,
batch_size=1000,
max_no_improvement=None,
compute_labels=False,
random_state=0,
)
return estimator
def make_scorers(self):
self.train_scorer = lambda _, __: neg_mean_inertia(
self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_
)
self.test_scorer = lambda _, __: neg_mean_inertia(
self.X_val,
self.estimator.predict(self.X_val),
self.estimator.cluster_centers_,
)
| scikit-learn/scikit-learn | examples/cluster/plot_kmeans_plusplus.py | bsd-3-clause |
"""
===========================================================
An example of K-Means++ initialization
===========================================================
An example to show the output of the :func:`sklearn.cluster.kmeans_plusplus`
function for generating initial seeds for clustering.
K-Means++ is used as the default initialization for :ref:`k_means`.
"""
from sklearn.cluster import kmeans_plusplus
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
# Generate sample data
n_samples = 4000
n_components = 4
X, y_true = make_blobs(
n_samples=n_samples, centers=n_components, cluster_std=0.60, random_state=0
)
X = X[:, ::-1]
# Calculate seeds from kmeans++
centers_init, indices = kmeans_plusplus(X, n_clusters=4, random_state=0)
# Plot init seeds alongside sample data
plt.figure(1)
colors = ["#4EACC5", "#FF9C34", "#4E9A06", "m"]
for k, col in enumerate(colors):
cluster_data = y_true == k
plt.scatter(X[cluster_data, 0], X[cluster_data, 1], c=col, marker=".", s=10)
plt.scatter(centers_init[:, 0], centers_init[:, 1], c="b", s=50)
plt.title("K-Means++ Initialization")
plt.xticks([])
plt.yticks([])
plt.show()
| scikit-learn/scikit-learn | examples/miscellaneous/plot_pipeline_display.py | bsd-3-clause |
"""
=================================================================
Displaying Pipelines
=================================================================
The default configuration for displaying a pipeline in a Jupyter Notebook is
`'diagram'`, i.e. `set_config(display='diagram')`. To deactivate the HTML representation,
use `set_config(display='text')`.
To see more detailed steps in the visualization of the pipeline, click on the
steps in the pipeline.
"""
# %%
# Displaying a Pipeline with a Preprocessing Step and Classifier
################################################################################
# This section constructs a :class:`~sklearn.pipeline.Pipeline` with a preprocessing
# step, :class:`~sklearn.preprocessing.StandardScaler`, and classifier,
# :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual
# representation.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn import set_config
steps = [
("preprocessing", StandardScaler()),
("classifier", LogisticRegression()),
]
pipe = Pipeline(steps)
# %%
# To visualize the diagram, the default is `display='diagram'`.
set_config(display="diagram")
pipe # click on the diagram below to see the details of each step
# %%
# To view the text pipeline, change to `display='text'`.
set_config(display="text")
pipe
# %%
# Put back the default display
set_config(display="diagram")
# %%
# Displaying a Pipeline Chaining Multiple Preprocessing Steps & Classifier
################################################################################
# This section constructs a :class:`~sklearn.pipeline.Pipeline` with multiple
# preprocessing steps, :class:`~sklearn.preprocessing.PolynomialFeatures` and
# :class:`~sklearn.preprocessing.StandardScaler`, and a classifier step,
# :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual
# representation.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LogisticRegression
steps = [
("standard_scaler", StandardScaler()),
("polynomial", PolynomialFeatures(degree=3)),
("classifier", LogisticRegression(C=2.0)),
]
pipe = Pipeline(steps)
pipe # click on the diagram below to see the details of each step
# %%
# Displaying a Pipeline and Dimensionality Reduction and Classifier
################################################################################
# This section constructs a :class:`~sklearn.pipeline.Pipeline` with a
# dimensionality reduction step, :class:`~sklearn.decomposition.PCA`,
# a classifier, :class:`~sklearn.svm.SVC`, and displays its visual
# representation.
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
steps = [("reduce_dim", PCA(n_components=4)), ("classifier", SVC(kernel="linear"))]
pipe = Pipeline(steps)
pipe # click on the diagram below to see the details of each step
# %%
# Displaying a Complex Pipeline Chaining a Column Transformer
################################################################################
# This section constructs a complex :class:`~sklearn.pipeline.Pipeline` with a
# :class:`~sklearn.compose.ColumnTransformer` and a classifier,
# :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual
# representation.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
numeric_preprocessor = Pipeline(
steps=[
("imputation_mean", SimpleImputer(missing_values=np.nan, strategy="mean")),
("scaler", StandardScaler()),
]
)
categorical_preprocessor = Pipeline(
steps=[
(
"imputation_constant",
SimpleImputer(fill_value="missing", strategy="constant"),
),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocessor = ColumnTransformer(
[
("categorical", categorical_preprocessor, ["state", "gender"]),
("numerical", numeric_preprocessor, ["age", "weight"]),
]
)
pipe = make_pipeline(preprocessor, LogisticRegression(max_iter=500))
pipe # click on the diagram below to see the details of each step
# %%
# Displaying a Grid Search over a Pipeline with a Classifier
################################################################################
# This section constructs a :class:`~sklearn.model_selection.GridSearchCV`
# over a :class:`~sklearn.pipeline.Pipeline` with
# :class:`~sklearn.ensemble.RandomForestClassifier` and displays its visual
# representation.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
numeric_preprocessor = Pipeline(
steps=[
("imputation_mean", SimpleImputer(missing_values=np.nan, strategy="mean")),
("scaler", StandardScaler()),
]
)
categorical_preprocessor = Pipeline(
steps=[
(
"imputation_constant",
SimpleImputer(fill_value="missing", strategy="constant"),
),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocessor = ColumnTransformer(
[
("categorical", categorical_preprocessor, ["state", "gender"]),
("numerical", numeric_preprocessor, ["age", "weight"]),
]
)
pipe = Pipeline(
steps=[("preprocessor", preprocessor), ("classifier", RandomForestClassifier())]
)
param_grid = {
"classifier__n_estimators": [200, 500],
"classifier__max_features": ["auto", "sqrt", "log2"],
"classifier__max_depth": [4, 5, 6, 7, 8],
"classifier__criterion": ["gini", "entropy"],
}
grid_search = GridSearchCV(pipe, param_grid=param_grid, n_jobs=1)
grid_search # click on the diagram below to see the details of each step
| scikit-learn/scikit-learn | examples/svm/plot_separating_hyperplane.py | bsd-3-clause |
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_blobs
from sklearn.inspection import DecisionBoundaryDisplay
# we create 40 separable points
X, y = make_blobs(n_samples=40, centers=2, random_state=6)
# fit the model, don't regularize for illustration purposes
clf = svm.SVC(kernel="linear", C=1000)
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)
# plot the decision function
ax = plt.gca()
DecisionBoundaryDisplay.from_estimator(
clf,
X,
plot_method="contour",
colors="k",
levels=[-1, 0, 1],
alpha=0.5,
linestyles=["--", "-", "--"],
ax=ax,
)
# plot support vectors
ax.scatter(
clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=100,
linewidth=1,
facecolors="none",
edgecolors="k",
)
plt.show()
| scikit-learn/scikit-learn | examples/preprocessing/plot_discretization_classification.py | bsd-3-clause |
# -*- coding: utf-8 -*-
"""
======================
Feature discretization
======================
A demonstration of feature discretization on synthetic classification datasets.
Feature discretization decomposes each feature into a set of bins, here equally
distributed in width. The discrete values are then one-hot encoded, and given
to a linear classifier. This preprocessing enables a non-linear behavior even
though the classifier is linear.
In this example, the first two rows represent linearly non-separable datasets
(moons and concentric circles) while the third is approximately linearly
separable. On the two linearly non-separable datasets, feature discretization
largely increases the performance of linear classifiers. On the linearly
separable dataset, feature discretization decreases the performance of linear
classifiers. Two non-linear classifiers are also shown for comparison.
This example should be taken with a grain of salt, as the intuition conveyed
does not necessarily carry over to real datasets. Particularly in
high-dimensional spaces, data can more easily be separated linearly. Moreover,
using feature discretization and one-hot encoding increases the number of
features, which easily leads to overfitting when the number of samples is small.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Code source: Tom Dupré la Tour
# Adapted from plot_classifier_comparison by Gaël Varoquaux and Andreas Müller
#
# License: BSD 3 clause
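# The preprocessing idea in isolation (a minimal sketch, not part of this
# example; ``X_demo`` stands for any hypothetical 2-D float array):
# discretize each feature into bins, then one-hot encode the bin indices.
#
#   from sklearn.preprocessing import KBinsDiscretizer
#   enc = KBinsDiscretizer(n_bins=4, encode="onehot")
#   X_onehot = enc.fit_transform(X_demo)  # sparse bin-indicator matrix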
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
h = 0.02 # step size in the mesh
def get_name(estimator):
name = estimator.__class__.__name__
if name == "Pipeline":
name = [get_name(est[1]) for est in estimator.steps]
name = " + ".join(name)
return name
# list of (estimator, param_grid), where param_grid is used in GridSearchCV
# The parameter spaces in this example are limited to a narrow band to reduce
# its runtime. In a real use case, a broader search space for the algorithms
# should be used.
classifiers = [
(
make_pipeline(StandardScaler(), LogisticRegression(random_state=0)),
{"logisticregression__C": np.logspace(-1, 1, 3)},
),
(
make_pipeline(StandardScaler(), LinearSVC(random_state=0)),
{"linearsvc__C": np.logspace(-1, 1, 3)},
),
(
make_pipeline(
StandardScaler(),
KBinsDiscretizer(encode="onehot"),
LogisticRegression(random_state=0),
),
{
"kbinsdiscretizer__n_bins": np.arange(5, 8),
"logisticregression__C": np.logspace(-1, 1, 3),
},
),
(
make_pipeline(
StandardScaler(),
KBinsDiscretizer(encode="onehot"),
LinearSVC(random_state=0),
),
{
"kbinsdiscretizer__n_bins": np.arange(5, 8),
"linearsvc__C": np.logspace(-1, 1, 3),
},
),
(
make_pipeline(
StandardScaler(), GradientBoostingClassifier(n_estimators=5, random_state=0)
),
{"gradientboostingclassifier__learning_rate": np.logspace(-2, 0, 5)},
),
(
make_pipeline(StandardScaler(), SVC(random_state=0)),
{"svc__C": np.logspace(-1, 1, 3)},
),
]
names = [get_name(e).replace("StandardScaler + ", "") for e, _ in classifiers]
n_samples = 100
datasets = [
make_moons(n_samples=n_samples, noise=0.2, random_state=0),
make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),
make_classification(
n_samples=n_samples,
n_features=2,
n_redundant=0,
n_informative=2,
random_state=2,
n_clusters_per_class=1,
),
]
fig, axes = plt.subplots(
nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9)
)
cm_piyg = plt.cm.PiYG
cm_bright = ListedColormap(["#b30065", "#178000"])
# iterate over datasets
for ds_cnt, (X, y) in enumerate(datasets):
print(f"\ndataset {ds_cnt}\n---------")
# split into training and test part
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=42
)
# create the grid for background colors
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# plot the dataset first
ax = axes[ds_cnt, 0]
if ds_cnt == 0:
ax.set_title("Input data")
# plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k")
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k"
)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
# iterate over classifiers
for est_idx, (name, (estimator, param_grid)) in enumerate(zip(names, classifiers)):
ax = axes[ds_cnt, est_idx + 1]
clf = GridSearchCV(estimator=estimator, param_grid=param_grid)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print(f"{name}: {score:.2f}")
# plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]*[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()]))
else:
Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]
# put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm_piyg, alpha=0.8)
# plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k"
)
# and testing points
ax.scatter(
X_test[:, 0],
X_test[:, 1],
c=y_test,
cmap=cm_bright,
edgecolors="k",
alpha=0.6,
)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name.replace(" + ", "\n"))
ax.text(
0.95,
0.06,
(f"{score:.2f}").lstrip("0"),
size=15,
bbox=dict(boxstyle="round", alpha=0.8, facecolor="white"),
transform=ax.transAxes,
horizontalalignment="right",
)
plt.tight_layout()
# Add suptitles above the figure
plt.subplots_adjust(top=0.90)
suptitles = [
"Linear classifiers",
"Feature discretization and linear classifiers",
"Non-linear classifiers",
]
for i, suptitle in zip([1, 3, 5], suptitles):
ax = axes[0, i]
ax.text(
1.05,
1.25,
suptitle,
transform=ax.transAxes,
horizontalalignment="center",
size="x-large",
)
plt.show()
| bsd-3-clause | 0c9f59c73af7ca44463c60b18a4a9776 | 32.261803 | 88 | 0.627613 | 3.519528 | false | true | false | false |
scikit-learn/scikit-learn | examples/neighbors/plot_lof_outlier_detection.py | 13 | 2750 | """
=================================================
Outlier detection with Local Outlier Factor (LOF)
=================================================
The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection
method which computes the local density deviation of a given data point with
respect to its neighbors. It considers as outliers the samples that have a
substantially lower density than their neighbors. This example shows how to
use LOF for outlier detection which is the default use case of this estimator
in scikit-learn. Note that when LOF is used for outlier detection it has no
``predict``, ``decision_function`` and ``score_samples`` methods. See the
:ref:`User Guide <outlier_detection>` for details on the difference between
outlier detection and novelty detection and how to use LOF for novelty
detection.
The number of neighbors considered (parameter n_neighbors) is typically
set 1) greater than the minimum number of samples a cluster has to contain,
so that other samples can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close-by samples that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
np.random.seed(42)
# Generate train data
X_inliers = 0.3 * np.random.randn(100, 2)
X_inliers = np.r_[X_inliers + 2, X_inliers - 2]
# Generate some outliers
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X_inliers, X_outliers]
n_outliers = len(X_outliers)
ground_truth = np.ones(len(X), dtype=int)
ground_truth[-n_outliers:] = -1
# fit the model for outlier detection (default)
clf = LocalOutlierFactor(n_neighbors=20, contamination=0.1)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
y_pred = clf.fit_predict(X)
n_errors = (y_pred != ground_truth).sum()
X_scores = clf.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(X[:, 0], X[:, 1], color="k", s=3.0, label="Data points")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(
X[:, 0],
X[:, 1],
s=1000 * radius,
edgecolors="r",
facecolors="none",
label="Outlier scores",
)
plt.axis("tight")
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.xlabel("prediction errors: %d" % (n_errors))
legend = plt.legend(loc="upper left")
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
| bsd-3-clause | 1c4194a7f0e08945e1ed55e444b4dccc | 36.671233 | 77 | 0.715273 | 3.498728 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/svm/_bounds.py | 2 | 2911 | """Determination of parameter bounds"""
# Author: Paolo Losi
# License: BSD 3 clause
from numbers import Real
import numpy as np
from ..preprocessing import LabelBinarizer
from ..utils.validation import check_consistent_length, check_array
from ..utils.extmath import safe_sparse_dot
from ..utils._param_validation import StrOptions, Interval, validate_params
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"y": ["array-like"],
"loss": [StrOptions({"squared_hinge", "log"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
}
)
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
"""Return the lowest bound for C.
The lower bound for C is computed such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
"""
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
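# Example usage (a minimal sketch, not part of this module): for C at or
# below the returned bound, an l1-penalized linear model selects no features.
#
#   from sklearn.svm import l1_min_c
#   from sklearn.datasets import load_iris
#   X, y = load_iris(return_X_y=True)
#   c_min = l1_min_c(X, y, loss="squared_hinge")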
| bsd-3-clause | 48573472567c88c36051c6b7438e3a2b | 33.247059 | 87 | 0.637582 | 3.795306 | false | false | false | false |
scikit-learn/scikit-learn | examples/linear_model/plot_iris_logistic.py | 12 | 1406 | # -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic regression classifier
on the first two dimensions (sepal length and width) of the `iris
<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints
are colored according to their labels.
"""
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.inspection import DecisionBoundaryDisplay
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
# Create an instance of Logistic Regression Classifier and fit the data.
logreg = LogisticRegression(C=1e5)
logreg.fit(X, Y)
_, ax = plt.subplots(figsize=(4, 3))
DecisionBoundaryDisplay.from_estimator(
logreg,
X,
cmap=plt.cm.Paired,
ax=ax,
response_method="predict",
plot_method="pcolormesh",
shading="auto",
xlabel="Sepal length",
ylabel="Sepal width",
eps=0.5,
)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors="k", cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause | 6b7715c2fba3a7a266143183b56d0eaf | 25.509434 | 78 | 0.662633 | 3.584184 | false | false | false | false |
scikit-learn/scikit-learn | examples/cluster/plot_coin_ward_segmentation.py | 12 | 2376 | """
======================================================================
A demo of structured Ward hierarchical clustering on an image of coins
======================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
# %%
# Generate data
# -------------
from skimage.data import coins
orig_coins = coins()
# %%
# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
import numpy as np
from scipy.ndimage import gaussian_filter
from skimage.transform import rescale
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(
smoothened_coins,
0.2,
mode="reflect",
anti_aliasing=False,
)
X = np.reshape(rescaled_coins, (-1, 1))
# %%
# Define structure of the data
# ----------------------------
#
# Pixels are connected to their neighbors.
from sklearn.feature_extraction.image import grid_to_graph
connectivity = grid_to_graph(*rescaled_coins.shape)
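# ``connectivity`` is a sparse adjacency matrix with one node per pixel and
# edges only between neighboring pixels; passing it to the clustering below
# restricts merges to spatially contiguous regions.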
# %%
# Compute clustering
# ------------------
import time as time
from sklearn.cluster import AgglomerativeClustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 27 # number of regions
ward = AgglomerativeClustering(
n_clusters=n_clusters, linkage="ward", connectivity=connectivity
)
ward.fit(X)
label = np.reshape(ward.labels_, rescaled_coins.shape)
print(f"Elapsed time: {time.time() - st:.3f}s")
print(f"Number of pixels: {label.size}")
print(f"Number of clusters: {np.unique(label).size}")
# %%
# Plot the results on an image
# ----------------------------
#
# Agglomerative clustering is able to segment each coin; however, we had to
# use an ``n_clusters`` larger than the number of coins because the
# segmentation also finds large regions in the background.
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(
label == l,
colors=[
plt.cm.nipy_spectral(l / float(n_clusters)),
],
)
plt.axis("off")
plt.show()
| bsd-3-clause | d180e9b8d9e39b30656f7a02b470ff20 | 24.548387 | 79 | 0.649411 | 3.578313 | false | false | false | false |
scikit-learn/scikit-learn | examples/neighbors/plot_species_kde.py | 12 | 4756 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<https://www.iucnredlist.org/species/3038/47437046>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
  also known as the Forest Small Rice Rat, a rodent that lives in
  Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
""" # noqa: E501
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
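# The core computation in isolation (a minimal sketch, not part of this
# example; ``latlon_rad`` is a hypothetical (n, 2) array of [lat, long]
# in radians): fit a haversine-metric KDE, then exponentiate the
# log-density returned by ``score_samples``.
#
#   from sklearn.neighbors import KernelDensity
#   kde = KernelDensity(bandwidth=0.04, metric="haversine").fit(latlon_rad)
#   density = np.exp(kde.score_samples(latlon_rad))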
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ["Bradypus Variegatus", "Microryzomys Minutus"]
Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T
ytrain = np.array(
[d.decode("ascii").startswith("micro") for d in data["train"]["species"]],
dtype="int",
)
Xtrain *= np.pi / 180.0 # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.0
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(
bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree"
)
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = np.full(land_mask.shape[0], -9999, dtype="int")
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(
projection="cyl",
llcrnrlat=Y.min(),
urcrnrlat=Y.max(),
llcrnrlon=X.min(),
urcrnrlon=X.max(),
resolution="c",
)
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(
X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
)
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause | 179ea12c49e5911e3714256e393f84fe | 30.496689 | 89 | 0.656013 | 3.363508 | false | false | false | false |
scikit-learn/scikit-learn | examples/svm/plot_weighted_samples.py | 12 | 2047 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(
X[:, 0],
X[:, 1],
c=y,
s=100 * sample_weight,
alpha=0.9,
cmap=plt.cm.bone,
edgecolors="black",
)
axis.axis("off")
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# Fit the models.
# This model does not take into account sample weights.
clf_no_weights = svm.SVC(gamma=1)
clf_no_weights.fit(X, y)
# This other model takes into account some dedicated sample weights.
clf_weights = svm.SVC(gamma=1)
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(
clf_no_weights, sample_weight_constant, axes[0], "Constant weights"
)
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1], "Modified weights")
plt.show()
| bsd-3-clause | b9bb82c3f324bf5e88d79e3e5cf46bc7 | 27.830986 | 88 | 0.675134 | 3.198438 | false | false | false | false |
scikit-learn/scikit-learn | build_tools/github/vendor.py | 4 | 3038 | """Embed vcomp140.dll and msvcp140.dll."""
import os
import os.path as op
import shutil
import sys
import textwrap
TARGET_FOLDER = op.join("sklearn", ".libs")
DISTRIBUTOR_INIT = op.join("sklearn", "_distributor_init.py")
VCOMP140_SRC_PATH = "C:\\Windows\\System32\\vcomp140.dll"
MSVCP140_SRC_PATH = "C:\\Windows\\System32\\msvcp140.dll"
def make_distributor_init_64_bits(
distributor_init,
vcomp140_dll_filename,
msvcp140_dll_filename,
):
"""Create a _distributor_init.py file for 64-bit architectures.
This file is imported first when importing the sklearn package
so as to pre-load the vendored vcomp140.dll and msvcp140.dll.
"""
with open(distributor_init, "wt") as f:
f.write(
textwrap.dedent(
"""
'''Helper to preload vcomp140.dll and msvcp140.dll to prevent
"not found" errors.
            Once vcomp140.dll and msvcp140.dll are preloaded, they are
            available to any subsequent load of vcomp140.dll and
            msvcp140.dll. This file is created as part of the scripts
            that build the wheel.
'''
import os
import os.path as op
from ctypes import WinDLL
if os.name == "nt":
libs_path = op.join(op.dirname(__file__), ".libs")
vcomp140_dll_filename = op.join(libs_path, "{0}")
msvcp140_dll_filename = op.join(libs_path, "{1}")
WinDLL(op.abspath(vcomp140_dll_filename))
WinDLL(op.abspath(msvcp140_dll_filename))
""".format(
vcomp140_dll_filename,
msvcp140_dll_filename,
)
)
)
def main(wheel_dirname):
"""Embed vcomp140.dll and msvcp140.dll."""
if not op.exists(VCOMP140_SRC_PATH):
raise ValueError(f"Could not find {VCOMP140_SRC_PATH}.")
if not op.exists(MSVCP140_SRC_PATH):
raise ValueError(f"Could not find {MSVCP140_SRC_PATH}.")
if not op.isdir(wheel_dirname):
raise RuntimeError(f"Could not find {wheel_dirname} file.")
vcomp140_dll_filename = op.basename(VCOMP140_SRC_PATH)
msvcp140_dll_filename = op.basename(MSVCP140_SRC_PATH)
target_folder = op.join(wheel_dirname, TARGET_FOLDER)
distributor_init = op.join(wheel_dirname, DISTRIBUTOR_INIT)
# Create the "sklearn/.libs" subfolder
if not op.exists(target_folder):
os.mkdir(target_folder)
print(f"Copying {VCOMP140_SRC_PATH} to {target_folder}.")
shutil.copy2(VCOMP140_SRC_PATH, target_folder)
print(f"Copying {MSVCP140_SRC_PATH} to {target_folder}.")
shutil.copy2(MSVCP140_SRC_PATH, target_folder)
# Generate the _distributor_init file in the source tree
print("Generating the '_distributor_init.py' file.")
make_distributor_init_64_bits(
distributor_init,
vcomp140_dll_filename,
msvcp140_dll_filename,
)
if __name__ == "__main__":
_, wheel_file = sys.argv
main(wheel_file)
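# This script is invoked by the wheel-building CI roughly as follows
# (hypothetical path, for illustration only):
#   python build_tools/github/vendor.py <unpacked_wheel_dir>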
| bsd-3-clause | 338872134b4b322333db0185049c8df0 | 30 | 73 | 0.620803 | 3.475973 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/__check_build/__init__.py | 17 | 1702 | """ Module to give helpful messages to the user who did not
compile scikit-learn properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if (i + 1) % 3:
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + "\n")
raise ImportError(
"""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s"""
% (e, local_dir, "".join(dir_content).strip(), msg)
)
try:
from ._check_build import check_build # noqa
except ImportError as e:
raise_build_error(e)
| bsd-3-clause | 8ec13c4ce998fb107f827d82d21da85c | 33.04 | 75 | 0.615159 | 4.033175 | false | false | false | false |
scikit-learn/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 12 | 4535 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than with FA in this case. However, PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances (choice of the number of components), the held-out
data is more likely for low rank models than for shrinkage models.
The automatic estimation from "Automatic Choice of Dimensionality for PCA"
(Thomas P. Minka, NIPS 2000: 598-604) is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
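# What makes this comparison possible: both PCA and FactorAnalysis expose a
# ``score`` method returning the average log-likelihood of held-out data.
# A minimal sketch (not part of this example; ``X_tr`` and ``X_te`` are
# hypothetical train/test arrays):
#
#   from sklearn.decomposition import PCA
#   ll = PCA(n_components=2).fit(X_tr).score(X_te)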
# %%
# Create the data
# ---------------
import numpy as np
from scipy import linalg
n_samples, n_features, rank = 500, 25, 5
sigma = 1.0
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.0
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
# %%
# Fit the models
# --------------
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver="full")
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {"shrinkage": shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, "Homoscedastic Noise"), (X_hetero, "Heteroscedastic Noise")]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver="full", n_components="mle")
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, "b", label="PCA scores")
plt.plot(n_components, fa_scores, "r", label="FA scores")
plt.axvline(rank, color="g", label="TRUTH: %d" % rank, linestyle="-")
plt.axvline(
n_components_pca,
color="b",
label="PCA CV: %d" % n_components_pca,
linestyle="--",
)
plt.axvline(
n_components_fa,
color="r",
label="FactorAnalysis CV: %d" % n_components_fa,
linestyle="--",
)
plt.axvline(
n_components_pca_mle,
color="k",
label="PCA MLE: %d" % n_components_pca_mle,
linestyle="--",
)
# compare with other covariance estimators
plt.axhline(
shrunk_cov_score(X),
color="violet",
label="Shrunk Covariance MLE",
linestyle="-.",
)
plt.axhline(
lw_score(X),
color="orange",
label="LedoitWolf MLE" % n_components_pca_mle,
linestyle="-.",
)
plt.xlabel("nb of components")
plt.ylabel("CV scores")
plt.legend(loc="lower right")
plt.title(title)
plt.show()
| bsd-3-clause | a96cc6f0cb0299246154662a16d4e499 | 30.061644 | 87 | 0.654245 | 3.374256 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 12 | 9858 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.optimize import approx_fprime
import pytest
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import (
RBF,
CompoundKernel,
ConstantKernel as C,
WhiteKernel,
)
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_almost_equal, assert_array_equal
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [
RBF(length_scale=0.1),
fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
]
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
@pytest.mark.parametrize("kernel", kernels)
def test_predict_consistent(kernel):
# Check binary predict decision has also predicted probability above 0.5.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
def test_predict_consistent_structured():
# Check binary predict decision has also predicted probability above 0.5.
X = ["A", "AB", "B"]
y = np.array([True, False, True])
kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
# Test that hyperparameter-tuning improves log-marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
kernel.theta
)
@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
# Test that lml of optimized kernel is stored correctly.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(
gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7
)
@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False has side-effects on kernel.theta.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)
gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
# Test that we are in local maximum after hyperparameter-optimization.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert np.all(
(np.abs(lml_gradient) < 1e-4)
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0])
| (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])
)
@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
# Compare analytic and numeric gradient of log marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = approx_fprime(
kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10
)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) * RBF(
length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=0
).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert lml > last_lml - np.finfo(np.float32).eps
last_lml = lml
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_custom_optimizer(kernel):
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 10 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = initial_theta, obj_func(
initial_theta, eval_gradient=False
)
for _ in range(10):
theta = np.atleast_1d(
rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
)
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood(
kernel.theta
)
@pytest.mark.parametrize("kernel", kernels)
def test_multi_class(kernel):
# Test GPC for multi-class classification problems.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
@pytest.mark.parametrize("kernel", kernels)
def test_multi_class_n_jobs(kernel):
# Test that multi-class GPC produces identical results with n_jobs>1.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
def test_warning_bounds():
kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
gpc = GaussianProcessClassifier(kernel=kernel)
warning_message = (
"The optimal value found for dimension 0 of parameter "
"length_scale is close to the specified upper bound "
"0.001. Increasing the bound and calling fit again may "
"find a better value."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
gpc.fit(X, y)
kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
length_scale_bounds=[1e3, 1e5]
)
gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("always")
gpc_sum.fit(X, y)
assert len(record) == 2
assert issubclass(record[0].category, ConvergenceWarning)
assert (
record[0].message.args[0]
== "The optimal value found for "
"dimension 0 of parameter "
"k1__noise_level is close to the "
"specified upper bound 0.001. "
"Increasing the bound and calling "
"fit again may find a better value."
)
assert issubclass(record[1].category, ConvergenceWarning)
assert (
record[1].message.args[0]
== "The optimal value found for "
"dimension 0 of parameter "
"k2__length_scale is close to the "
"specified lower bound 1000.0. "
"Decreasing the bound and calling "
"fit again may find a better value."
)
X_tile = np.tile(X, 2)
kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("always")
gpc_dims.fit(X_tile, y)
assert len(record) == 2
assert issubclass(record[0].category, ConvergenceWarning)
assert (
record[0].message.args[0]
== "The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value."
)
assert issubclass(record[1].category, ConvergenceWarning)
assert (
record[1].message.args[0]
== "The optimal value found for "
"dimension 1 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value."
)
@pytest.mark.parametrize(
"params, error_type, err_msg",
[
(
{"kernel": CompoundKernel(0)},
ValueError,
"kernel cannot be a CompoundKernel",
)
],
)
def test_gpc_fit_error(params, error_type, err_msg):
"""Check that expected error are raised during fit."""
gpc = GaussianProcessClassifier(**params)
with pytest.raises(error_type, match=err_msg):
gpc.fit(X, y)
| bsd-3-clause | 1ce0794e84a68d65edd39f0668cb549d | 33.468531 | 88 | 0.649523 | 3.36221 | false | true | false | false |
scikit-learn/scikit-learn | examples/svm/plot_svm_kernels.py | 13 | 1970 | # -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[
(0.4, -0.7),
(-1.5, -1),
(-1.4, -0.9),
(-1.3, -1.2),
(-1.1, -0.2),
(-1.2, -0.4),
(-0.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, 0.8),
(1.2, 0.5),
(0.2, -2),
(0.5, -2.4),
(0.2, -2.3),
(0, -2.7),
(1.3, 2.1),
].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ("linear", "poly", "rbf"):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(
clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=80,
facecolors="none",
zorder=10,
edgecolors="k",
)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired, edgecolors="k")
plt.axis("tight")
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(
XX,
YY,
Z,
colors=["k", "k", "k"],
linestyles=["--", "-", "--"],
levels=[-0.5, 0, 0.5],
)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause | f47e3a130ac11a77e04024d532e01998 | 19.946809 | 85 | 0.485018 | 2.723375 | false | false | false | false |
scikit-learn/scikit-learn | examples/linear_model/plot_sgd_iris.py | 12 | 1947 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot the decision surface of multi-class SGD on the iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
from sklearn.inspection import DecisionBoundaryDisplay
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
clf = SGDClassifier(alpha=0.001, max_iter=100).fit(X, y)
ax = plt.gca()
DecisionBoundaryDisplay.from_estimator(
clf,
X,
cmap=plt.cm.Paired,
ax=ax,
response_method="predict",
xlabel=iris.feature_names[0],
ylabel=iris.feature_names[1],
)
plt.axis("tight")
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(
X[idx, 0],
X[idx, 1],
c=color,
label=iris.target_names[i],
cmap=plt.cm.Paired,
edgecolor="black",
s=20,
)
plt.title("Decision surface of multi-class SGD")
plt.axis("tight")
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
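# Each OVA hyperplane satisfies w0 * x0 + w1 * x1 + b = 0, i.e.
# x1 = -(w0 * x0 + b) / w1, which is exactly what ``line`` computes below.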
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause | 38caa5ae404343dc4d6445df0d5b7668 | 22.178571 | 75 | 0.639959 | 3.120192 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/neural_network/_base.py | 12 | 6330 | """Utilities for the neural network modules
"""
# Author: Issam H. Laradji <issam.laradji@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.special import expit as logistic_sigmoid
from scipy.special import xlogy
def inplace_identity(X):
"""Simply leave the input array unchanged.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data, where `n_samples` is the number of samples
and `n_features` is the number of features.
"""
# Nothing to do
def inplace_logistic(X):
"""Compute the logistic function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
logistic_sigmoid(X, out=X)
def inplace_tanh(X):
"""Compute the hyperbolic tan function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
np.tanh(X, out=X)
def inplace_relu(X):
"""Compute the rectified linear unit function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
np.maximum(X, 0, out=X)
def inplace_softmax(X):
"""Compute the K-way softmax function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
ACTIVATIONS = {
"identity": inplace_identity,
"tanh": inplace_tanh,
"logistic": inplace_logistic,
"relu": inplace_relu,
"softmax": inplace_softmax,
}
def inplace_identity_derivative(Z, delta):
"""Apply the derivative of the identity function: do nothing.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the identity activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
# Nothing to do
def inplace_logistic_derivative(Z, delta):
"""Apply the derivative of the logistic sigmoid function.
It exploits the fact that the derivative is a simple function of the output
value from logistic function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the logistic activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= Z
delta *= 1 - Z
def inplace_tanh_derivative(Z, delta):
"""Apply the derivative of the hyperbolic tanh function.
It exploits the fact that the derivative is a simple function of the output
value from hyperbolic tangent.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the hyperbolic tangent activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= 1 - Z**2
def inplace_relu_derivative(Z, delta):
"""Apply the derivative of the relu function.
It exploits the fact that the derivative is a simple function of the output
value from rectified linear units activation function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the rectified linear units activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta[Z == 0] = 0
DERIVATIVES = {
"identity": inplace_identity_derivative,
"tanh": inplace_tanh_derivative,
"logistic": inplace_logistic_derivative,
"relu": inplace_relu_derivative,
}
def squared_loss(y_true, y_pred):
"""Compute the squared loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) values.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
eps = np.finfo(y_prob.dtype).eps
y_prob = np.clip(y_prob, eps, 1 - eps)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -xlogy(y_true, y_prob).sum() / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
"""Compute binary logistic loss for classification.
    This is identical to log_loss in the binary classification case,
    but is kept for its use in the multilabel case.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, 1)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
eps = np.finfo(y_prob.dtype).eps
y_prob = np.clip(y_prob, eps, 1 - eps)
return (
-(xlogy(y_true, y_prob).sum() + xlogy(1 - y_true, 1 - y_prob).sum())
/ y_prob.shape[0]
)
LOSS_FUNCTIONS = {
"squared_error": squared_loss,
"log_loss": log_loss,
"binary_log_loss": binary_log_loss,
}
| bsd-3-clause | d33856e43386cba54abc8c1938093098 | 25.708861 | 79 | 0.630332 | 3.829401 | false | false | false | false |