repo_name: apache/spark
path: python/pyspark/sql/functions.py
copies: 14
size: 161861
license:
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
A collection of builtin functions
"""
import sys
import functools
import warnings

from pyspark import since, SparkContext
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf  # noqa: F401
from pyspark.sql.udf import _create_udf
# Keep pandas_udf and PandasUDFType import for backwards compatible import; moved in SPARK-28264
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType  # noqa: F401
from pyspark.sql.utils import to_str

# Note to developers: all of PySpark functions here take string as column names whenever possible.
# Namely, if columns are referred to as arguments, they can always be either Column or string,
# even though there might be a few exceptions for legacy or inevitable reasons.
# If you are fixing other language APIs together, also please note that the Scala side is not
# the case, since it requires making every single overridden definition.


def _get_get_jvm_function(name, sc):
    """
    Retrieves JVM function identified by name from
    Java gateway associated with sc.
    """
    return getattr(sc._jvm.functions, name)


def _invoke_function(name, *args):
    """
    Invokes JVM function identified by name with args
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    jf = _get_get_jvm_function(name, SparkContext._active_spark_context)
    return Column(jf(*args))


def _invoke_function_over_column(name, col):
    """
    Invokes unary JVM function identified by name
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    return _invoke_function(name, _to_java_column(col))


def _invoke_binary_math_function(name, col1, col2):
    """
    Invokes binary JVM math function identified by name
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    return _invoke_function(
        name,
        # For legacy reasons, the arguments here can be implicitly converted into floats,
        # if they are not columns or strings.
        _to_java_column(col1) if isinstance(col1, (str, Column)) else float(col1),
        _to_java_column(col2) if isinstance(col2, (str, Column)) else float(col2)
    )


def _options_to_str(options=None):
    if options:
        return {key: to_str(value) for (key, value) in options.items()}
    return {}


def lit(col):
    """
    Creates a :class:`~pyspark.sql.Column` of literal value.

    ..
versionadded:: 1.3.0 Examples -------- >>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1) [Row(height=5, spark_user=True)] """ return col if isinstance(col, Column) else _invoke_function("lit", col) @since(1.3) def col(col): """ Returns a :class:`~pyspark.sql.Column` based on the given column name.' Examples -------- >>> col('x') Column<'x'> >>> column('x') Column<'x'> """ return _invoke_function("col", col) column = col @since(1.3) def asc(col): """ Returns a sort expression based on the ascending order of the given column name. """ return ( col.asc() if isinstance(col, Column) else _invoke_function("asc", col) ) @since(1.3) def desc(col): """ Returns a sort expression based on the descending order of the given column name. """ return ( col.desc() if isinstance(col, Column) else _invoke_function("desc", col) ) @since(1.3) def sqrt(col): """ Computes the square root of the specified float value. """ return _invoke_function_over_column("sqrt", col) @since(1.3) def abs(col): """ Computes the absolute value. """ return _invoke_function_over_column("abs", col) @since(1.3) def max(col): """ Aggregate function: returns the maximum value of the expression in a group. """ return _invoke_function_over_column("max", col) @since(1.3) def min(col): """ Aggregate function: returns the minimum value of the expression in a group. """ return _invoke_function_over_column("min", col) @since(1.3) def count(col): """ Aggregate function: returns the number of items in a group. """ return _invoke_function_over_column("count", col) @since(1.3) def sum(col): """ Aggregate function: returns the sum of all values in the expression. """ return _invoke_function_over_column("sum", col) @since(1.3) def avg(col): """ Aggregate function: returns the average of the values in a group. """ return _invoke_function_over_column("avg", col) @since(1.3) def mean(col): """ Aggregate function: returns the average of the values in a group. """ return _invoke_function_over_column("mean", col) @since(1.3) def sumDistinct(col): """ Aggregate function: returns the sum of distinct values in the expression. .. deprecated:: 3.2.0 Use :func:`sum_distinct` instead. """ warnings.warn("Deprecated in 3.2, use sum_distinct instead.", FutureWarning) return sum_distinct(col) @since(3.2) def sum_distinct(col): """ Aggregate function: returns the sum of distinct values in the expression. """ return _invoke_function_over_column("sum_distinct", col) def product(col): """ Aggregate function: returns the product of the values in a group. .. versionadded:: 3.2.0 Parameters ---------- col : str, :class:`Column` column containing values to be multiplied together Examples -------- >>> df = spark.range(1, 10).toDF('x').withColumn('mod3', col('x') % 3) >>> prods = df.groupBy('mod3').agg(product('x').alias('product')) >>> prods.orderBy('mod3').show() +----+-------+ |mod3|product| +----+-------+ | 0| 162.0| | 1| 28.0| | 2| 80.0| +----+-------+ """ return _invoke_function_over_column("product", col) def acos(col): """ .. versionadded:: 1.4.0 Returns ------- :class:`~pyspark.sql.Column` inverse cosine of `col`, as if computed by `java.lang.Math.acos()` """ return _invoke_function_over_column("acos", col) def acosh(col): """ Computes inverse hyperbolic cosine of the input column. .. versionadded:: 3.1.0 Returns ------- :class:`~pyspark.sql.Column` """ return _invoke_function_over_column("acosh", col) def asin(col): """ .. 
versionadded:: 1.3.0 Returns ------- :class:`~pyspark.sql.Column` inverse sine of `col`, as if computed by `java.lang.Math.asin()` """ return _invoke_function_over_column("asin", col) def asinh(col): """ Computes inverse hyperbolic sine of the input column. .. versionadded:: 3.1.0 Returns ------- :class:`~pyspark.sql.Column` """ return _invoke_function_over_column("asinh", col) def atan(col): """ .. versionadded:: 1.4.0 Returns ------- :class:`~pyspark.sql.Column` inverse tangent of `col`, as if computed by `java.lang.Math.atan()` """ return _invoke_function_over_column("atan", col) def atanh(col): """ Computes inverse hyperbolic tangent of the input column. .. versionadded:: 3.1.0 Returns ------- :class:`~pyspark.sql.Column` """ return _invoke_function_over_column("atanh", col) @since(1.4) def cbrt(col): """ Computes the cube-root of the given value. """ return _invoke_function_over_column("cbrt", col) @since(1.4) def ceil(col): """ Computes the ceiling of the given value. """ return _invoke_function_over_column("ceil", col) def cos(col): """ .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str angle in radians Returns ------- :class:`~pyspark.sql.Column` cosine of the angle, as if computed by `java.lang.Math.cos()`. """ return _invoke_function_over_column("cos", col) def cosh(col): """ .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str hyperbolic angle Returns ------- :class:`~pyspark.sql.Column` hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()` """ return _invoke_function_over_column("cosh", col) @since(1.4) def exp(col): """ Computes the exponential of the given value. """ return _invoke_function_over_column("exp", col) @since(1.4) def expm1(col): """ Computes the exponential of the given value minus one. """ return _invoke_function_over_column("expm1", col) @since(1.4) def floor(col): """ Computes the floor of the given value. """ return _invoke_function_over_column("floor", col) @since(1.4) def log(col): """ Computes the natural logarithm of the given value. """ return _invoke_function_over_column("log", col) @since(1.4) def log10(col): """ Computes the logarithm of the given value in Base 10. """ return _invoke_function_over_column("log10", col) @since(1.4) def log1p(col): """ Computes the natural logarithm of the given value plus one. """ return _invoke_function_over_column("log1p", col) @since(1.4) def rint(col): """ Returns the double value that is closest in value to the argument and is equal to a mathematical integer. """ return _invoke_function_over_column("rint", col) @since(1.4) def signum(col): """ Computes the signum of the given value. """ return _invoke_function_over_column("signum", col) def sin(col): """ .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str Returns ------- :class:`~pyspark.sql.Column` sine of the angle, as if computed by `java.lang.Math.sin()` """ return _invoke_function_over_column("sin", col) def sinh(col): """ .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str hyperbolic angle Returns ------- :class:`~pyspark.sql.Column` hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh()` """ return _invoke_function_over_column("sinh", col) def tan(col): """ .. 
versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str angle in radians Returns ------- :class:`~pyspark.sql.Column` tangent of the given value, as if computed by `java.lang.Math.tan()` """ return _invoke_function_over_column("tan", col) def tanh(col): """ .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str hyperbolic angle Returns ------- :class:`~pyspark.sql.Column` hyperbolic tangent of the given value as if computed by `java.lang.Math.tanh()` """ return _invoke_function_over_column("tanh", col) @since(1.4) def toDegrees(col): """ .. deprecated:: 2.1.0 Use :func:`degrees` instead. """ warnings.warn("Deprecated in 2.1, use degrees instead.", FutureWarning) return degrees(col) @since(1.4) def toRadians(col): """ .. deprecated:: 2.1.0 Use :func:`radians` instead. """ warnings.warn("Deprecated in 2.1, use radians instead.", FutureWarning) return radians(col) @since(1.4) def bitwiseNOT(col): """ Computes bitwise not. .. deprecated:: 3.2.0 Use :func:`bitwise_not` instead. """ warnings.warn("Deprecated in 3.2, use bitwise_not instead.", FutureWarning) return bitwise_not(col) @since(3.2) def bitwise_not(col): """ Computes bitwise not. """ return _invoke_function_over_column("bitwise_not", col) @since(2.4) def asc_nulls_first(col): """ Returns a sort expression based on the ascending order of the given column name, and null values return before non-null values. """ return ( col.asc_nulls_first() if isinstance(col, Column) else _invoke_function("asc_nulls_first", col) ) @since(2.4) def asc_nulls_last(col): """ Returns a sort expression based on the ascending order of the given column name, and null values appear after non-null values. """ return ( col.asc_nulls_last() if isinstance(col, Column) else _invoke_function("asc_nulls_last", col) ) @since(2.4) def desc_nulls_first(col): """ Returns a sort expression based on the descending order of the given column name, and null values appear before non-null values. """ return ( col.desc_nulls_first() if isinstance(col, Column) else _invoke_function("desc_nulls_first", col) ) @since(2.4) def desc_nulls_last(col): """ Returns a sort expression based on the descending order of the given column name, and null values appear after non-null values. """ return ( col.desc_nulls_last() if isinstance(col, Column) else _invoke_function("desc_nulls_last", col) ) @since(1.6) def stddev(col): """ Aggregate function: alias for stddev_samp. """ return _invoke_function_over_column("stddev", col) @since(1.6) def stddev_samp(col): """ Aggregate function: returns the unbiased sample standard deviation of the expression in a group. """ return _invoke_function_over_column("stddev_samp", col) @since(1.6) def stddev_pop(col): """ Aggregate function: returns population standard deviation of the expression in a group. """ return _invoke_function_over_column("stddev_pop", col) @since(1.6) def variance(col): """ Aggregate function: alias for var_samp """ return _invoke_function_over_column("variance", col) @since(1.6) def var_samp(col): """ Aggregate function: returns the unbiased sample variance of the values in a group. """ return _invoke_function_over_column("var_samp", col) @since(1.6) def var_pop(col): """ Aggregate function: returns the population variance of the values in a group. """ return _invoke_function_over_column("var_pop", col) @since(1.6) def skewness(col): """ Aggregate function: returns the skewness of the values in a group. 
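# A minimal usage sketch for the sort helpers above (asc/desc and the asc_nulls_*/desc_nulls_*
# variants), which have no doctest examples in this module. It assumes the SparkSession `spark`
# used by the other doctests; the DataFrame below is illustrative only.
#
# >>> df = spark.createDataFrame([(5,), (None,), (2,)], "age int")
# >>> df.orderBy(asc("age")).collect()             # ascending order places nulls first by default
# [Row(age=None), Row(age=2), Row(age=5)]
# >>> df.orderBy(asc_nulls_last("age")).collect()  # push nulls to the end instead
# [Row(age=2), Row(age=5), Row(age=None)]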
""" return _invoke_function_over_column("skewness", col) @since(1.6) def kurtosis(col): """ Aggregate function: returns the kurtosis of the values in a group. """ return _invoke_function_over_column("kurtosis", col) def collect_list(col): """ Aggregate function: returns a list of objects with duplicates. .. versionadded:: 1.6.0 Notes ----- The function is non-deterministic because the order of collected results depends on the order of the rows which may be non-deterministic after a shuffle. Examples -------- >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',)) >>> df2.agg(collect_list('age')).collect() [Row(collect_list(age)=[2, 5, 5])] """ return _invoke_function_over_column("collect_list", col) def collect_set(col): """ Aggregate function: returns a set of objects with duplicate elements eliminated. .. versionadded:: 1.6.0 Notes ----- The function is non-deterministic because the order of collected results depends on the order of the rows which may be non-deterministic after a shuffle. Examples -------- >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',)) >>> df2.agg(collect_set('age')).collect() [Row(collect_set(age)=[5, 2])] """ return _invoke_function_over_column("collect_set", col) def degrees(col): """ Converts an angle measured in radians to an approximately equivalent angle measured in degrees. .. versionadded:: 2.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str angle in radians Returns ------- :class:`~pyspark.sql.Column` angle in degrees, as if computed by `java.lang.Math.toDegrees()` """ return _invoke_function_over_column("degrees", col) def radians(col): """ Converts an angle measured in degrees to an approximately equivalent angle measured in radians. .. versionadded:: 2.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str angle in degrees Returns ------- :class:`~pyspark.sql.Column` angle in radians, as if computed by `java.lang.Math.toRadians()` """ return _invoke_function_over_column("radians", col) def atan2(col1, col2): """ .. versionadded:: 1.4.0 Parameters ---------- col1 : str, :class:`~pyspark.sql.Column` or float coordinate on y-axis col2 : str, :class:`~pyspark.sql.Column` or float coordinate on x-axis Returns ------- :class:`~pyspark.sql.Column` the `theta` component of the point (`r`, `theta`) in polar coordinates that corresponds to the point (`x`, `y`) in Cartesian coordinates, as if computed by `java.lang.Math.atan2()` """ return _invoke_binary_math_function("atan2", col1, col2) @since(1.4) def hypot(col1, col2): """ Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow. """ return _invoke_binary_math_function("hypot", col1, col2) @since(1.4) def pow(col1, col2): """ Returns the value of the first argument raised to the power of the second argument. """ return _invoke_binary_math_function("pow", col1, col2) @since(1.6) def row_number(): """ Window function: returns a sequential number starting at 1 within a window partition. """ return _invoke_function("row_number") @since(1.6) def dense_rank(): """ Window function: returns the rank of rows within a window partition, without any gaps. The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking sequence when there are ties. That is, if you were ranking a competition using dense_rank and had three people tie for second place, you would say that all three were in second place and that the next person came in third. 
Rank would give sequential numbers, so the person who came in third place (after the ties) would register as coming in fifth. This is equivalent to the DENSE_RANK function in SQL. """ return _invoke_function("dense_rank") @since(1.6) def rank(): """ Window function: returns the rank of rows within a window partition. The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking sequence when there are ties. That is, if you were ranking a competition using dense_rank and had three people tie for second place, you would say that all three were in second place and that the next person came in third. Rank would give sequential numbers, so the person who came in third place (after the ties) would register as coming in fifth. This is equivalent to the RANK function in SQL. """ return _invoke_function("rank") @since(1.6) def cume_dist(): """ Window function: returns the cumulative distribution of values within a window partition, i.e. the fraction of rows that are below the current row. """ return _invoke_function("cume_dist") @since(1.6) def percent_rank(): """ Window function: returns the relative rank (i.e. percentile) of rows within a window partition. """ return _invoke_function("percent_rank") @since(1.3) def approxCountDistinct(col, rsd=None): """ .. deprecated:: 2.1.0 Use :func:`approx_count_distinct` instead. """ warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", FutureWarning) return approx_count_distinct(col, rsd) def approx_count_distinct(col, rsd=None): """Aggregate function: returns a new :class:`~pyspark.sql.Column` for approximate distinct count of column `col`. .. versionadded:: 2.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str rsd : float, optional maximum relative standard deviation allowed (default = 0.05). For rsd < 0.01, it is more efficient to use :func:`count_distinct` Examples -------- >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect() [Row(distinct_ages=2)] """ sc = SparkContext._active_spark_context if rsd is None: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col)) else: jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd) return Column(jc) @since(1.6) def broadcast(df): """Marks a DataFrame as small enough for use in broadcast joins.""" sc = SparkContext._active_spark_context return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx) def coalesce(*cols): """Returns the first column that is not null. .. versionadded:: 1.4.0 Examples -------- >>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b")) >>> cDf.show() +----+----+ | a| b| +----+----+ |null|null| | 1|null| |null| 2| +----+----+ >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show() +--------------+ |coalesce(a, b)| +--------------+ | null| | 1| | 2| +--------------+ >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show() +----+----+----------------+ | a| b|coalesce(a, 0.0)| +----+----+----------------+ |null|null| 0.0| | 1|null| 1.0| |null| 2| 0.0| +----+----+----------------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column)) return Column(jc) def corr(col1, col2): """Returns a new :class:`~pyspark.sql.Column` for the Pearson Correlation Coefficient for ``col1`` and ``col2``. ..
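# A minimal sketch for the ranking window functions above (row_number, rank, dense_rank,
# percent_rank), none of which carries a doctest in this module; they are used together with a
# window specification. Assumes the SparkSession `spark` from the other doctests; the data and
# column names are illustrative only.
#
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([("a", 1), ("a", 1), ("a", 2)], ["k", "v"])
# >>> w = Window.partitionBy("k").orderBy("v")
# >>> df.select("v", rank().over(w).alias("rank"), dense_rank().over(w).alias("dense")).collect()
# [Row(v=1, rank=1, dense=1), Row(v=1, rank=1, dense=1), Row(v=2, rank=3, dense=2)]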
versionadded:: 1.6.0 Examples -------- >>> a = range(20) >>> b = [2 * x for x in range(20)] >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(corr("a", "b").alias('c')).collect() [Row(c=1.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2))) def covar_pop(col1, col2): """Returns a new :class:`~pyspark.sql.Column` for the population covariance of ``col1`` and ``col2``. .. versionadded:: 2.0.0 Examples -------- >>> a = [1] * 10 >>> b = [1] * 10 >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(covar_pop("a", "b").alias('c')).collect() [Row(c=0.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2))) def covar_samp(col1, col2): """Returns a new :class:`~pyspark.sql.Column` for the sample covariance of ``col1`` and ``col2``. .. versionadded:: 2.0.0 Examples -------- >>> a = [1] * 10 >>> b = [1] * 10 >>> df = spark.createDataFrame(zip(a, b), ["a", "b"]) >>> df.agg(covar_samp("a", "b").alias('c')).collect() [Row(c=0.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2))) def countDistinct(col, *cols): """Returns a new :class:`~pyspark.sql.Column` for distinct count of ``col`` or ``cols``. An alias of :func:`count_distinct`, and it is encouraged to use :func:`count_distinct` directly. .. versionadded:: 1.3.0 """ return count_distinct(col, *cols) def count_distinct(col, *cols): """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``. .. versionadded:: 3.2.0 Examples -------- >>> df.agg(count_distinct(df.age, df.name).alias('c')).collect() [Row(c=2)] >>> df.agg(count_distinct("age", "name").alias('c')).collect() [Row(c=2)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.count_distinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column)) return Column(jc) def first(col, ignorenulls=False): """Aggregate function: returns the first value in a group. The function by default returns the first values it sees. It will return the first non-null value it sees when ignoreNulls is set to true. If all values are null, then null is returned. .. versionadded:: 1.3.0 Notes ----- The function is non-deterministic because its results depends on the order of the rows which may be non-deterministic after a shuffle. """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls) return Column(jc) def grouping(col): """ Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated or not, returns 1 for aggregated or 0 for not aggregated in the result set. .. versionadded:: 2.0.0 Examples -------- >>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show() +-----+--------------+--------+ | name|grouping(name)|sum(age)| +-----+--------------+--------+ | null| 1| 7| |Alice| 0| 2| | Bob| 0| 5| +-----+--------------+--------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.grouping(_to_java_column(col)) return Column(jc) def grouping_id(*cols): """ Aggregate function: returns the level of grouping, equals to (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn) .. versionadded:: 2.0.0 Notes ----- The list of columns should match with grouping columns exactly, or empty (means all the grouping columns). 
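# A minimal sketch for first() above (its counterpart last() appears below); neither has a doctest
# example in this module. The ignorenulls flag makes the result independent of row order when only
# one non-null value exists. Assumes the SparkSession `spark` from the other doctests; the data is
# illustrative only.
#
# >>> df = spark.createDataFrame([("Alice", None), ("Alice", 5)], "name string, age int")
# >>> df.groupBy("name").agg(first("age", ignorenulls=True).alias("age")).collect()
# [Row(name='Alice', age=5)]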
Examples -------- >>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show() +-----+-------------+--------+ | name|grouping_id()|sum(age)| +-----+-------------+--------+ | null| 1| 7| |Alice| 0| 2| | Bob| 0| 5| +-----+-------------+--------+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column)) return Column(jc) @since(1.6) def input_file_name(): """Creates a string column for the file name of the current Spark task. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.input_file_name()) def isnan(col): """An expression that returns true iff the column is NaN. .. versionadded:: 1.6.0 Examples -------- >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect() [Row(r1=False, r2=False), Row(r1=True, r2=True)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.isnan(_to_java_column(col))) def isnull(col): """An expression that returns true iff the column is null. .. versionadded:: 1.6.0 Examples -------- >>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b")) >>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect() [Row(r1=False, r2=False), Row(r1=True, r2=True)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.isnull(_to_java_column(col))) def last(col, ignorenulls=False): """Aggregate function: returns the last value in a group. The function by default returns the last values it sees. It will return the last non-null value it sees when ignoreNulls is set to true. If all values are null, then null is returned. .. versionadded:: 1.3.0 Notes ----- The function is non-deterministic because its results depends on the order of the rows which may be non-deterministic after a shuffle. """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls) return Column(jc) def monotonically_increasing_id(): """A column that generates monotonically increasing 64-bit integers. The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive. The current implementation puts the partition ID in the upper 31 bits, and the record number within each partition in the lower 33 bits. The assumption is that the data frame has less than 1 billion partitions, and each partition has less than 8 billion records. .. versionadded:: 1.6.0 Notes ----- The function is non-deterministic because its result depends on partition IDs. As an example, consider a :class:`DataFrame` with two partitions, each with 3 records. This expression would return the following IDs: 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594. >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1']) >>> df0.select(monotonically_increasing_id().alias('id')).collect() [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.monotonically_increasing_id()) def nanvl(col1, col2): """Returns col1 if it is not NaN, or col2 if col1 is NaN. Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`). .. 
versionadded:: 1.6.0 Examples -------- >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b")) >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect() [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))) def percentile_approx(col, percentage, accuracy=10000): """Returns the approximate `percentile` of the numeric column `col` which is the smallest value in the ordered `col` values (sorted from least to greatest) such that no more than `percentage` of `col` values is less than the value or equal to that value. The value of percentage must be between 0.0 and 1.0. The accuracy parameter (default: 10000) is a positive numeric literal which controls approximation accuracy at the cost of memory. Higher value of accuracy yields better accuracy, 1.0/accuracy is the relative error of the approximation. When percentage is an array, each value of the percentage array must be between 0.0 and 1.0. In this case, returns the approximate percentile array of column col at the given percentage array. .. versionadded:: 3.1.0 Examples -------- >>> key = (col("id") % 3).alias("key") >>> value = (randn(42) + key * 10).alias("value") >>> df = spark.range(0, 1000, 1, 1).select(key, value) >>> df.select( ... percentile_approx("value", [0.25, 0.5, 0.75], 1000000).alias("quantiles") ... ).printSchema() root |-- quantiles: array (nullable = true) | |-- element: double (containsNull = false) >>> df.groupBy("key").agg( ... percentile_approx("value", 0.5, lit(1000000)).alias("median") ... ).printSchema() root |-- key: long (nullable = true) |-- median: double (nullable = true) """ sc = SparkContext._active_spark_context if isinstance(percentage, (list, tuple)): # A local list percentage = sc._jvm.functions.array(_to_seq(sc, [ _create_column_from_literal(x) for x in percentage ])) elif isinstance(percentage, Column): # Already a Column percentage = _to_java_column(percentage) else: # Probably scalar percentage = _create_column_from_literal(percentage) accuracy = ( _to_java_column(accuracy) if isinstance(accuracy, Column) else _create_column_from_literal(accuracy) ) return Column(sc._jvm.functions.percentile_approx(_to_java_column(col), percentage, accuracy)) def rand(seed=None): """Generates a random column with independent and identically distributed (i.i.d.) samples uniformly distributed in [0.0, 1.0). .. versionadded:: 1.4.0 Notes ----- The function is non-deterministic in general case. Examples -------- >>> df.withColumn('rand', rand(seed=42) * 3).collect() [Row(age=2, name='Alice', rand=2.4052597283576684), Row(age=5, name='Bob', rand=2.3913904055683974)] """ sc = SparkContext._active_spark_context if seed is not None: jc = sc._jvm.functions.rand(seed) else: jc = sc._jvm.functions.rand() return Column(jc) def randn(seed=None): """Generates a column with independent and identically distributed (i.i.d.) samples from the standard normal distribution. .. versionadded:: 1.4.0 Notes ----- The function is non-deterministic in general case. 
Examples -------- >>> df.withColumn('randn', randn(seed=42)).collect() [Row(age=2, name='Alice', randn=1.1027054481455365), Row(age=5, name='Bob', randn=0.7400395449950132)] """ sc = SparkContext._active_spark_context if seed is not None: jc = sc._jvm.functions.randn(seed) else: jc = sc._jvm.functions.randn() return Column(jc) def round(col, scale=0): """ Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0 or at integral part when `scale` < 0. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect() [Row(r=3.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.round(_to_java_column(col), scale)) def bround(col, scale=0): """ Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0 or at integral part when `scale` < 0. .. versionadded:: 2.0.0 Examples -------- >>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect() [Row(r=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.bround(_to_java_column(col), scale)) def shiftLeft(col, numBits): """Shift the given value numBits left. .. versionadded:: 1.5.0 .. deprecated:: 3.2.0 Use :func:`shiftleft` instead. """ warnings.warn("Deprecated in 3.2, use shiftleft instead.", FutureWarning) return shiftleft(col, numBits) def shiftleft(col, numBits): """Shift the given value numBits left. .. versionadded:: 3.2.0 Examples -------- >>> spark.createDataFrame([(21,)], ['a']).select(shiftleft('a', 1).alias('r')).collect() [Row(r=42)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.shiftleft(_to_java_column(col), numBits)) def shiftRight(col, numBits): """(Signed) shift the given value numBits right. .. versionadded:: 1.5.0 .. deprecated:: 3.2.0 Use :func:`shiftright` instead. """ warnings.warn("Deprecated in 3.2, use shiftright instead.", FutureWarning) return shiftright(col, numBits) def shiftright(col, numBits): """(Signed) shift the given value numBits right. .. versionadded:: 3.2.0 Examples -------- >>> spark.createDataFrame([(42,)], ['a']).select(shiftright('a', 1).alias('r')).collect() [Row(r=21)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits) return Column(jc) def shiftRightUnsigned(col, numBits): """Unsigned shift the given value numBits right. .. versionadded:: 1.5.0 .. deprecated:: 3.2.0 Use :func:`shiftrightunsigned` instead. """ warnings.warn("Deprecated in 3.2, use shiftrightunsigned instead.", FutureWarning) return shiftrightunsigned(col, numBits) def shiftrightunsigned(col, numBits): """Unsigned shift the given value numBits right. .. versionadded:: 3.2.0 Examples -------- >>> df = spark.createDataFrame([(-42,)], ['a']) >>> df.select(shiftrightunsigned('a', 1).alias('r')).collect() [Row(r=9223372036854775787)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits) return Column(jc) def spark_partition_id(): """A column for partition ID. .. versionadded:: 1.6.0 Notes ----- This is non deterministic because it depends on data partitioning and task scheduling. Examples -------- >>> df.repartition(1).select(spark_partition_id().alias("pid")).collect() [Row(pid=0), Row(pid=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.spark_partition_id()) def expr(str): """Parses the expression string into the column that it represents .. 
versionadded:: 1.5.0 Examples -------- >>> df.select(expr("length(name)")).collect() [Row(length(name)=5), Row(length(name)=3)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.expr(str)) def struct(*cols): """Creates a new struct column. .. versionadded:: 1.4.0 Parameters ---------- cols : list, set, str or :class:`~pyspark.sql.Column` column names or :class:`~pyspark.sql.Column`\\s to contain in the output struct. Examples -------- >>> df.select(struct('age', 'name').alias("struct")).collect() [Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))] >>> df.select(struct([df.age, df.name]).alias("struct")).collect() [Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column)) return Column(jc) def greatest(*cols): """ Returns the greatest value of the list of column names, skipping null values. This function takes at least 2 parameters. It will return null iff all parameters are null. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c']) >>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect() [Row(greatest=4)] """ if len(cols) < 2: raise ValueError("greatest should take at least two columns") sc = SparkContext._active_spark_context return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column))) def least(*cols): """ Returns the least value of the list of column names, skipping null values. This function takes at least 2 parameters. It will return null iff all parameters are null. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c']) >>> df.select(least(df.a, df.b, df.c).alias("least")).collect() [Row(least=1)] """ if len(cols) < 2: raise ValueError("least should take at least two columns") sc = SparkContext._active_spark_context return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column))) def when(condition, value): """Evaluates a list of conditions and returns one of multiple possible result expressions. If :func:`pyspark.sql.Column.otherwise` is not invoked, None is returned for unmatched conditions. .. versionadded:: 1.4.0 Parameters ---------- condition : :class:`~pyspark.sql.Column` a boolean :class:`~pyspark.sql.Column` expression. value : a literal value, or a :class:`~pyspark.sql.Column` expression. >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect() [Row(age=3), Row(age=4)] >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect() [Row(age=3), Row(age=None)] """ sc = SparkContext._active_spark_context if not isinstance(condition, Column): raise TypeError("condition should be a Column") v = value._jc if isinstance(value, Column) else value jc = sc._jvm.functions.when(condition._jc, v) return Column(jc) def log(arg1, arg2=None): """Returns the first argument-based logarithm of the second argument. If there is only one argument, then this takes the natural logarithm of the argument. .. 
versionadded:: 1.5.0 Examples -------- >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect() ['0.30102', '0.69897'] >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect() ['0.69314', '1.60943'] """ sc = SparkContext._active_spark_context if arg2 is None: jc = sc._jvm.functions.log(_to_java_column(arg1)) else: jc = sc._jvm.functions.log(arg1, _to_java_column(arg2)) return Column(jc) def log2(col): """Returns the base-2 logarithm of the argument. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect() [Row(log2=2.0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.log2(_to_java_column(col))) def conv(col, fromBase, toBase): """ Convert a number in a string column from one base to another. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([("010101",)], ['n']) >>> df.select(conv(df.n, 2, 16).alias('hex')).collect() [Row(hex='15')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase)) def factorial(col): """ Computes the factorial of the given value. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([(5,)], ['n']) >>> df.select(factorial(df.n).alias('f')).collect() [Row(f=120)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.factorial(_to_java_column(col))) # --------------- Window functions ------------------------ def lag(col, offset=1, default=None): """ Window function: returns the value that is `offset` rows before the current row, and `default` if there is less than `offset` rows before the current row. For example, an `offset` of one will return the previous row at any given point in the window partition. This is equivalent to the LAG function in SQL. .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression offset : int, optional number of row to extend default : optional default value """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default)) def lead(col, offset=1, default=None): """ Window function: returns the value that is `offset` rows after the current row, and `default` if there is less than `offset` rows after the current row. For example, an `offset` of one will return the next row at any given point in the window partition. This is equivalent to the LEAD function in SQL. .. versionadded:: 1.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression offset : int, optional number of row to extend default : optional default value """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default)) def nth_value(col, offset, ignoreNulls=False): """ Window function: returns the value that is the `offset`\\th row of the window frame (counting from 1), and `null` if the size of window frame is less than `offset` rows. It will return the `offset`\\th non-null value it sees when `ignoreNulls` is set to true. If all values are null, then null is returned. This is equivalent to the nth_value function in SQL. .. 
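# A minimal sketch for the lag/lead window functions above, which have no doctest examples in this
# module and require a window specification. Assumes the SparkSession `spark` from the other
# doctests; the data is illustrative only.
#
# >>> from pyspark.sql import Window
# >>> df = spark.createDataFrame([(1,), (2,), (3,)], "v int")
# >>> w = Window.orderBy("v")
# >>> df.select("v", lag("v", 1).over(w).alias("prev"), lead("v", 1, 0).over(w).alias("next")).collect()
# [Row(v=1, prev=None, next=2), Row(v=2, prev=1, next=3), Row(v=3, prev=2, next=0)]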
versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression offset : int, optional number of row to use as the value ignoreNulls : bool, optional indicates the Nth value should skip null in the determination of which row to use """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.nth_value(_to_java_column(col), offset, ignoreNulls)) def ntile(n): """ Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second quarter will get 2, the third quarter will get 3, and the last quarter will get 4. This is equivalent to the NTILE function in SQL. .. versionadded:: 1.4.0 Parameters ---------- n : int an integer """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.ntile(int(n))) # ---------------------- Date/Timestamp functions ------------------------------ @since(1.5) def current_date(): """ Returns the current date at the start of query evaluation as a :class:`DateType` column. All calls of current_date within the same query return the same value. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.current_date()) def current_timestamp(): """ Returns the current timestamp at the start of query evaluation as a :class:`TimestampType` column. All calls of current_timestamp within the same query return the same value. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.current_timestamp()) def date_format(date, format): """ Converts a date/timestamp/string to a value of string in the format specified by the date format given by the second argument. A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All pattern letters of `datetime pattern`_. can be used. .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html .. versionadded:: 1.5.0 Notes ----- Whenever possible, use specialized functions like `year`. Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect() [Row(date='04/08/2015')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_format(_to_java_column(date), format)) def year(col): """ Extract the year of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(year('dt').alias('year')).collect() [Row(year=2015)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.year(_to_java_column(col))) def quarter(col): """ Extract the quarter of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(quarter('dt').alias('quarter')).collect() [Row(quarter=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.quarter(_to_java_column(col))) def month(col): """ Extract the month of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(month('dt').alias('month')).collect() [Row(month=4)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.month(_to_java_column(col))) def dayofweek(col): """ Extract the day of the week of a given date as integer. .. 
versionadded:: 2.3.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(dayofweek('dt').alias('day')).collect() [Row(day=4)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.dayofweek(_to_java_column(col))) def dayofmonth(col): """ Extract the day of the month of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(dayofmonth('dt').alias('day')).collect() [Row(day=8)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.dayofmonth(_to_java_column(col))) def dayofyear(col): """ Extract the day of the year of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(dayofyear('dt').alias('day')).collect() [Row(day=98)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.dayofyear(_to_java_column(col))) def hour(col): """ Extract the hours of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(hour('ts').alias('hour')).collect() [Row(hour=13)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.hour(_to_java_column(col))) def minute(col): """ Extract the minutes of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(minute('ts').alias('minute')).collect() [Row(minute=8)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.minute(_to_java_column(col))) def second(col): """ Extract the seconds of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts']) >>> df.select(second('ts').alias('second')).collect() [Row(second=15)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.second(_to_java_column(col))) def weekofyear(col): """ Extract the week number of a given date as integer. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(weekofyear(df.dt).alias('week')).collect() [Row(week=15)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.weekofyear(_to_java_column(col))) def date_add(start, days): """ Returns the date that is `days` days after `start` .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_add(df.dt, 1).alias('next_date')).collect() [Row(next_date=datetime.date(2015, 4, 9))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_add(_to_java_column(start), days)) def date_sub(start, days): """ Returns the date that is `days` days before `start` .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect() [Row(prev_date=datetime.date(2015, 4, 7))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_sub(_to_java_column(start), days)) def datediff(end, start): """ Returns the number of days from `start` to `end`. .. 
versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2']) >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect() [Row(diff=32)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))) def add_months(start, months): """ Returns the date that is `months` months after `start` .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> df.select(add_months(df.dt, 1).alias('next_month')).collect() [Row(next_month=datetime.date(2015, 5, 8))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.add_months(_to_java_column(start), months)) def months_between(date1, date2, roundOff=True): """ Returns number of months between dates date1 and date2. If date1 is later than date2, then the result is positive. If date1 and date2 are on the same day of month, or both are the last day of month, returns an integer (time of day will be ignored). The result is rounded off to 8 digits unless `roundOff` is set to `False`. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2']) >>> df.select(months_between(df.date1, df.date2).alias('months')).collect() [Row(months=3.94959677)] >>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect() [Row(months=3.9495967741935485)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.months_between( _to_java_column(date1), _to_java_column(date2), roundOff)) def to_date(col, format=None): """Converts a :class:`~pyspark.sql.Column` into :class:`pyspark.sql.types.DateType` using the optionally specified format. Specify formats according to `datetime pattern`_. By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format is omitted. Equivalent to ``col.cast("date")``. .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html .. versionadded:: 2.2.0 Examples -------- >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_date(df.t).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context if format is None: jc = sc._jvm.functions.to_date(_to_java_column(col)) else: jc = sc._jvm.functions.to_date(_to_java_column(col), format) return Column(jc) def to_timestamp(col, format=None): """Converts a :class:`~pyspark.sql.Column` into :class:`pyspark.sql.types.TimestampType` using the optionally specified format. Specify formats according to `datetime pattern`_. By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format is omitted. Equivalent to ``col.cast("timestamp")``. .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html .. 
versionadded:: 2.2.0 Examples -------- >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_timestamp(df.t).alias('dt')).collect() [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))] >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t']) >>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect() [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))] """ sc = SparkContext._active_spark_context if format is None: jc = sc._jvm.functions.to_timestamp(_to_java_column(col)) else: jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format) return Column(jc) def trunc(date, format): """ Returns date truncated to the unit specified by the format. .. versionadded:: 1.5.0 Parameters ---------- date : :class:`~pyspark.sql.Column` or str format : str 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm' Examples -------- >>> df = spark.createDataFrame([('1997-02-28',)], ['d']) >>> df.select(trunc(df.d, 'year').alias('year')).collect() [Row(year=datetime.date(1997, 1, 1))] >>> df.select(trunc(df.d, 'mon').alias('month')).collect() [Row(month=datetime.date(1997, 2, 1))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.trunc(_to_java_column(date), format)) def date_trunc(format, timestamp): """ Returns timestamp truncated to the unit specified by the format. .. versionadded:: 2.3.0 Parameters ---------- format : str 'year', 'yyyy', 'yy', 'month', 'mon', 'mm', 'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter' timestamp : :class:`~pyspark.sql.Column` or str Examples -------- >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t']) >>> df.select(date_trunc('year', df.t).alias('year')).collect() [Row(year=datetime.datetime(1997, 1, 1, 0, 0))] >>> df.select(date_trunc('mon', df.t).alias('month')).collect() [Row(month=datetime.datetime(1997, 2, 1, 0, 0))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp))) def next_day(date, dayOfWeek): """ Returns the first date which is later than the value of the date column. Day of the week parameter is case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun". .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('2015-07-27',)], ['d']) >>> df.select(next_day(df.d, 'Sun').alias('date')).collect() [Row(date=datetime.date(2015, 8, 2))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek)) def last_day(date): """ Returns the last day of the month which the given date belongs to. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('1997-02-10',)], ['d']) >>> df.select(last_day(df.d).alias('date')).collect() [Row(date=datetime.date(1997, 2, 28))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.last_day(_to_java_column(date))) def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"): """ Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string representing the timestamp of that moment in the current system time zone in the given format. .. 
versionadded:: 1.5.0 Examples -------- >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles") >>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time']) >>> time_df.select(from_unixtime('unix_time').alias('ts')).collect() [Row(ts='2015-04-08 00:00:00')] >>> spark.conf.unset("spark.sql.session.timeZone") """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format)) def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'): """ Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default) to Unix time stamp (in seconds), using the default timezone and the default locale, return null if fail. if `timestamp` is None, then it returns current timestamp. .. versionadded:: 1.5.0 Examples -------- >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles") >>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt']) >>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect() [Row(unix_time=1428476400)] >>> spark.conf.unset("spark.sql.session.timeZone") """ sc = SparkContext._active_spark_context if timestamp is None: return Column(sc._jvm.functions.unix_timestamp()) return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format)) def from_utc_timestamp(timestamp, tz): """ This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and renders that timestamp as a timestamp in the given time zone. However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to the given timezone. This function may return confusing result if the input is a string with timezone, e.g. '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp according to the timezone in the string, and finally display the result by converting the timestamp to string according to the session local timezone. .. versionadded:: 1.5.0 Parameters ---------- timestamp : :class:`~pyspark.sql.Column` or str the column that contains timestamps tz : :class:`~pyspark.sql.Column` or str A string detailing the time zone ID that the input should be adjusted to. It should be in the format of either region-based zone IDs or zone offsets. Region IDs must have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'. Other short names are not recommended to use because they can be ambiguous. .. versionchanged:: 2.4 `tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings. Examples -------- >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz']) >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))] >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect() [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))] """ sc = SparkContext._active_spark_context if isinstance(tz, Column): tz = _to_java_column(tz) return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz)) def to_utc_timestamp(timestamp, tz): """ This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. 
This function takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given timezone, and renders that timestamp as a timestamp in UTC. However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not timezone-agnostic. So in Spark this function just shift the timestamp value from the given timezone to UTC timezone. This function may return confusing result if the input is a string with timezone, e.g. '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp according to the timezone in the string, and finally display the result by converting the timestamp to string according to the session local timezone. .. versionadded:: 1.5.0 Parameters ---------- timestamp : :class:`~pyspark.sql.Column` or str the column that contains timestamps tz : :class:`~pyspark.sql.Column` or str A string detailing the time zone ID that the input should be adjusted to. It should be in the format of either region-based zone IDs or zone offsets. Region IDs must have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'. Other short names are not recommended to use because they can be ambiguous. .. versionchanged:: 2.4.0 `tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings. Examples -------- >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz']) >>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect() [Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))] >>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect() [Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))] """ sc = SparkContext._active_spark_context if isinstance(tz, Column): tz = _to_java_column(tz) return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz)) def timestamp_seconds(col): """ .. versionadded:: 3.1.0 Examples -------- >>> from pyspark.sql.functions import timestamp_seconds >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles") >>> time_df = spark.createDataFrame([(1230219000,)], ['unix_time']) >>> time_df.select(timestamp_seconds(time_df.unix_time).alias('ts')).show() +-------------------+ | ts| +-------------------+ |2008-12-25 07:30:00| +-------------------+ >>> spark.conf.unset("spark.sql.session.timeZone") """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.timestamp_seconds(_to_java_column(col))) def window(timeColumn, windowDuration, slideDuration=None, startTime=None): """Bucketize rows into one or more time windows given a timestamp specifying column. Window starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in the order of months are not supported. The time column must be of :class:`pyspark.sql.types.TimestampType`. Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'. If the ``slideDuration`` is not provided, the windows will be tumbling windows. The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
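# A minimal sketch of the sliding-window and startTime parameters described above; the doctest
# below only shows a tumbling window. Assumes the SparkSession `spark` from the other doctests;
# the data is illustrative only.
#
# >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)], ["date", "val"])
# >>> sliding = df.groupBy(window("date", "10 seconds", "5 seconds")).agg(sum("val"))
# >>> sliding.count()  # the single row falls into two overlapping 10-second windows
# 2
# >>> hourly = window("date", "1 hour", startTime="15 minutes")  # tumbling hourly windows starting at :15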
The output column will be a struct called 'window' by default with the nested columns 'start' and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`. .. versionadded:: 2.0.0 Examples -------- >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val") >>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum")) >>> w.select(w.window.start.cast("string").alias("start"), ... w.window.end.cast("string").alias("end"), "sum").collect() [Row(start='2016-03-11 09:00:05', end='2016-03-11 09:00:10', sum=1)] """ def check_string_field(field, fieldName): if not field or type(field) is not str: raise TypeError("%s should be provided as a string" % fieldName) sc = SparkContext._active_spark_context time_col = _to_java_column(timeColumn) check_string_field(windowDuration, "windowDuration") if slideDuration and startTime: check_string_field(slideDuration, "slideDuration") check_string_field(startTime, "startTime") res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime) elif slideDuration: check_string_field(slideDuration, "slideDuration") res = sc._jvm.functions.window(time_col, windowDuration, slideDuration) elif startTime: check_string_field(startTime, "startTime") res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime) else: res = sc._jvm.functions.window(time_col, windowDuration) return Column(res) # ---------------------------- misc functions ---------------------------------- def crc32(col): """ Calculates the cyclic redundancy check value (CRC32) of a binary column and returns the value as a bigint. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect() [Row(crc32=2743272264)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.crc32(_to_java_column(col))) def md5(col): """Calculates the MD5 digest and returns the value as a 32 character hex string. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect() [Row(hash='902fbdd2b1df0c4f70b4a5d23525e932')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.md5(_to_java_column(col)) return Column(jc) def sha1(col): """Returns the hex string result of SHA-1. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect() [Row(hash='3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.sha1(_to_java_column(col)) return Column(jc) def sha2(col, numBits): """Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384, and SHA-512). The numBits indicates the desired bit length of the result, which must have a value of 224, 256, 384, 512, or 0 (which is equivalent to 256). .. versionadded:: 1.5.0 Examples -------- >>> digests = df.select(sha2(df.name, 256).alias('s')).collect() >>> digests[0] Row(s='3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043') >>> digests[1] Row(s='cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961') """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.sha2(_to_java_column(col), numBits) return Column(jc) def hash(*cols): """Calculates the hash code of given columns, and returns the result as an int column. .. 
versionadded:: 2.0.0 Examples -------- >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column)) return Column(jc) def xxhash64(*cols): """Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm, and returns the result as a long column. .. versionadded:: 3.0.0 Examples -------- >>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect() [Row(hash=4105715581806190027)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column)) return Column(jc) def assert_true(col, errMsg=None): """ Returns null if the input column is true; throws an exception with the provided error message otherwise. .. versionadded:: 3.1.0 Examples -------- >>> df = spark.createDataFrame([(0,1)], ['a', 'b']) >>> df.select(assert_true(df.a < df.b).alias('r')).collect() [Row(r=None)] >>> df = spark.createDataFrame([(0,1)], ['a', 'b']) >>> df.select(assert_true(df.a < df.b, df.a).alias('r')).collect() [Row(r=None)] >>> df = spark.createDataFrame([(0,1)], ['a', 'b']) >>> df.select(assert_true(df.a < df.b, 'error').alias('r')).collect() [Row(r=None)] """ sc = SparkContext._active_spark_context if errMsg is None: return Column(sc._jvm.functions.assert_true(_to_java_column(col))) if not isinstance(errMsg, (str, Column)): raise TypeError( "errMsg should be a Column or a str, got {}".format(type(errMsg)) ) errMsg = ( _create_column_from_literal(errMsg) if isinstance(errMsg, str) else _to_java_column(errMsg) ) return Column(sc._jvm.functions.assert_true(_to_java_column(col), errMsg)) @since(3.1) def raise_error(errMsg): """ Throws an exception with the provided error message. """ if not isinstance(errMsg, (str, Column)): raise TypeError( "errMsg should be a Column or a str, got {}".format(type(errMsg)) ) sc = SparkContext._active_spark_context errMsg = ( _create_column_from_literal(errMsg) if isinstance(errMsg, str) else _to_java_column(errMsg) ) return Column(sc._jvm.functions.raise_error(errMsg)) # ---------------------- String/Binary functions ------------------------------ @since(1.5) def upper(col): """ Converts a string expression to upper case. """ return _invoke_function_over_column("upper", col) @since(1.5) def lower(col): """ Converts a string expression to lower case. """ return _invoke_function_over_column("lower", col) @since(1.5) def ascii(col): """ Computes the numeric value of the first character of the string column. """ return _invoke_function_over_column("ascii", col) @since(1.5) def base64(col): """ Computes the BASE64 encoding of a binary column and returns it as a string column. """ return _invoke_function_over_column("base64", col) @since(1.5) def unbase64(col): """ Decodes a BASE64 encoded string column and returns it as a binary column. """ return _invoke_function_over_column("unbase64", col) @since(1.5) def ltrim(col): """ Trim the spaces from left end for the specified string value. """ return _invoke_function_over_column("ltrim", col) @since(1.5) def rtrim(col): """ Trim the spaces from right end for the specified string value. """ return _invoke_function_over_column("rtrim", col) @since(1.5) def trim(col): """ Trim the spaces from both ends for the specified string column. 
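Examples -------- A brief illustrative doctest showing that surrounding spaces are removed; the input literal and column alias below are arbitrary choices: >>> df = spark.createDataFrame([(' Spark ',)], ['s']) >>> df.select(trim(df.s).alias('t')).collect() [Row(t='Spark')]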
""" return _invoke_function_over_column("trim", col) def concat_ws(sep, *cols): """ Concatenates multiple input string columns together into a single string column, using the given separator. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect() [Row(s='abcd-123')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column))) @since(1.5) def decode(col, charset): """ Computes the first argument into a string from a binary using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.decode(_to_java_column(col), charset)) @since(1.5) def encode(col, charset): """ Computes the first argument into a binary from a string using the provided character set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.encode(_to_java_column(col), charset)) def format_number(col, d): """ Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places with HALF_EVEN round mode, and returns the result as a string. .. versionadded:: 1.5.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str the column name of the numeric value to be formatted d : int the N decimal places >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect() [Row(v='5.0000')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_number(_to_java_column(col), d)) def format_string(format, *cols): """ Formats the arguments in printf-style and returns the result as a string column. .. versionadded:: 1.5.0 Parameters ---------- format : str string that can contain embedded format tags and used as result column's value cols : :class:`~pyspark.sql.Column` or str column names or :class:`~pyspark.sql.Column`\\s to be used in formatting Examples -------- >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b']) >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect() [Row(v='5 hello')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column))) def instr(str, substr): """ Locate the position of the first occurrence of substr column in the given string. Returns null if either of the arguments are null. .. versionadded:: 1.5.0 Notes ----- The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(instr(df.s, 'b').alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.instr(_to_java_column(str), substr)) def overlay(src, replace, pos, len=-1): """ Overlay the specified portion of `src` with `replace`, starting from byte position `pos` of `src` and proceeding for `len` bytes. .. 
versionadded:: 3.0.0 Examples -------- >>> df = spark.createDataFrame([("SPARK_SQL", "CORE")], ("x", "y")) >>> df.select(overlay("x", "y", 7).alias("overlayed")).show() +----------+ | overlayed| +----------+ |SPARK_CORE| +----------+ """ if not isinstance(pos, (int, str, Column)): raise TypeError( "pos should be an integer or a Column / column name, got {}".format(type(pos))) if len is not None and not isinstance(len, (int, str, Column)): raise TypeError( "len should be an integer or a Column / column name, got {}".format(type(len))) pos = _create_column_from_literal(pos) if isinstance(pos, int) else _to_java_column(pos) len = _create_column_from_literal(len) if isinstance(len, int) else _to_java_column(len) sc = SparkContext._active_spark_context return Column(sc._jvm.functions.overlay( _to_java_column(src), _to_java_column(replace), pos, len )) def sentences(string, language=None, country=None): """ Splits a string into arrays of sentences, where each sentence is an array of words. The 'language' and 'country' arguments are optional, and if omitted, the default locale is used. .. versionadded:: 3.2.0 Parameters ---------- string : :class:`~pyspark.sql.Column` or str a string to be split language : :class:`~pyspark.sql.Column` or str, optional a language of the locale country : :class:`~pyspark.sql.Column` or str, optional a country of the locale Examples -------- >>> df = spark.createDataFrame([["This is an example sentence."]], ["string"]) >>> df.select(sentences(df.string, lit("en"), lit("US"))).show(truncate=False) +-----------------------------------+ |sentences(string, en, US) | +-----------------------------------+ |[[This, is, an, example, sentence]]| +-----------------------------------+ """ if language is None: language = lit("") if country is None: country = lit("") sc = SparkContext._active_spark_context return Column(sc._jvm.functions.sentences( _to_java_column(string), _to_java_column(language), _to_java_column(country) )) def substring(str, pos, len): """ Substring starts at `pos` and is of length `len` when str is String type or returns the slice of byte array that starts at `pos` in byte and is of length `len` when str is Binary type. .. versionadded:: 1.5.0 Notes ----- The position is not zero based, but 1 based index. Examples -------- >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(substring(df.s, 1, 2).alias('s')).collect() [Row(s='ab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len)) def substring_index(str, delim, count): """ Returns the substring from string str before count occurrences of the delimiter delim. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. substring_index performs a case-sensitive match when searching for delim. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('a.b.c.d',)], ['s']) >>> df.select(substring_index(df.s, '.', 2).alias('s')).collect() [Row(s='a.b')] >>> df.select(substring_index(df.s, '.', -3).alias('s')).collect() [Row(s='b.c.d')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count)) def levenshtein(left, right): """Computes the Levenshtein distance of the two given strings. ..
versionadded:: 1.5.0 Examples -------- >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r']) >>> df0.select(levenshtein('l', 'r').alias('d')).collect() [Row(d=3)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)) return Column(jc) def locate(substr, str, pos=1): """ Locate the position of the first occurrence of substr in a string column, after position pos. .. versionadded:: 1.5.0 Parameters ---------- substr : str a string str : :class:`~pyspark.sql.Column` or str a Column of :class:`pyspark.sql.types.StringType` pos : int, optional start position (zero based) Notes ----- The position is not zero based, but 1 based index. Returns 0 if substr could not be found in str. Examples -------- >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(locate('b', df.s, 1).alias('s')).collect() [Row(s=2)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos)) def lpad(col, len, pad): """ Left-pad the string column to width `len` with `pad`. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s='##abcd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad)) def rpad(col, len, pad): """ Right-pad the string column to width `len` with `pad`. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(rpad(df.s, 6, '#').alias('s')).collect() [Row(s='abcd##')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad)) def repeat(col, n): """ Repeats a string column n times, and returns it as a new string column. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('ab',)], ['s',]) >>> df.select(repeat(df.s, 3).alias('s')).collect() [Row(s='ababab')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.repeat(_to_java_column(col), n)) def split(str, pattern, limit=-1): """ Splits str around matches of the given pattern. .. versionadded:: 1.5.0 Parameters ---------- str : :class:`~pyspark.sql.Column` or str a string expression to split pattern : str a string representing a regular expression. The regex string should be a Java regular expression. limit : int, optional an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. Examples -------- >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=['one', 'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=['one', 'two', 'three', ''])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit)) def regexp_extract(str, pattern, idx): r"""Extract a specific group matched by a Java regex, from the specified string column. If the regex did not match, or the specified group did not match, an empty string is returned. .. 
versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect() [Row(d='100')] >>> df = spark.createDataFrame([('foo',)], ['str']) >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect() [Row(d='')] >>> df = spark.createDataFrame([('aaaac',)], ['str']) >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect() [Row(d='')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx) return Column(jc) def regexp_replace(str, pattern, replacement): r"""Replace all substrings of the specified string value that match regexp with rep. .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('100-200',)], ['str']) >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect() [Row(d='-----')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement) return Column(jc) def initcap(col): """Translate the first letter of each word to upper case in the sentence. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect() [Row(v='Ab Cd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.initcap(_to_java_column(col))) def soundex(col): """ Returns the SoundEx encoding for a string .. versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name']) >>> df.select(soundex(df.name).alias("soundex")).collect() [Row(soundex='P362'), Row(soundex='U612')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.soundex(_to_java_column(col))) def bin(col): """Returns the string representation of the binary value of the given column. .. versionadded:: 1.5.0 Examples -------- >>> df.select(bin(df.age).alias('c')).collect() [Row(c='10'), Row(c='101')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.bin(_to_java_column(col)) return Column(jc) def hex(col): """Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`, :class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or :class:`pyspark.sql.types.LongType`. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect() [Row(hex(a)='414243', hex(b)='3')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.hex(_to_java_column(col)) return Column(jc) def unhex(col): """Inverse of hex. Interprets each pair of characters as a hexadecimal number and converts to the byte representation of number. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect() [Row(unhex(a)=bytearray(b'ABC'))] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.unhex(_to_java_column(col))) def length(col): """Computes the character length of string data or number of bytes of binary data. The length of character data includes the trailing spaces. The length of binary data includes binary zeros. .. 
versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect() [Row(length=4)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.length(_to_java_column(col))) def translate(srcCol, matching, replace): """A function that translates any character in `srcCol` matching a character in `matching` into the corresponding character in `replace`. The characters in `replace` correspond positionally to the characters in `matching`; a character in `matching` that has no counterpart in `replace` is removed from the result, as the example below shows. .. versionadded:: 1.5.0 Examples -------- >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\ ... .alias('r')).collect() [Row(r='1a2s3ae')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace)) # ---------------------- Collection functions ------------------------------ def create_map(*cols): """Creates a new map column. .. versionadded:: 2.0.0 Parameters ---------- cols : :class:`~pyspark.sql.Column` or str column names or :class:`~pyspark.sql.Column`\\s that are grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...). Examples -------- >>> df.select(create_map('name', 'age').alias("map")).collect() [Row(map={'Alice': 2}), Row(map={'Bob': 5})] >>> df.select(create_map([df.name, df.age]).alias("map")).collect() [Row(map={'Alice': 2}), Row(map={'Bob': 5})] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column)) return Column(jc) def map_from_arrays(col1, col2): """Creates a new map from two arrays. .. versionadded:: 2.4.0 Parameters ---------- col1 : :class:`~pyspark.sql.Column` or str name of column containing a set of keys. All elements should not be null col2 : :class:`~pyspark.sql.Column` or str name of column containing a set of values Examples -------- >>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v']) >>> df.select(map_from_arrays(df.k, df.v).alias("map")).show() +----------------+ | map| +----------------+ |{2 -> a, 5 -> b}| +----------------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2))) def array(*cols): """Creates a new array column. .. versionadded:: 1.4.0 Parameters ---------- cols : :class:`~pyspark.sql.Column` or str column names or :class:`~pyspark.sql.Column`\\s that have the same data type. Examples -------- >>> df.select(array('age', 'age').alias("arr")).collect() [Row(arr=[2, 2]), Row(arr=[5, 5])] >>> df.select(array([df.age, df.age]).alias("arr")).collect() [Row(arr=[2, 2]), Row(arr=[5, 5])] """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column)) return Column(jc) def array_contains(col, value): """ Collection function: returns null if the array is null, true if the array contains the given value, and false otherwise. ..
versionadded:: 1.5.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column containing array value : value or column to check for in array Examples -------- >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data']) >>> df.select(array_contains(df.data, "a")).collect() [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)] >>> df.select(array_contains(df.data, lit("a"))).collect() [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)] """ sc = SparkContext._active_spark_context value = value._jc if isinstance(value, Column) else value return Column(sc._jvm.functions.array_contains(_to_java_column(col), value)) def arrays_overlap(a1, a2): """ Collection function: returns true if the arrays contain any common non-null element; if not, returns null if both the arrays are non-empty and any of them contains a null element; returns false otherwise. .. versionadded:: 2.4.0 Examples -------- >>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y']) >>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect() [Row(overlap=True), Row(overlap=False)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2))) def slice(x, start, length): """ Collection function: returns an array containing all the elements in `x` from index `start` (array indices start at 1, or from the end if `start` is negative) with the specified `length`. .. versionadded:: 2.4.0 Parameters ---------- x : :class:`~pyspark.sql.Column` or str the array to be sliced start : :class:`~pyspark.sql.Column` or int the starting index length : :class:`~pyspark.sql.Column` or int the length of the slice Examples -------- >>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x']) >>> df.select(slice(df.x, 2, 2).alias("sliced")).collect() [Row(sliced=[2, 3]), Row(sliced=[5])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.slice( _to_java_column(x), start._jc if isinstance(start, Column) else start, length._jc if isinstance(length, Column) else length )) def array_join(col, delimiter, null_replacement=None): """ Concatenates the elements of `column` using the `delimiter`. Null values are replaced with `null_replacement` if set, otherwise they are ignored. .. versionadded:: 2.4.0 Examples -------- >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data']) >>> df.select(array_join(df.data, ",").alias("joined")).collect() [Row(joined='a,b,c'), Row(joined='a')] >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect() [Row(joined='a,b,c'), Row(joined='a,NULL')] """ sc = SparkContext._active_spark_context if null_replacement is None: return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter)) else: return Column(sc._jvm.functions.array_join( _to_java_column(col), delimiter, null_replacement)) def concat(*cols): """ Concatenates multiple input columns together into a single column. The function works with strings, binary and compatible array columns. .. 
versionadded:: 1.5.0 Examples -------- >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd']) >>> df.select(concat(df.s, df.d).alias('s')).collect() [Row(s='abcd123')] >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c']) >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect() [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column))) def array_position(col, value): """ Collection function: Locates the position of the first occurrence of the given value in the given array. Returns null if either of the arguments are null. .. versionadded:: 2.4.0 Notes ----- The position is not zero based, but 1 based index. Returns 0 if the given value could not be found in the array. Examples -------- >>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data']) >>> df.select(array_position(df.data, "a")).collect() [Row(array_position(data, a)=3), Row(array_position(data, a)=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_position(_to_java_column(col), value)) def element_at(col, extraction): """ Collection function: Returns element of array at given index in extraction if col is array. Returns value for the given key in extraction if col is map. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column containing array or map extraction : index to check for in array or key to check for in map Notes ----- The position is not zero based, but 1 based index. Examples -------- >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data']) >>> df.select(element_at(df.data, 1)).collect() [Row(element_at(data, 1)='a'), Row(element_at(data, 1)=None)] >>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data']) >>> df.select(element_at(df.data, lit("a"))).collect() [Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.element_at( _to_java_column(col), lit(extraction)._jc)) def array_remove(col, element): """ Collection function: Remove all elements that equal to element from the given array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column containing array element : element to be removed from the array Examples -------- >>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data']) >>> df.select(array_remove(df.data, 1)).collect() [Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_remove(_to_java_column(col), element)) def array_distinct(col): """ Collection function: removes duplicate values from the array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data']) >>> df.select(array_distinct(df.data)).collect() [Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_distinct(_to_java_column(col))) def array_intersect(col1, col2): """ Collection function: returns an array of the elements in the intersection of col1 and col2, without duplicates. .. 
versionadded:: 2.4.0 Parameters ---------- col1 : :class:`~pyspark.sql.Column` or str name of column containing array col2 : :class:`~pyspark.sql.Column` or str name of column containing array Examples -------- >>> from pyspark.sql import Row >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])]) >>> df.select(array_intersect(df.c1, df.c2)).collect() [Row(array_intersect(c1, c2)=['a', 'c'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2))) def array_union(col1, col2): """ Collection function: returns an array of the elements in the union of col1 and col2, without duplicates. .. versionadded:: 2.4.0 Parameters ---------- col1 : :class:`~pyspark.sql.Column` or str name of column containing array col2 : :class:`~pyspark.sql.Column` or str name of column containing array Examples -------- >>> from pyspark.sql import Row >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])]) >>> df.select(array_union(df.c1, df.c2)).collect() [Row(array_union(c1, c2)=['b', 'a', 'c', 'd', 'f'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2))) def array_except(col1, col2): """ Collection function: returns an array of the elements in col1 but not in col2, without duplicates. .. versionadded:: 2.4.0 Parameters ---------- col1 : :class:`~pyspark.sql.Column` or str name of column containing array col2 : :class:`~pyspark.sql.Column` or str name of column containing array Examples -------- >>> from pyspark.sql import Row >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])]) >>> df.select(array_except(df.c1, df.c2)).collect() [Row(array_except(c1, c2)=['b'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2))) def explode(col): """ Returns a new row for each element in the given array or map. Uses the default column name `col` for elements in the array and `key` and `value` for elements in the map unless specified otherwise. .. versionadded:: 1.4.0 Examples -------- >>> from pyspark.sql import Row >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})]) >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect() [Row(anInt=1), Row(anInt=2), Row(anInt=3)] >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show() +---+-----+ |key|value| +---+-----+ | a| b| +---+-----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.explode(_to_java_column(col)) return Column(jc) def posexplode(col): """ Returns a new row for each element with position in the given array or map. Uses the default column name `pos` for position, and `col` for elements in the array and `key` and `value` for elements in the map unless specified otherwise. .. versionadded:: 2.1.0 Examples -------- >>> from pyspark.sql import Row >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})]) >>> eDF.select(posexplode(eDF.intlist)).collect() [Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)] >>> eDF.select(posexplode(eDF.mapfield)).show() +---+---+-----+ |pos|key|value| +---+---+-----+ | 0| a| b| +---+---+-----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.posexplode(_to_java_column(col)) return Column(jc) def explode_outer(col): """ Returns a new row for each element in the given array or map. 
Unlike explode, if the array/map is null or empty then null is produced. Uses the default column name `col` for elements in the array and `key` and `value` for elements in the map unless specified otherwise. .. versionadded:: 2.3.0 Examples -------- >>> df = spark.createDataFrame( ... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)], ... ("id", "an_array", "a_map") ... ) >>> df.select("id", "an_array", explode_outer("a_map")).show() +---+----------+----+-----+ | id| an_array| key|value| +---+----------+----+-----+ | 1|[foo, bar]| x| 1.0| | 2| []|null| null| | 3| null|null| null| +---+----------+----+-----+ >>> df.select("id", "a_map", explode_outer("an_array")).show() +---+----------+----+ | id| a_map| col| +---+----------+----+ | 1|{x -> 1.0}| foo| | 1|{x -> 1.0}| bar| | 2| {}|null| | 3| null|null| +---+----------+----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.explode_outer(_to_java_column(col)) return Column(jc) def posexplode_outer(col): """ Returns a new row for each element with position in the given array or map. Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced. Uses the default column name `pos` for position, and `col` for elements in the array and `key` and `value` for elements in the map unless specified otherwise. .. versionadded:: 2.3.0 Examples -------- >>> df = spark.createDataFrame( ... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)], ... ("id", "an_array", "a_map") ... ) >>> df.select("id", "an_array", posexplode_outer("a_map")).show() +---+----------+----+----+-----+ | id| an_array| pos| key|value| +---+----------+----+----+-----+ | 1|[foo, bar]| 0| x| 1.0| | 2| []|null|null| null| | 3| null|null|null| null| +---+----------+----+----+-----+ >>> df.select("id", "a_map", posexplode_outer("an_array")).show() +---+----------+----+----+ | id| a_map| pos| col| +---+----------+----+----+ | 1|{x -> 1.0}| 0| foo| | 1|{x -> 1.0}| 1| bar| | 2| {}|null|null| | 3| null|null|null| +---+----------+----+----+ """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.posexplode_outer(_to_java_column(col)) return Column(jc) def get_json_object(col, path): """ Extracts json object from a json string based on json path specified, and returns json string of the extracted json object. It will return null if the input json string is invalid. .. versionadded:: 1.6.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str string column in json format path : str path to the json object to extract Examples -------- >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')] >>> df = spark.createDataFrame(data, ("key", "jstring")) >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\ ... get_json_object(df.jstring, '$.f2').alias("c1") ).collect() [Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.get_json_object(_to_java_column(col), path) return Column(jc) def json_tuple(col, *fields): """Creates a new row for a json column according to the given field names. .. 
versionadded:: 1.6.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str string column in json format fields : str fields to extract Examples -------- >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')] >>> df = spark.createDataFrame(data, ("key", "jstring")) >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect() [Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields)) return Column(jc) def from_json(col, schema, options=None): """ Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType` as keys type, :class:`StructType` or :class:`ArrayType` with the specified schema. Returns `null`, in the case of an unparseable string. .. versionadded:: 2.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str string column in json format schema : :class:`DataType` or str a StructType or ArrayType of StructType to use when parsing the json column. .. versionchanged:: 2.3 the DDL-formatted string is also supported for ``schema``. options : dict, optional options to control parsing. accepts the same options as the json datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_ in the version you use. .. # noqa Examples -------- >>> from pyspark.sql.types import * >>> data = [(1, '''{"a": 1}''')] >>> schema = StructType([StructField("a", IntegerType())]) >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=Row(a=1))] >>> df.select(from_json(df.value, "a INT").alias("json")).collect() [Row(json=Row(a=1))] >>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect() [Row(json={'a': 1})] >>> data = [(1, '''[{"a": 1}]''')] >>> schema = ArrayType(StructType([StructField("a", IntegerType())])) >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=[Row(a=1)])] >>> schema = schema_of_json(lit('''{"a": 0}''')) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=Row(a=None))] >>> data = [(1, '''[1, 2, 3]''')] >>> schema = ArrayType(IntegerType()) >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(from_json(df.value, schema).alias("json")).collect() [Row(json=[1, 2, 3])] """ sc = SparkContext._active_spark_context if isinstance(schema, DataType): schema = schema.json() elif isinstance(schema, Column): schema = _to_java_column(schema) jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options)) return Column(jc) def to_json(col, options=None): """ Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType` into a JSON string. Throws an exception, in the case of an unsupported type. .. versionadded:: 2.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column containing a struct, an array or a map. options : dict, optional options to control converting. accepts the same options as the JSON datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_ in the version you use. Additionally the function supports the `pretty` option which enables pretty JSON generation. .. 
# noqa Examples -------- >>> from pyspark.sql import Row >>> from pyspark.sql.types import * >>> data = [(1, Row(age=2, name='Alice'))] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json='{"age":2,"name":"Alice"}')] >>> data = [(1, [Row(age=2, name='Alice'), Row(age=3, name='Bob')])] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json='[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')] >>> data = [(1, {"name": "Alice"})] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json='{"name":"Alice"}')] >>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json='[{"name":"Alice"},{"name":"Bob"}]')] >>> data = [(1, ["Alice", "Bob"])] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_json(df.value).alias("json")).collect() [Row(json='["Alice","Bob"]')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options)) return Column(jc) def schema_of_json(json, options=None): """ Parses a JSON string and infers its schema in DDL format. .. versionadded:: 2.4.0 Parameters ---------- json : :class:`~pyspark.sql.Column` or str a JSON string or a foldable string column containing a JSON string. options : dict, optional options to control parsing. accepts the same options as the JSON datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_ in the version you use. .. # noqa .. versionchanged:: 3.0 It accepts `options` parameter to control schema inferring. Examples -------- >>> df = spark.range(1) >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect() [Row(json='STRUCT<`a`: BIGINT>')] >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'}) >>> df.select(schema.alias("json")).collect() [Row(json='STRUCT<`a`: BIGINT>')] """ if isinstance(json, str): col = _create_column_from_literal(json) elif isinstance(json, Column): col = _to_java_column(json) else: raise TypeError("schema argument should be a column or string") sc = SparkContext._active_spark_context jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options)) return Column(jc) def schema_of_csv(csv, options=None): """ Parses a CSV string and infers its schema in DDL format. .. versionadded:: 3.0.0 Parameters ---------- csv : :class:`~pyspark.sql.Column` or str a CSV string or a foldable string column containing a CSV string. options : dict, optional options to control parsing. accepts the same options as the CSV datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_ in the version you use. .. 
# noqa Examples -------- >>> df = spark.range(1) >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect() [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')] >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect() [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')] """ if isinstance(csv, str): col = _create_column_from_literal(csv) elif isinstance(csv, Column): col = _to_java_column(csv) else: raise TypeError("schema argument should be a column or string") sc = SparkContext._active_spark_context jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options)) return Column(jc) def to_csv(col, options=None): """ Converts a column containing a :class:`StructType` into a CSV string. Throws an exception, in the case of an unsupported type. .. versionadded:: 3.0.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column containing a struct. options: dict, optional options to control converting. accepts the same options as the CSV datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_ in the version you use. .. # noqa Examples -------- >>> from pyspark.sql import Row >>> data = [(1, Row(age=2, name='Alice'))] >>> df = spark.createDataFrame(data, ("key", "value")) >>> df.select(to_csv(df.value).alias("csv")).collect() [Row(csv='2,Alice')] """ sc = SparkContext._active_spark_context jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options)) return Column(jc) def size(col): """ Collection function: returns the length of the array or map stored in the column. .. versionadded:: 1.5.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data']) >>> df.select(size(df.data)).collect() [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.size(_to_java_column(col))) def array_min(col): """ Collection function: returns the minimum value of the array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data']) >>> df.select(array_min(df.data).alias('min')).collect() [Row(min=1), Row(min=-1)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_min(_to_java_column(col))) def array_max(col): """ Collection function: returns the maximum value of the array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data']) >>> df.select(array_max(df.data).alias('max')).collect() [Row(max=3), Row(max=10)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_max(_to_java_column(col))) def sort_array(col, asc=True): """ Collection function: sorts the input array in ascending or descending order according to the natural ordering of the array elements. Null elements will be placed at the beginning of the returned array in ascending order or at the end of the returned array in descending order. .. 
versionadded:: 1.5.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression asc : bool, optional Examples -------- >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data']) >>> df.select(sort_array(df.data).alias('r')).collect() [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])] >>> df.select(sort_array(df.data, asc=False).alias('r')).collect() [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc)) def array_sort(col): """ Collection function: sorts the input array in ascending order. The elements of the input array must be orderable. Null elements will be placed at the end of the returned array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data']) >>> df.select(array_sort(df.data).alias('r')).collect() [Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_sort(_to_java_column(col))) def shuffle(col): """ Collection function: Generates a random permutation of the given array. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Notes ----- The function is non-deterministic. Examples -------- >>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data']) >>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP [Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.shuffle(_to_java_column(col))) def reverse(col): """ Collection function: returns a reversed string or an array with reverse order of elements. .. versionadded:: 1.5.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([('Spark SQL',)], ['data']) >>> df.select(reverse(df.data).alias('s')).collect() [Row(s='LQS krapS')] >>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data']) >>> df.select(reverse(df.data).alias('r')).collect() [Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.reverse(_to_java_column(col))) def flatten(col): """ Collection function: creates a single array from an array of arrays. If a structure of nested arrays is deeper than two levels, only one level of nesting is removed. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data']) >>> df.select(flatten(df.data).alias('r')).collect() [Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.flatten(_to_java_column(col))) def map_keys(col): """ Collection function: Returns an unordered array containing the keys of the map. .. 
versionadded:: 2.3.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> from pyspark.sql.functions import map_keys >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data") >>> df.select(map_keys("data").alias("keys")).show() +------+ | keys| +------+ |[1, 2]| +------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_keys(_to_java_column(col))) def map_values(col): """ Collection function: Returns an unordered array containing the values of the map. .. versionadded:: 2.3.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> from pyspark.sql.functions import map_values >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data") >>> df.select(map_values("data").alias("values")).show() +------+ |values| +------+ |[a, b]| +------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_values(_to_java_column(col))) def map_entries(col): """ Collection function: Returns an unordered array of all entries in the given map. .. versionadded:: 3.0.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> from pyspark.sql.functions import map_entries >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data") >>> df.select(map_entries("data").alias("entries")).show() +----------------+ | entries| +----------------+ |[{1, a}, {2, b}]| +----------------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_entries(_to_java_column(col))) def map_from_entries(col): """ Collection function: Returns a map created from the given array of entries. .. versionadded:: 2.4.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression Examples -------- >>> from pyspark.sql.functions import map_from_entries >>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data") >>> df.select(map_from_entries("data").alias("map")).show() +----------------+ | map| +----------------+ |{1 -> a, 2 -> b}| +----------------+ """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.map_from_entries(_to_java_column(col))) def array_repeat(col, count): """ Collection function: creates an array containing a column repeated count times. .. versionadded:: 2.4.0 Examples -------- >>> df = spark.createDataFrame([('ab',)], ['data']) >>> df.select(array_repeat(df.data, 3).alias('r')).collect() [Row(r=['ab', 'ab', 'ab'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_repeat( _to_java_column(col), _to_java_column(count) if isinstance(count, Column) else count )) def arrays_zip(*cols): """ Collection function: Returns a merged array of structs in which the N-th struct contains all N-th values of input arrays. .. versionadded:: 2.4.0 Parameters ---------- cols : :class:`~pyspark.sql.Column` or str columns of arrays to be merged. Examples -------- >>> from pyspark.sql.functions import arrays_zip >>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2']) >>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect() [Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column))) def map_concat(*cols): """Returns the union of all the given maps. .. 
versionadded:: 2.4.0 Parameters ---------- cols : :class:`~pyspark.sql.Column` or str column names or :class:`~pyspark.sql.Column`\\s Examples -------- >>> from pyspark.sql.functions import map_concat >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c') as map2") >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False) +------------------------+ |map3 | +------------------------+ |{1 -> a, 2 -> b, 3 -> c}| +------------------------+ """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column)) return Column(jc) def sequence(start, stop, step=None): """ Generate a sequence of integers from `start` to `stop`, incrementing by `step`. If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`, otherwise -1. .. versionadded:: 2.4.0 Examples -------- >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2')) >>> df1.select(sequence('C1', 'C2').alias('r')).collect() [Row(r=[-2, -1, 0, 1, 2])] >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3')) >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect() [Row(r=[4, 2, 0, -2, -4])] """ sc = SparkContext._active_spark_context if step is None: return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop))) else: return Column(sc._jvm.functions.sequence( _to_java_column(start), _to_java_column(stop), _to_java_column(step))) def from_csv(col, schema, options=None): """ Parses a column containing a CSV string to a row with the specified schema. Returns `null`, in the case of an unparseable string. .. versionadded:: 3.0.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str string column in CSV format schema :class:`~pyspark.sql.Column` or str a string with schema in DDL format to use when parsing the CSV column. options : dict, optional options to control parsing. accepts the same options as the CSV datasource. See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_ in the version you use. .. 
# noqa Examples -------- >>> data = [("1,2,3",)] >>> df = spark.createDataFrame(data, ("value",)) >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect() [Row(csv=Row(a=1, b=2, c=3))] >>> value = data[0][0] >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect() [Row(csv=Row(_c0=1, _c1=2, _c2=3))] >>> data = [(" abc",)] >>> df = spark.createDataFrame(data, ("value",)) >>> options = {'ignoreLeadingWhiteSpace': True} >>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect() [Row(csv=Row(s='abc'))] """ sc = SparkContext._active_spark_context if isinstance(schema, str): schema = _create_column_from_literal(schema) elif isinstance(schema, Column): schema = _to_java_column(schema) else: raise TypeError("schema argument should be a column or string") jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options)) return Column(jc) def _unresolved_named_lambda_variable(*name_parts): """ Create `o.a.s.sql.expressions.UnresolvedNamedLambdaVariable`, convert it to o.a.s.sql.Column and wrap in Python `Column` Parameters ---------- name_parts : str """ sc = SparkContext._active_spark_context name_parts_seq = _to_seq(sc, name_parts) expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions return Column( sc._jvm.Column( expressions.UnresolvedNamedLambdaVariable(name_parts_seq) ) ) def _get_lambda_parameters(f): import inspect signature = inspect.signature(f) parameters = signature.parameters.values() # We should exclude functions that use # variable args and keyword argnames # as well as keyword only args supported_parameter_types = { inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY, } # Validate that # function arity is between 1 and 3 if not (1 <= len(parameters) <= 3): raise ValueError( "f should take between 1 and 3 arguments, but provided function takes {}".format( len(parameters) ) ) # and all arguments can be used as positional if not all(p.kind in supported_parameter_types for p in parameters): raise ValueError( "f should use only POSITIONAL or POSITIONAL OR KEYWORD arguments" ) return parameters def _create_lambda(f): """ Create `o.a.s.sql.expressions.LambdaFunction` corresponding to transformation described by f :param f: A Python function of one of the following forms: - (Column) -> Column: ... - (Column, Column) -> Column: ... - (Column, Column, Column) -> Column: ... """ parameters = _get_lambda_parameters(f) sc = SparkContext._active_spark_context expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions argnames = ["x", "y", "z"] args = [ _unresolved_named_lambda_variable( expressions.UnresolvedNamedLambdaVariable.freshVarName(arg) ) for arg in argnames[: len(parameters)] ] result = f(*args) if not isinstance(result, Column): raise ValueError("f should return Column, got {}".format(type(result))) jexpr = result._jc.expr() jargs = _to_seq(sc, [arg._jc.expr() for arg in args]) return expressions.LambdaFunction(jexpr, jargs, False) def _invoke_higher_order_function(name, cols, funs): """ Invokes expression identified by name (relative to ``org.apache.spark.sql.catalyst.expressions``) and wraps the result with Column (first Scala one, then Python). :param name: Name of the expression :param cols: a list of columns :param funs: a list of ((*Column) -> Column) functions.
:return: a Column """ sc = SparkContext._active_spark_context expressions = sc._jvm.org.apache.spark.sql.catalyst.expressions expr = getattr(expressions, name) jcols = [_to_java_column(col).expr() for col in cols] jfuns = [_create_lambda(f) for f in funs] return Column(sc._jvm.Column(expr(*jcols + jfuns))) def transform(col, f): """ Returns an array of elements after applying a transformation to each element in the input array. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function a function that is applied to each element of the input array. Can take one of the following forms: - Unary ``(x: Column) -> Column: ...`` - Binary ``(x: Column, i: Column) -> Column...``, where the second argument is a 0-based index of the element. and can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, [1, 2, 3, 4])], ("key", "values")) >>> df.select(transform("values", lambda x: x * 2).alias("doubled")).show() +------------+ | doubled| +------------+ |[2, 4, 6, 8]| +------------+ >>> def alternate(x, i): ... return when(i % 2 == 0, x).otherwise(-x) >>> df.select(transform("values", alternate).alias("alternated")).show() +--------------+ | alternated| +--------------+ |[1, -2, 3, -4]| +--------------+ """ return _invoke_higher_order_function("ArrayTransform", [col], [f]) def exists(col, f): """ Returns whether a predicate holds for one or more elements in the array. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function ``(x: Column) -> Column: ...`` returning the Boolean expression. Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). :return: a :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, [1, 2, 3, 4]), (2, [3, -1, 0])],("key", "values")) >>> df.select(exists("values", lambda x: x < 0).alias("any_negative")).show() +------------+ |any_negative| +------------+ | false| | true| +------------+ """ return _invoke_higher_order_function("ArrayExists", [col], [f]) def forall(col, f): """ Returns whether a predicate holds for every element in the array. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function ``(x: Column) -> Column: ...`` returning the Boolean expression. Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame( ... [(1, ["bar"]), (2, ["foo", "bar"]), (3, ["foobar", "foo"])], ... ("key", "values") ... 
) >>> df.select(forall("values", lambda x: x.rlike("foo")).alias("all_foo")).show() +-------+ |all_foo| +-------+ | false| | false| | true| +-------+ """ return _invoke_higher_order_function("ArrayForAll", [col], [f]) def filter(col, f): """ Returns an array of elements for which a predicate holds in a given array. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function A function that returns the Boolean expression. Can take one of the following forms: - Unary ``(x: Column) -> Column: ...`` - Binary ``(x: Column, i: Column) -> Column...``, where the second argument is a 0-based index of the element. and can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame( ... [(1, ["2018-09-20", "2019-02-03", "2019-07-01", "2020-06-01"])], ... ("key", "values") ... ) >>> def after_second_quarter(x): ... return month(to_date(x)) > 6 >>> df.select( ... filter("values", after_second_quarter).alias("after_second_quarter") ... ).show(truncate=False) +------------------------+ |after_second_quarter | +------------------------+ |[2018-09-20, 2019-07-01]| +------------------------+ """ return _invoke_higher_order_function("ArrayFilter", [col], [f]) def aggregate(col, initialValue, merge, finish=None): """ Applies a binary operator to an initial state and all elements in the array, and reduces this to a single state. The final state is converted into the final result by applying a finish function. Both functions can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression initialValue : :class:`~pyspark.sql.Column` or str initial value. Name of column or expression merge : function a binary function ``(acc: Column, x: Column) -> Column...`` returning expression of the same type as ``zero`` finish : function an optional unary function ``(x: Column) -> Column: ...`` used to convert accumulated value. Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, [20.0, 4.0, 2.0, 6.0, 10.0])], ("id", "values")) >>> df.select(aggregate("values", lit(0.0), lambda acc, x: acc + x).alias("sum")).show() +----+ | sum| +----+ |42.0| +----+ >>> def merge(acc, x): ... count = acc.count + 1 ... sum = acc.sum + x ... return struct(count.alias("count"), sum.alias("sum")) >>> df.select( ... aggregate( ... "values", ... struct(lit(0).alias("count"), lit(0.0).alias("sum")), ... merge, ... lambda acc: acc.sum / acc.count, ... ).alias("mean") ... ).show() +----+ |mean| +----+ | 8.4| +----+ """ if finish is not None: return _invoke_higher_order_function( "ArrayAggregate", [col, initialValue], [merge, finish] ) else: return _invoke_higher_order_function( "ArrayAggregate", [col, initialValue], [merge] ) def zip_with(left, right, f): """ Merge two given arrays, element-wise, into a single array using a function. 
If one array is shorter, nulls are appended at the end to match the length of the longer array, before applying the function. .. versionadded:: 3.1.0 Parameters ---------- left : :class:`~pyspark.sql.Column` or str name of the first column or expression right : :class:`~pyspark.sql.Column` or str name of the second column or expression f : function a binary function ``(x1: Column, x2: Column) -> Column...`` Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, [1, 3, 5, 8], [0, 2, 4, 6])], ("id", "xs", "ys")) >>> df.select(zip_with("xs", "ys", lambda x, y: x ** y).alias("powers")).show(truncate=False) +---------------------------+ |powers | +---------------------------+ |[1.0, 9.0, 625.0, 262144.0]| +---------------------------+ >>> df = spark.createDataFrame([(1, ["foo", "bar"], [1, 2, 3])], ("id", "xs", "ys")) >>> df.select(zip_with("xs", "ys", lambda x, y: concat_ws("_", x, y)).alias("xs_ys")).show() +-----------------+ | xs_ys| +-----------------+ |[foo_1, bar_2, 3]| +-----------------+ """ return _invoke_higher_order_function("ZipWith", [left, right], [f]) def transform_keys(col, f): """ Applies a function to every key-value pair in a map and returns a map with the results of those applications as the new keys for the pairs. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function a binary function ``(k: Column, v: Column) -> Column...`` Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, {"foo": -2.0, "bar": 2.0})], ("id", "data")) >>> df.select(transform_keys( ... "data", lambda k, _: upper(k)).alias("data_upper") ... ).show(truncate=False) +-------------------------+ |data_upper | +-------------------------+ |{BAR -> 2.0, FOO -> -2.0}| +-------------------------+ """ return _invoke_higher_order_function("TransformKeys", [col], [f]) def transform_values(col, f): """ Applies a function to every key-value pair in a map and returns a map with the results of those applications as the new values for the pairs. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function a binary function ``(k: Column, v: Column) -> Column...`` Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, {"IT": 10.0, "SALES": 2.0, "OPS": 24.0})], ("id", "data")) >>> df.select(transform_values( ... "data", lambda k, v: when(k.isin("IT", "OPS"), v + 10.0).otherwise(v) ... 
).alias("new_data")).show(truncate=False) +---------------------------------------+ |new_data | +---------------------------------------+ |{OPS -> 34.0, IT -> 20.0, SALES -> 2.0}| +---------------------------------------+ """ return _invoke_higher_order_function("TransformValues", [col], [f]) def map_filter(col, f): """ Returns a map whose key-value pairs satisfy a predicate. .. versionadded:: 3.1.0 Parameters ---------- col : :class:`~pyspark.sql.Column` or str name of column or expression f : function a binary function ``(k: Column, v: Column) -> Column...`` Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data")) >>> df.select(map_filter( ... "data", lambda _, v: v > 30.0).alias("data_filtered") ... ).show(truncate=False) +--------------------------+ |data_filtered | +--------------------------+ |{baz -> 32.0, foo -> 42.0}| +--------------------------+ """ return _invoke_higher_order_function("MapFilter", [col], [f]) def map_zip_with(col1, col2, f): """ Merge two given maps, key-wise into a single map using a function. .. versionadded:: 3.1.0 Parameters ---------- col1 : :class:`~pyspark.sql.Column` or str name of the first column or expression col2 : :class:`~pyspark.sql.Column` or str name of the second column or expression f : function a ternary function ``(k: Column, v1: Column, v2: Column) -> Column...`` Can use methods of :class:`~pyspark.sql.Column`, functions defined in :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__). Returns ------- :class:`~pyspark.sql.Column` Examples -------- >>> df = spark.createDataFrame([ ... (1, {"IT": 24.0, "SALES": 12.00}, {"IT": 2.0, "SALES": 1.4})], ... ("id", "base", "ratio") ... ) >>> df.select(map_zip_with( ... "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data") ... ).show(truncate=False) +---------------------------+ |updated_data | +---------------------------+ |{SALES -> 16.8, IT -> 48.0}| +---------------------------+ """ return _invoke_higher_order_function("MapZipWith", [col1, col2], [f]) # ---------------------- Partition transform functions -------------------------------- def years(col): """ Partition transform function: A transform for timestamps and dates to partition data into years. .. versionadded:: 3.1.0 Examples -------- >>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP ... years("ts") ... ).createOrReplace() Notes ----- This function can be used only in combination with :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy` method of the `DataFrameWriterV2`. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.years(_to_java_column(col))) def months(col): """ Partition transform function: A transform for timestamps and dates to partition data into months. .. versionadded:: 3.1.0 Examples -------- >>> df.writeTo("catalog.db.table").partitionedBy( ... months("ts") ... ).createOrReplace() # doctest: +SKIP Notes ----- This function can be used only in combination with :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy` method of the `DataFrameWriterV2`. 
""" sc = SparkContext._active_spark_context return Column(sc._jvm.functions.months(_to_java_column(col))) def days(col): """ Partition transform function: A transform for timestamps and dates to partition data into days. .. versionadded:: 3.1.0 Examples -------- >>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP ... days("ts") ... ).createOrReplace() Notes ----- This function can be used only in combination with :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy` method of the `DataFrameWriterV2`. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.days(_to_java_column(col))) def hours(col): """ Partition transform function: A transform for timestamps to partition data into hours. .. versionadded:: 3.1.0 Examples -------- >>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP ... hours("ts") ... ).createOrReplace() Notes ----- This function can be used only in combination with :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy` method of the `DataFrameWriterV2`. """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.hours(_to_java_column(col))) def bucket(numBuckets, col): """ Partition transform function: A transform for any type that partitions by a hash of the input column. .. versionadded:: 3.1.0 Examples -------- >>> df.writeTo("catalog.db.table").partitionedBy( # doctest: +SKIP ... bucket(42, "ts") ... ).createOrReplace() Notes ----- This function can be used only in combination with :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy` method of the `DataFrameWriterV2`. """ if not isinstance(numBuckets, (int, Column)): raise TypeError( "numBuckets should be a Column or an int, got {}".format(type(numBuckets)) ) sc = SparkContext._active_spark_context numBuckets = ( _create_column_from_literal(numBuckets) if isinstance(numBuckets, int) else _to_java_column(numBuckets) ) return Column(sc._jvm.functions.bucket(numBuckets, _to_java_column(col))) # ---------------------------- User Defined Function ---------------------------------- def udf(f=None, returnType=StringType()): """Creates a user defined function (UDF). .. versionadded:: 1.3.0 Parameters ---------- f : function python function if used as a standalone function returnType : :class:`pyspark.sql.types.DataType` or str the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. Examples -------- >>> from pyspark.sql.types import IntegerType >>> slen = udf(lambda s: len(s), IntegerType()) >>> @udf ... def to_upper(s): ... if s is not None: ... return s.upper() ... >>> @udf(returnType=IntegerType()) ... def add_one(x): ... if x is not None: ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show() +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ Notes ----- The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. 
E.g.: >>> from pyspark.sql.types import IntegerType >>> import random >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic() The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. The user-defined functions do not take keyword arguments on the calling side. """ # The following table shows most of Python data and SQL type conversions in normal UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-28131's PR to see the codes in order to generate the table below. # # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa # |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)| a(str)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)|bytearray(b'ABC')(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa # | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | tinyint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | smallint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | int| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | bigint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | string| None| 'true'| '1'| 'a'|'java.util.Gregor...| 'java.util.Gregor...| '1.0'| '[I@66cbb73a'| '[1]'|'[Ljava.lang.Obje...| '[B@5a51eb1a'| '1'| '{a=1}'| X| X| # noqa # | date| None| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa # | float| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | double| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | array<int>| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa # | binary| None| None| None|bytearray(b'a')| None| None| None| None| None| None| bytearray(b'ABC')| None| None| X| X| # noqa # | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa # | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| {'a': 1}| X| X| # noqa # | struct<_1:int>| None| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa # 
+-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa # # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: 'X' means it throws an exception during the conversion. # Note: Python 3.7.3 is used. # decorator @udf, @udf(), @udf(dataType()) if f is None or isinstance(f, (str, DataType)): # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType return functools.partial(_create_udf, returnType=return_type, evalType=PythonEvalType.SQL_BATCHED_UDF) else: return _create_udf(f=f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF) def _test(): import doctest from pyspark.sql import Row, SparkSession import pyspark.sql.functions globs = pyspark.sql.functions.__dict__.copy() spark = SparkSession.builder\ .master("local[4]")\ .appName("sql.functions tests")\ .getOrCreate() sc = spark.sparkContext globs['sc'] = sc globs['spark'] = spark globs['df'] = spark.createDataFrame([Row(age=2, name='Alice'), Row(age=5, name='Bob')]) (failure_count, test_count) = doctest.testmod( pyspark.sql.functions, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE) spark.stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
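# ---------------------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; nothing in this module calls it). It shows how
# the higher-order helpers defined above accept plain Python lambdas of one to three
# positional parameters, and how `_get_lambda_parameters` rejects any other signature
# with a ValueError before a JVM call is made. The `spark` argument is assumed to be an
# active SparkSession supplied by the caller.
def _example_higher_order_usage(spark):
    df = spark.createDataFrame([(1, [1, 2, 3, 4])], ("id", "xs"))

    # Unary lambda: applied element-wise by ArrayTransform.
    df.select(transform("xs", lambda x: x * 2).alias("doubled")).show()

    # Binary lambda: the second argument is the 0-based index of the element.
    df.select(transform("xs", lambda x, i: x + i).alias("shifted")).show()

    # Predicate lambda: keeps only the even elements via ArrayFilter.
    df.select(filter("xs", lambda x: x % 2 == 0).alias("evens")).show()

    # A lambda with more than three positional parameters fails fast:
    # transform("xs", lambda a, b, c, d: a)  # raises ValueError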
apache-2.0
JeanKossaifi/scikit-learn
sklearn/tree/tests/test_tree.py
48
47506
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import raises from sklearn.utils.validation import check_random_state from sklearn.utils.validation import NotFittedError from sklearn.utils.testing import ignore_warnings from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.preprocessing._weights import _balance_weights CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, splitter="presort-best"), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, splitter="presort-best"), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = [name for name, Tree in ALL_TREES.items() if Tree().splitter in SPARSE_SPLITTERS] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 
0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: inequal number of node ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": inequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": inequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": inequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": inequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": inequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": inequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": inequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": inequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset. 
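# Fitting with uniform sample weights (all 1.0, then all 0.5) should leave the
# predictions on T unchanged from the unweighted toy problem.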
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure. 
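# A constant target should be reproduced exactly by both the classifiers and the
# regressors.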
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = clf.transform(X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occurs at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features. 
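# 'auto' resolves to n_features for regressors but to sqrt(n_features) for
# classifiers; 'sqrt', 'log2', ints, fractions and None are mapped onto
# max_features_ as asserted below, while out-of-range or unknown values must
# raise ValueError at fit time.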
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): # Check that tree estimator are pickable for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) serialized_object = pickle.dumps(clf) clf2 = pickle.loads(serialized_object) assert_equal(type(clf2), clf.__class__) score2 = clf2.score(iris.data, iris.target) assert_equal(score, score2, "Failed to generate same score " "after pickling (classification) " "with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(boston.data, boston.target) score = reg.score(boston.data, boston.target) serialized_object = pickle.dumps(reg) reg2 = pickle.loads(serialized_object) assert_equal(type(reg2), reg.__class__) score2 = reg2.score(boston.data, boston.target) assert_equal(score, score2, "Failed to generate same score " "after pickling 
(regression) " "with {0}".format(name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = _balance_weights(unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. 
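# Three behaviours are exercised below: zero weights act like dropping the samples,
# re-weighting one class can move the first split threshold, and integer weights
# are equivalent to duplicating rows.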
# Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check sample weighting raises errors. X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "auto" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, 
iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in CLF_TREES: yield check_class_weights, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in CLF_TREES: yield check_class_weight_errors, name def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leafs. from sklearn.tree._tree import TREE_LEAF X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) tree = est.tree_ assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1) # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test preceedence of max_leaf_nodes over max_depth. 
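# With max_leaf_nodes set, best-first growth is allowed to exceed max_depth=1,
# so the fitted tree's depth is expected to be greater than 1.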
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-2 <= value.flat[0] < 2, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test if the warning for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *". 
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check various criterion CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples set n_feature to ease construction of a simultaneous # construction of a csr and csc matrix n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y, 
sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if TreeEstimator().splitter in SPARSE_SPLITTERS: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name)
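# --- Illustrative sketch (not part of the original test file) -------------
# The sparse-input checks above all rely on the same pattern: fit the same
# tree twice, once on a dense array and once on a sparse copy of it, then
# assert that the fitted trees and their predictions agree. A minimal,
# self-contained version of that pattern is sketched below under assumed
# data; the variable names are illustrative only.
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.binomial(3, 0.5, size=(40, 5)).astype(np.float64)  # small int-valued features
y = rng.randint(0, 2, size=40)

dense_clf = DecisionTreeClassifier(random_state=0).fit(X, y)
sparse_clf = DecisionTreeClassifier(random_state=0).fit(csr_matrix(X), y)

# With the same random_state and the same data, dense and sparse fits are
# expected to produce matching predictions.
assert np.array_equal(dense_clf.predict(X), sparse_clf.predict(csr_matrix(X)))
assert np.allclose(dense_clf.predict_proba(X),
                   sparse_clf.predict_proba(csr_matrix(X)))
# --------------------------------------------------------------------------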
bsd-3-clause
Sumith1896/sympy
sympy/utilities/runtests.py
4
78928
""" This is our testing framework. Goals: * it should be compatible with py.test and operate very similarly (or identically) * doesn't require any external dependencies * preferably all the functionality should be in this file only * no magic, just import the test file and execute the test functions, that's it * portable """ from __future__ import print_function, division import os import sys import platform import inspect import traceback import pdb import re import linecache from fnmatch import fnmatch from timeit import default_timer as clock import doctest as pdoctest # avoid clashing with our doctest() function from doctest import DocTestFinder, DocTestRunner import random import subprocess import signal import stat from inspect import isgeneratorfunction from sympy.core.cache import clear_cache from sympy.core.compatibility import exec_, PY3, string_types, range from sympy.utilities.misc import find_executable from sympy.external import import_module from sympy.utilities.exceptions import SymPyDeprecationWarning IS_WINDOWS = (os.name == 'nt') class Skipped(Exception): pass import __future__ # add more flags ?? future_flags = __future__.division.compiler_flag def _indent(s, indent=4): """ Add the given number of space characters to the beginning of every non-blank line in ``s``, and return the result. If the string ``s`` is Unicode, it is encoded using the stdout encoding and the ``backslashreplace`` error handler. """ # After a 2to3 run the below code is bogus, so wrap it with a version check if not PY3: if isinstance(s, unicode): s = s.encode(pdoctest._encoding, 'backslashreplace') # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) pdoctest._indent = _indent # ovverride reporter to maintain windows and python3 def _report_failure(self, out, test, example, got): """ Report that the given example failed. """ s = self._checker.output_difference(example, got, self.optionflags) s = s.encode('raw_unicode_escape').decode('utf8', 'ignore') out(self._failure_header(test, example) + s) if PY3 and IS_WINDOWS: DocTestRunner.report_failure = _report_failure def convert_to_native_paths(lst): """ Converts a list of '/' separated paths into a list of native (os.sep separated) paths and converts to lowercase if the system is case insensitive. """ newlst = [] for i, rv in enumerate(lst): rv = os.path.join(*rv.split("/")) # on windows the slash after the colon is dropped if sys.platform == "win32": pos = rv.find(':') if pos != -1: if rv[pos + 1] != '\\': rv = rv[:pos + 1] + '\\' + rv[pos + 1:] newlst.append(sys_normcase(rv)) return newlst def get_sympy_dir(): """ Returns the root sympy directory and set the global value indicating whether the system is case sensitive or not. 
""" global sys_case_insensitive this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") sympy_dir = os.path.normpath(sympy_dir) sys_case_insensitive = (os.path.isdir(sympy_dir) and os.path.isdir(sympy_dir.lower()) and os.path.isdir(sympy_dir.upper())) return sys_normcase(sympy_dir) def sys_normcase(f): if sys_case_insensitive: # global defined after call to get_sympy_dir() return f.lower() return f def setup_pprint(): from sympy import pprint_use_unicode, init_printing # force pprint to be in ascii mode in doctests pprint_use_unicode(False) # hook our nice, hash-stable strprinter init_printing(pretty_print=False) def run_in_subprocess_with_hash_randomization(function, function_args=(), function_kwargs={}, command=sys.executable, module='sympy.utilities.runtests', force=False): """ Run a function in a Python subprocess with hash randomization enabled. If hash randomization is not supported by the version of Python given, it returns False. Otherwise, it returns the exit value of the command. The function is passed to sys.exit(), so the return value of the function will be the return value. The environment variable PYTHONHASHSEED is used to seed Python's hash randomization. If it is set, this function will return False, because starting a new subprocess is unnecessary in that case. If it is not set, one is set at random, and the tests are run. Note that if this environment variable is set when Python starts, hash randomization is automatically enabled. To force a subprocess to be created even if PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a subprocess in Python versions that do not support hash randomization (see below), because those versions of Python do not support the ``-R`` flag. ``function`` should be a string name of a function that is importable from the module ``module``, like "_test". The default for ``module`` is "sympy.utilities.runtests". ``function_args`` and ``function_kwargs`` should be a repr-able tuple and dict, respectively. The default Python command is sys.executable, which is the currently running Python command. This function is necessary because the seed for hash randomization must be set by the environment variable before Python starts. Hence, in order to use a predetermined seed for tests, we must start Python in a separate subprocess. Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, 3.1.5, and 3.2.3, and is enabled by default in all Python versions after and including 3.3.0. Examples ======== >>> from sympy.utilities.runtests import ( ... run_in_subprocess_with_hash_randomization) >>> # run the core tests in verbose mode >>> run_in_subprocess_with_hash_randomization("_test", ... function_args=("core",), ... function_kwargs={'verbose': True}) # doctest: +SKIP # Will return 0 if sys.executable supports hash randomization and tests # pass, 1 if they fail, and False if it does not support hash # randomization. """ # Note, we must return False everywhere, not None, as subprocess.call will # sometimes return None. 
# First check if the Python version supports hash randomization # If it doesn't have this support, it won't reconize the -R flag p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) p.communicate() if p.returncode != 0: return False hash_seed = os.getenv("PYTHONHASHSEED") if not hash_seed: os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32)) else: if not force: return False # Now run the command commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" % (module, function, function, repr(function_args), repr(function_kwargs))) try: p = subprocess.Popen([command, "-R", "-c", commandstring]) p.communicate() except KeyboardInterrupt: p.wait() finally: # Put the environment variable back, so that it reads correctly for # the current Python process. if hash_seed is None: del os.environ["PYTHONHASHSEED"] else: os.environ["PYTHONHASHSEED"] = hash_seed return p.returncode def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(), doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}): """ Run all tests. Right now, this runs the regular tests (bin/test), the doctests (bin/doctest), the examples (examples/all.py), and the sage tests (see sympy/external/tests/test_sage.py). This is what ``setup.py test`` uses. You can pass arguments and keyword arguments to the test functions that support them (for now, test, doctest, and the examples). See the docstrings of those functions for a description of the available options. For example, to run the solvers tests with colors turned off: >>> from sympy.utilities.runtests import run_all_tests >>> run_all_tests(test_args=("solvers",), ... test_kwargs={"colors:False"}) # doctest: +SKIP """ tests_successful = True try: # Regular tests if not test(*test_args, **test_kwargs): # some regular test fails, so set the tests_successful # flag to false and continue running the doctests tests_successful = False # Doctests print() if not doctest(*doctest_args, **doctest_kwargs): tests_successful = False # Examples print() sys.path.append("examples") from all import run_examples # examples/all.py if not run_examples(*examples_args, **examples_kwargs): tests_successful = False # Sage tests if not (sys.platform == "win32" or PY3): # run Sage tests; Sage currently doesn't support Windows or Python 3 dev_null = open(os.devnull, 'w') if subprocess.call("sage -v", shell=True, stdout=dev_null, stderr=dev_null) == 0: if subprocess.call("sage -python bin/test " "sympy/external/tests/test_sage.py", shell=True) != 0: tests_successful = False if tests_successful: return else: # Return nonzero exit code sys.exit(1) except KeyboardInterrupt: print() print("DO *NOT* COMMIT!") sys.exit(1) def test(*paths, **kwargs): """ Run tests in the specified test_*.py files. Tests in a particular test_*.py file are run if any of the given strings in ``paths`` matches a part of the test file's path. If ``paths=[]``, tests in all test_*.py files are run. Notes: - If sort=False, tests are run in random order (not default). - Paths can be entered in native system format or in unix, forward-slash format. - Files that are on the blacklist can be tested by providing their path; they are only excluded if no paths are given. **Explanation of test results** ====== =============================================================== Output Meaning ====== =============================================================== . 
passed F failed X XPassed (expected to fail but passed) f XFAILed (expected to fail and indeed failed) s skipped w slow T timeout (e.g., when ``--timeout`` is used) K KeyboardInterrupt (when running the slow tests with ``--slow``, you can interrupt one of them without killing the test runner) ====== =============================================================== Colors have no additional meaning and are used just to facilitate interpreting the output. Examples ======== >>> import sympy Run all tests: >>> sympy.test() # doctest: +SKIP Run one file: >>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP >>> sympy.test("_basic") # doctest: +SKIP Run all tests in sympy/functions/ and some particular file: >>> sympy.test("sympy/core/tests/test_basic.py", ... "sympy/functions") # doctest: +SKIP Run all tests in sympy/core and sympy/utilities: >>> sympy.test("/core", "/util") # doctest: +SKIP Run specific test from a file: >>> sympy.test("sympy/core/tests/test_basic.py", ... kw="test_equality") # doctest: +SKIP Run specific test from any file: >>> sympy.test(kw="subs") # doctest: +SKIP Run the tests with verbose mode on: >>> sympy.test(verbose=True) # doctest: +SKIP Don't sort the test output: >>> sympy.test(sort=False) # doctest: +SKIP Turn on post-mortem pdb: >>> sympy.test(pdb=True) # doctest: +SKIP Turn off colors: >>> sympy.test(colors=False) # doctest: +SKIP Force colors, even when the output is not to a terminal (this is useful, e.g., if you are piping to ``less -r`` and you still want colors) >>> sympy.test(force_colors=False) # doctest: +SKIP The traceback verboseness can be set to "short" or "no" (default is "short") >>> sympy.test(tb='no') # doctest: +SKIP The ``split`` option can be passed to split the test run into parts. The split currently only splits the test files, though this may change in the future. ``split`` should be a string of the form 'a/b', which will run part ``a`` of ``b``. For instance, to run the first half of the test suite: >>> sympy.test(split='1/2') # doctest: +SKIP You can disable running the tests in a separate subprocess using ``subprocess=False``. This is done to support seeding hash randomization, which is enabled by default in the Python versions where it is supported. If subprocess=False, hash randomization is enabled/disabled according to whether it has been enabled or not in the calling Python process. However, even if it is enabled, the seed cannot be printed unless it is called from a new Python process. Hash randomization was added in the minor Python versions 2.6.8, 2.7.3, 3.1.5, and 3.2.3, and is enabled by default in all Python versions after and including 3.3.0. If hash randomization is not supported ``subprocess=False`` is used automatically. >>> sympy.test(subprocess=False) # doctest: +SKIP To set the hash randomization seed, set the environment variable ``PYTHONHASHSEED`` before running the tests. This can be done from within Python using >>> import os >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP Or from the command line using $ PYTHONHASHSEED=42 ./bin/test If the seed is not set, a random seed will be chosen. Note that to reproduce the same hash values, you must use both the same seed as well as the same architecture (32-bit vs. 64-bit). 
""" subprocess = kwargs.pop("subprocess", True) rerun = kwargs.pop("rerun", 0) # count up from 0, do not print 0 print_counter = lambda i : (print("rerun %d" % (rerun-i)) if rerun-i else None) if subprocess: # loop backwards so last i is 0 for i in range(rerun, -1, -1): print_counter(i) ret = run_in_subprocess_with_hash_randomization("_test", function_args=paths, function_kwargs=kwargs) if ret is False: break val = not bool(ret) # exit on the first failure or if done if not val or i == 0: return val # rerun even if hash randomization is not supported for i in range(rerun, -1, -1): print_counter(i) val = not bool(_test(*paths, **kwargs)) if not val or i == 0: return val def _test(*paths, **kwargs): """ Internal function that actually runs the tests. All keyword arguments from ``test()`` are passed to this function except for ``subprocess``. Returns 0 if tests passed and 1 if they failed. See the docstring of ``test()`` for more information. """ verbose = kwargs.get("verbose", False) tb = kwargs.get("tb", "short") kw = kwargs.get("kw", None) or () # ensure that kw is a tuple if isinstance(kw, str): kw = (kw, ) post_mortem = kwargs.get("pdb", False) colors = kwargs.get("colors", True) force_colors = kwargs.get("force_colors", False) sort = kwargs.get("sort", True) seed = kwargs.get("seed", None) if seed is None: seed = random.randrange(100000000) timeout = kwargs.get("timeout", False) slow = kwargs.get("slow", False) enhance_asserts = kwargs.get("enhance_asserts", False) split = kwargs.get('split', None) blacklist = kwargs.get('blacklist', []) blacklist = convert_to_native_paths(blacklist) r = PyTestReporter(verbose=verbose, tb=tb, colors=colors, force_colors=force_colors, split=split) t = SymPyTests(r, kw, post_mortem, seed) # Disable warnings for external modules import sympy.external sympy.external.importtools.WARN_OLD_VERSION = False sympy.external.importtools.WARN_NOT_INSTALLED = False # Show deprecation warnings import warnings warnings.simplefilter("error", SymPyDeprecationWarning) test_files = t.get_test_files('sympy') not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: paths = convert_to_native_paths(paths) matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break if split: matched = split_list(matched, split) t._testfiles.extend(matched) return int(not t.test(sort=sort, timeout=timeout, slow=slow, enhance_asserts=enhance_asserts)) def doctest(*paths, **kwargs): """ Runs doctests in all \*.py files in the sympy directory which match any of the given strings in ``paths`` or all tests if paths=[]. Notes: - Paths can be entered in native system format or in unix, forward-slash format. - Files that are on the blacklist can be tested by providing their path; they are only excluded if no paths are given. Examples ======== >>> import sympy Run all tests: >>> sympy.doctest() # doctest: +SKIP Run one file: >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP >>> sympy.doctest("polynomial.rst") # doctest: +SKIP Run all tests in sympy/functions/ and some particular file: >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP Run any file having polynomial in its name, doc/src/modules/polynomial.rst, sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py: >>> sympy.doctest("polynomial") # doctest: +SKIP The ``split`` option can be passed to split the test run into parts. 
The split currently only splits the test files, though this may change in the future. ``split`` should be a string of the form 'a/b', which will run part ``a`` of ``b``. Note that the regular doctests and the Sphinx doctests are split independently. For instance, to run the first half of the test suite: >>> sympy.doctest(split='1/2') # doctest: +SKIP The ``subprocess`` and ``verbose`` options are the same as with the function ``test()``. See the docstring of that function for more information. """ subprocess = kwargs.pop("subprocess", True) rerun = kwargs.pop("rerun", 0) # count up from 0, do not print 0 print_counter = lambda i : (print("rerun %d" % (rerun-i)) if rerun-i else None) if subprocess: # loop backwards so last i is 0 for i in range(rerun, -1, -1): print_counter(i) ret = run_in_subprocess_with_hash_randomization("_doctest", function_args=paths, function_kwargs=kwargs) if ret is False: break val = not bool(ret) # exit on the first failure or if done if not val or i == 0: return val # rerun even if hash randomization is not supported for i in range(rerun, -1, -1): print_counter(i) val = not bool(_doctest(*paths, **kwargs)) if not val or i == 0: return val def _doctest(*paths, **kwargs): """ Internal function that actually runs the doctests. All keyword arguments from ``doctest()`` are passed to this function except for ``subprocess``. Returns 0 if tests passed and 1 if they failed. See the docstrings of ``doctest()`` and ``test()`` for more information. """ normal = kwargs.get("normal", False) verbose = kwargs.get("verbose", False) blacklist = kwargs.get("blacklist", []) split = kwargs.get('split', None) blacklist.extend([ "doc/src/modules/plotting.rst", # generates live plots "sympy/utilities/compilef.py", # needs tcc "sympy/physics/gaussopt.py", # raises deprecation warning ]) if import_module('numpy') is None: blacklist.extend([ "sympy/plotting/experimental_lambdify.py", "sympy/plotting/plot_implicit.py", "examples/advanced/autowrap_integrators.py", "examples/advanced/autowrap_ufuncify.py", "examples/intermediate/sample.py", "examples/intermediate/mplot2d.py", "examples/intermediate/mplot3d.py", "doc/src/modules/numeric-computation.rst" ]) else: if import_module('matplotlib') is None: blacklist.extend([ "examples/intermediate/mplot2d.py", "examples/intermediate/mplot3d.py" ]) else: # don't display matplotlib windows from sympy.plotting.plot import unset_show unset_show() if import_module('pyglet') is None: blacklist.extend(["sympy/plotting/pygletplot"]) if import_module('theano') is None: blacklist.extend(["doc/src/modules/numeric-computation.rst"]) # disabled because of doctest failures in asmeurer's bot blacklist.extend([ "sympy/utilities/autowrap.py", "examples/advanced/autowrap_integrators.py", "examples/advanced/autowrap_ufuncify.py" ]) # blacklist these modules until issue 4840 is resolved blacklist.extend([ "sympy/conftest.py", "sympy/utilities/benchmarking.py" ]) blacklist = convert_to_native_paths(blacklist) # Disable warnings for external modules import sympy.external sympy.external.importtools.WARN_OLD_VERSION = False sympy.external.importtools.WARN_NOT_INSTALLED = False # Show deprecation warnings import warnings warnings.simplefilter("error", SymPyDeprecationWarning) r = PyTestReporter(verbose, split=split) t = SymPyDocTests(r, normal) test_files = t.get_test_files('sympy') test_files.extend(t.get_test_files('examples', init_only=False)) not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted 
else: # take only what was requested...but not blacklisted items # and allow for partial match anywhere or fnmatch of name paths = convert_to_native_paths(paths) matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break if split: matched = split_list(matched, split) t._testfiles.extend(matched) # run the tests and record the result for this *py portion of the tests if t._testfiles: failed = not t.test() else: failed = False # N.B. # -------------------------------------------------------------------- # Here we test *.rst files at or below doc/src. Code from these must # be self supporting in terms of imports since there is no importing # of necessary modules by doctest.testfile. If you try to pass *.py # files through this they might fail because they will lack the needed # imports and smarter parsing that can be done with source code. # test_files = t.get_test_files('doc/src', '*.rst', init_only=False) test_files.sort() not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: # Take only what was requested as long as it's not on the blacklist. # Paths were already made native in *py tests so don't repeat here. # There's no chance of having a *py file slip through since we # only have *rst files in test_files. matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break if split: matched = split_list(matched, split) setup_pprint() first_report = True for rst_file in matched: if not os.path.isfile(rst_file): continue old_displayhook = sys.displayhook try: out = sympytestfile( rst_file, module_relative=False, encoding='utf-8', optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE | pdoctest.IGNORE_EXCEPTION_DETAIL) finally: # make sure we return to the original displayhook in case some # doctest has changed that sys.displayhook = old_displayhook rstfailed, tested = out if tested: failed = rstfailed or failed if first_report: first_report = False msg = 'rst doctests start' if not t._testfiles: r.start(msg=msg) else: r.write_center(msg) print() # use as the id, everything past the first 'sympy' file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:] print(file_id, end=" ") # get at least the name out so it is know who is being tested wid = r.terminal_width - len(file_id) - 1 # update width test_file = '[%s]' % (tested) report = '[%s]' % (rstfailed or 'OK') print(''.join( [test_file, ' '*(wid - len(test_file) - len(report)), report]) ) # the doctests for *py will have printed this message already if there was # a failure, so now only print it if there was intervening reporting by # testing the *rst as evidenced by first_report no longer being True. if not first_report and failed: print() print("DO *NOT* COMMIT!") return int(failed) sp = re.compile(r'([0-9]+)/([1-9][0-9]*)') def split_list(l, split): """ Splits a list into part a of b split should be a string of the form 'a/b'. For instance, '1/3' would give the split one of three. If the length of the list is not divisible by the number of splits, the last split will have more items. 
>>> from sympy.utilities.runtests import split_list >>> a = list(range(10)) >>> split_list(a, '1/3') [0, 1, 2] >>> split_list(a, '2/3') [3, 4, 5] >>> split_list(a, '3/3') [6, 7, 8, 9] """ m = sp.match(split) if not m: raise ValueError("split must be a string of the form a/b where a and b are ints") i, t = map(int, m.groups()) return l[(i - 1)*len(l)//t:i*len(l)//t] from collections import namedtuple SymPyTestResults = namedtuple('TestResults', 'failed attempted') def sympytestfile(filename, module_relative=True, name=None, package=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, parser=pdoctest.DocTestParser(), encoding=None): """ Test examples in the given file. Return (#failures, #tests). Optional keyword arg ``module_relative`` specifies how filenames should be interpreted: - If ``module_relative`` is True (the default), then ``filename`` specifies a module-relative path. By default, this path is relative to the calling module's directory; but if the ``package`` argument is specified, then it is relative to that package. To ensure os-independence, ``filename`` should use "/" characters to separate path segments, and should not be an absolute path (i.e., it may not begin with "/"). - If ``module_relative`` is False, then ``filename`` specifies an os-specific path. The path may be absolute or relative (to the current working directory). Optional keyword arg ``name`` gives the name of the test; by default use the file's basename. Optional keyword argument ``package`` is a Python package or the name of a Python package whose directory should be used as the base directory for a module relative filename. If no package is specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify ``package`` if ``module_relative`` is False. Optional keyword arg ``globs`` gives a dict to be used as the globals when executing examples; by default, use {}. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg ``extraglobs`` gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. Optional keyword arg ``verbose`` prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg ``report`` prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg ``optionflags`` or's together module constants, and defaults to 0. Possible values (see the docs for details): - DONT_ACCEPT_TRUE_FOR_1 - DONT_ACCEPT_BLANKLINE - NORMALIZE_WHITESPACE - ELLIPSIS - SKIP - IGNORE_EXCEPTION_DETAIL - REPORT_UDIFF - REPORT_CDIFF - REPORT_NDIFF - REPORT_ONLY_FIRST_FAILURE Optional keyword arg ``raise_on_error`` raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Optional keyword arg ``parser`` specifies a DocTestParser (or subclass) that should be used to extract tests from the files. Optional keyword arg ``encoding`` specifies an encoding that should be used to convert the file to unicode. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. 
Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path if not PY3: text, filename = pdoctest._load_testfile( filename, package, module_relative) if encoding is not None: text = text.decode(encoding) else: text, filename = pdoctest._load_testfile( filename, package, module_relative, encoding) # If no name was given, then use the file's name. if name is None: name = os.path.basename(filename) # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: globs['__name__'] = '__main__' if raise_on_error: runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags) runner._checker = SymPyOutputChecker() # Read the file, convert it to a test, and run it. test = parser.get_doctest(text, globs, name, filename, 0) runner.run(test, compileflags=future_flags) if report: runner.summarize() if pdoctest.master is None: pdoctest.master = runner else: pdoctest.master.merge(runner) return SymPyTestResults(runner.failures, runner.tries) class SymPyTests(object): def __init__(self, reporter, kw="", post_mortem=False, seed=None): self._post_mortem = post_mortem self._kw = kw self._count = 0 self._root_dir = sympy_dir self._reporter = reporter self._reporter.root_dir(self._root_dir) self._testfiles = [] self._seed = seed if seed is not None else random.random() def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False): """ Runs the tests returning True if all tests pass, otherwise False. If sort=False run tests in random order. 
""" if sort: self._testfiles.sort() else: from random import shuffle random.seed(self._seed) shuffle(self._testfiles) self._reporter.start(self._seed) for f in self._testfiles: try: self.test_file(f, sort, timeout, slow, enhance_asserts) except KeyboardInterrupt: print(" interrupted by user") self._reporter.finish() raise return self._reporter.finish() def _enhance_asserts(self, source): from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple, Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations) ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=', "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not', "In": 'in', "NotIn": 'not in'} class Transform(NodeTransformer): def visit_Assert(self, stmt): if isinstance(stmt.test, Compare): compare = stmt.test values = [compare.left] + compare.comparators names = [ "_%s" % i for i, _ in enumerate(values) ] names_store = [ Name(n, Store()) for n in names ] names_load = [ Name(n, Load()) for n in names ] target = Tuple(names_store, Store()) value = Tuple(values, Load()) assign = Assign([target], value) new_compare = Compare(names_load[0], compare.ops, names_load[1:]) msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s" msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load())) test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset) return [assign, test] else: return stmt tree = parse(source) new_tree = Transform().visit(tree) return fix_missing_locations(new_tree) def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False): funcs = [] try: gl = {'__file__': filename} try: if PY3: open_file = lambda: open(filename, encoding="utf8") else: open_file = lambda: open(filename) with open_file() as f: source = f.read() if self._kw: for l in source.splitlines(): if l.lstrip().startswith('def '): if any(l.find(k) != -1 for k in self._kw): break else: return if enhance_asserts: try: source = self._enhance_asserts(source) except ImportError: pass code = compile(source, filename, "exec") exec_(code, gl) except (SystemExit, KeyboardInterrupt): raise except ImportError: self._reporter.import_error(filename, sys.exc_info()) return clear_cache() self._count += 1 random.seed(self._seed) pytestfile = "" if "XFAIL" in gl: pytestfile = inspect.getsourcefile(gl["XFAIL"]) pytestfile2 = "" if "slow" in gl: pytestfile2 = inspect.getsourcefile(gl["slow"]) disabled = gl.get("disabled", False) if not disabled: # we need to filter only those functions that begin with 'test_' # that are defined in the testing file or in the file where # is defined the XFAIL decorator funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and (inspect.getsourcefile(gl[f]) == filename or inspect.getsourcefile(gl[f]) == pytestfile or inspect.getsourcefile(gl[f]) == pytestfile2)] if slow: funcs = [f for f in funcs if getattr(f, '_slow', False)] # Sorting of XFAILed functions isn't fixed yet :-( funcs.sort(key=lambda x: inspect.getsourcelines(x)[1]) i = 0 while i < len(funcs): if isgeneratorfunction(funcs[i]): # some tests can be generators, that return the actual # test functions. 
We unpack it below: f = funcs.pop(i) for fg in f(): func = fg[0] args = fg[1:] fgw = lambda: func(*args) funcs.insert(i, fgw) i += 1 else: i += 1 # drop functions that are not selected with the keyword expression: funcs = [x for x in funcs if self.matches(x)] if not funcs: return except Exception: self._reporter.entering_filename(filename, len(funcs)) raise self._reporter.entering_filename(filename, len(funcs)) if not sort: random.shuffle(funcs) for f in funcs: self._reporter.entering_test(f) try: if getattr(f, '_slow', False) and not slow: raise Skipped("Slow") if timeout: self._timeout(f, timeout) else: random.seed(self._seed) f() except KeyboardInterrupt: if getattr(f, '_slow', False): self._reporter.test_skip("KeyboardInterrupt") else: raise except Exception: if timeout: signal.alarm(0) # Disable the alarm. It could not be handled before. t, v, tr = sys.exc_info() if t is AssertionError: self._reporter.test_fail((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) elif t.__name__ == "Skipped": self._reporter.test_skip(v) elif t.__name__ == "XFail": self._reporter.test_xfail() elif t.__name__ == "XPass": self._reporter.test_xpass(v) else: self._reporter.test_exception((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) else: self._reporter.test_pass() self._reporter.leaving_filename() def _timeout(self, function, timeout): def callback(x, y): signal.alarm(0) raise Skipped("Timeout") signal.signal(signal.SIGALRM, callback) signal.alarm(timeout) # Set an alarm with a given timeout function() signal.alarm(0) # Disable the alarm def matches(self, x): """ Does the keyword expression self._kw match "x"? Returns True/False. Always returns True if self._kw is "". """ if not self._kw: return True for kw in self._kw: if x.__name__.find(kw) != -1: return True return False def get_test_files(self, dir, pat='test_*.py'): """ Returns the list of test_*.py (default) files at or below directory ``dir`` relative to the sympy home directory. """ dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)]) return sorted([sys_normcase(gi) for gi in g]) class SymPyDocTests(object): def __init__(self, reporter, normal): self._count = 0 self._root_dir = sympy_dir self._reporter = reporter self._reporter.root_dir(self._root_dir) self._normal = normal self._testfiles = [] def test(self): """ Runs the tests and returns True if all tests pass, otherwise False. """ self._reporter.start() for f in self._testfiles: try: self.test_file(f) except KeyboardInterrupt: print(" interrupted by user") self._reporter.finish() raise return self._reporter.finish() def test_file(self, filename): clear_cache() from sympy.core.compatibility import StringIO rel_name = filename[len(self._root_dir) + 1:] dirname, file = os.path.split(filename) module = rel_name.replace(os.sep, '.')[:-3] if rel_name.startswith("examples"): # Examples files do not have __init__.py files, # So we have to temporarily extend sys.path to import them sys.path.insert(0, dirname) module = file[:-3] # remove ".py" setup_pprint() try: module = pdoctest._normalize_module(module) tests = SymPyDocTestFinder().find(module) except (SystemExit, KeyboardInterrupt): raise except ImportError: self._reporter.import_error(filename, sys.exc_info()) return finally: if rel_name.startswith("examples"): del sys.path[0] tests = [test for test in tests if len(test.examples) > 0] # By default tests are sorted by alphabetical order by function name. 
# We sort by line number so one can edit the file sequentially from # bottom to top. However, if there are decorated functions, their line # numbers will be too large and for now one must just search for these # by text and function name. tests.sort(key=lambda x: -x.lineno) if not tests: return self._reporter.entering_filename(filename, len(tests)) for test in tests: assert len(test.examples) != 0 # check if there are external dependencies which need to be met if '_doctest_depends_on' in test.globs: if not self._process_dependencies(test.globs['_doctest_depends_on']): self._reporter.test_skip() continue runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE | pdoctest.IGNORE_EXCEPTION_DETAIL) runner._checker = SymPyOutputChecker() old = sys.stdout new = StringIO() sys.stdout = new # If the testing is normal, the doctests get importing magic to # provide the global namespace. If not normal (the default) then # then must run on their own; all imports must be explicit within # a function's docstring. Once imported that import will be # available to the rest of the tests in a given function's # docstring (unless clear_globs=True below). if not self._normal: test.globs = {} # if this is uncommented then all the test would get is what # comes by default with a "from sympy import *" #exec('from sympy import *') in test.globs test.globs['print_function'] = print_function try: f, t = runner.run(test, compileflags=future_flags, out=new.write, clear_globs=False) except KeyboardInterrupt: raise finally: sys.stdout = old if f > 0: self._reporter.doctest_fail(test.name, new.getvalue()) else: self._reporter.test_pass() self._reporter.leaving_filename() def get_test_files(self, dir, pat='*.py', init_only=True): """ Returns the list of \*.py files (default) from which docstrings will be tested which are at or below directory ``dir``. By default, only those that have an __init__.py in their parent directory and do not start with ``test_`` will be included. """ def importable(x): """ Checks if given pathname x is an importable module by checking for __init__.py file. Returns True/False. Currently we only test if the __init__.py file exists in the directory with the file "x" (in theory we should also test all the parent dirs). """ init_py = os.path.join(os.path.dirname(x), "__init__.py") return os.path.exists(init_py) dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if not f.startswith('test_') and fnmatch(f, pat)]) if init_only: # skip files that are not importable (i.e. missing __init__.py) g = [x for x in g if importable(x)] return [sys_normcase(gi) for gi in g] def _process_dependencies(self, deps): """ Returns ``False`` if some dependencies are not met and the test should be skipped otherwise returns ``True``. 
""" executables = deps.get('exe', None) moduledeps = deps.get('modules', None) viewers = deps.get('disable_viewers', None) pyglet = deps.get('pyglet', None) # print deps if executables is not None: for ex in executables: found = find_executable(ex) if found is None: return False if moduledeps is not None: for extmod in moduledeps: if extmod == 'matplotlib': matplotlib = import_module( 'matplotlib', __import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']}, min_module_version='1.0.0', catch=(RuntimeError,)) if matplotlib is not None: pass else: return False else: # TODO min version support mod = import_module(extmod) if mod is not None: version = "unknown" if hasattr(mod, '__version__'): version = mod.__version__ else: return False if viewers is not None: import tempfile tempdir = tempfile.mkdtemp() os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH']) if PY3: vw = '#!/usr/bin/env python3\n' \ 'import sys\n' \ 'if len(sys.argv) <= 1:\n' \ ' exit("wrong number of args")\n' else: vw = '#!/usr/bin/env python\n' \ 'import sys\n' \ 'if len(sys.argv) <= 1:\n' \ ' exit("wrong number of args")\n' for viewer in viewers: with open(os.path.join(tempdir, viewer), 'w') as fh: fh.write(vw) # make the file executable os.chmod(os.path.join(tempdir, viewer), stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR) if pyglet: # monkey-patch pyglet s.t. it does not open a window during # doctesting import pyglet class DummyWindow(object): def __init__(self, *args, **kwargs): self.has_exit=True self.width = 600 self.height = 400 def set_vsync(self, x): pass def switch_to(self): pass def push_handlers(self, x): pass def close(self): pass pyglet.window.Window = DummyWindow return True class SymPyDocTestFinder(DocTestFinder): """ A class used to extract the DocTests that are relevant to a given object, from its docstring and the docstrings of its contained objects. Doctests can currently be extracted from the following object types: modules, functions, classes, methods, staticmethods, classmethods, and properties. Modified from doctest's version by looking harder for code in the case that it looks like the the code comes from a different module. In the case of decorated functions (e.g. @vectorize) they appear to come from a different module (e.g. multidemensional) even though their code is not there. """ def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to ``tests``. """ if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. 
if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (rawname %s)" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, string_types): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, string_types)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if (inspect.isfunction(val) or inspect.isclass(val) or isinstance(val, property)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (valname %s)" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) def _get_test(self, obj, name, module, globs, source_lines): """ Return a DocTest for the given object, if it defines a docstring; otherwise, return None. """ lineno = None # Extract the object's docstring. If it doesn't have one, # then return None (no test for this object). if isinstance(obj, string_types): # obj is a string in the case for objects in the polys package. # Note that source_lines is a binary string (compiled polys # modules), which can't be handled by _find_lineno so determine # the line number here. docstring = obj matches = re.findall("line \d+", name) assert len(matches) == 1, \ "string '%s' does not contain lineno " % name # NOTE: this is not the exact linenumber but its better than no # lineno ;) lineno = int(matches[0][5:]) else: try: if obj.__doc__ is None: docstring = '' else: docstring = obj.__doc__ if not isinstance(docstring, string_types): docstring = str(docstring) except (TypeError, AttributeError): docstring = '' # Don't bother if the docstring is empty. if self._exclude_empty and not docstring: return None # check that properties have a docstring because _find_lineno # assumes it if isinstance(obj, property): if obj.fget.__doc__ is None: return None # Find the docstring's location in the file. 
if lineno is None: # handling of properties is not implemented in _find_lineno so do # it here if hasattr(obj, 'func_closure') and obj.func_closure is not None: tobj = obj.func_closure[0].cell_contents elif isinstance(obj, property): tobj = obj.fget else: tobj = obj lineno = self._find_lineno(tobj, source_lines) if lineno is None: return None # Return a DocTest for this object. if module is None: filename = None else: filename = getattr(module, '__file__', module.__name__) if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] if hasattr(obj, '_doctest_depends_on'): globs['_doctest_depends_on'] = obj._doctest_depends_on else: globs['_doctest_depends_on'] = {} return self._parser.get_doctest(docstring, globs, name, filename, lineno) class SymPyDocTestRunner(DocTestRunner): """ A class used to run DocTest test cases, and accumulate statistics. The ``run`` method is used to process a single DocTest case. It returns a tuple ``(f, t)``, where ``t`` is the number of test cases tried, and ``f`` is the number of test cases that failed. Modified from the doctest version to not reset the sys.displayhook (see issue 5140). See the docstring of the original DocTestRunner for more information. """ def run(self, test, compileflags=None, out=None, clear_globs=True): """ Run the examples in ``test``, and display the results using the writer function ``out``. The examples are run in the namespace ``test.globs``. If ``clear_globs`` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use ``clear_globs=False``. ``compileflags`` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to ``globs``. The output of each example is checked using ``SymPyDocTestRunner.check_output``, and the results are formatted by the ``SymPyDocTestRunner.report_*`` methods. """ self.test = test if compileflags is None: compileflags = pdoctest._extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_set_trace = pdb.set_trace self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = pdoctest.linecache.getlines linecache.getlines = self.__patched_linecache_getlines try: test.globs['print_function'] = print_function return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() # We have to override the name mangled methods. 
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \ DocTestRunner._DocTestRunner__patched_linecache_getlines SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \ DocTestRunner._DocTestRunner__record_outcome class SymPyOutputChecker(pdoctest.OutputChecker): """ Compared to the OutputChecker from the stdlib our OutputChecker class supports numerical comparison of floats occuring in the output of the doctest examples """ def __init__(self): # NOTE OutputChecker is an old-style class with no __init__ method, # so we can't call the base class version of __init__ here got_floats = r'(\d+\.\d*|\.\d+)' # floats in the 'want' string may contain ellipses want_floats = got_floats + r'(\.{3})?' front_sep = r'\s|\+|\-|\*|,' back_sep = front_sep + r'|j|e' fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep) fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep) self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep) fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep) self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend)) def check_output(self, want, got, optionflags): """ Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags. """ # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub('(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False class Reporter(object): """ Parent class for all reporters. """ pass class PyTestReporter(Reporter): """ Py.test like reporter. Should produce output identical to py.test. 
""" def __init__(self, verbose=False, tb="short", colors=True, force_colors=False, split=None): self._verbose = verbose self._tb_style = tb self._colors = colors self._force_colors = force_colors self._xfailed = 0 self._xpassed = [] self._failed = [] self._failed_doctest = [] self._passed = 0 self._skipped = 0 self._exceptions = [] self._terminal_width = None self._default_width = 80 self._split = split # this tracks the x-position of the cursor (useful for positioning # things on the screen), without the need for any readline library: self._write_pos = 0 self._line_wrap = False def root_dir(self, dir): self._root_dir = dir @property def terminal_width(self): if self._terminal_width is not None: return self._terminal_width def findout_terminal_width(): if sys.platform == "win32": # Windows support is based on: # # http://code.activestate.com/recipes/ # 440694-determine-size-of-console-window-on-windows/ from ctypes import windll, create_string_buffer h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: import struct (_, _, _, _, _, left, _, right, _, _, _) = \ struct.unpack("hhhhHhhhhhh", csbi.raw) return right - left else: return self._default_width if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty(): return self._default_width # leave PIPEs alone try: process = subprocess.Popen(['stty', '-a'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = process.stdout.read() if PY3: stdout = stdout.decode("utf-8") except (OSError, IOError): pass else: # We support the following output formats from stty: # # 1) Linux -> columns 80 # 2) OS X -> 80 columns # 3) Solaris -> columns = 80 re_linux = r"columns\s+(?P<columns>\d+);" re_osx = r"(?P<columns>\d+)\s*columns;" re_solaris = r"columns\s+=\s+(?P<columns>\d+);" for regex in (re_linux, re_osx, re_solaris): match = re.search(regex, stdout) if match is not None: columns = match.group('columns') try: width = int(columns) except ValueError: pass if width != 0: return width return self._default_width width = findout_terminal_width() self._terminal_width = width return width def write(self, text, color="", align="left", width=None, force_colors=False): """ Prints a text on the screen. It uses sys.stdout.write(), so no readline library is necessary. Parameters ========== color : choose from the colors below, "" means default color align : "left"/"right", "left" is a normal print, "right" is aligned on the right-hand side of the screen, filled with spaces if necessary width : the screen width """ color_templates = ( ("Black", "0;30"), ("Red", "0;31"), ("Green", "0;32"), ("Brown", "0;33"), ("Blue", "0;34"), ("Purple", "0;35"), ("Cyan", "0;36"), ("LightGray", "0;37"), ("DarkGray", "1;30"), ("LightRed", "1;31"), ("LightGreen", "1;32"), ("Yellow", "1;33"), ("LightBlue", "1;34"), ("LightPurple", "1;35"), ("LightCyan", "1;36"), ("White", "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' if width is None: width = self.terminal_width if align == "right": if self._write_pos + len(text) > width: # we don't fit on the current line, create a new line self.write("\n") self.write(" "*(width - self._write_pos - len(text))) if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \ sys.stdout.isatty(): # the stdout is not a terminal, this for example happens if the # output is piped to less, e.g. "bin/test | less". 
In this case, # the terminal control sequences would be printed verbatim, so # don't use any colors. color = "" elif sys.platform == "win32": # Windows consoles don't support ANSI escape sequences color = "" elif not self._colors: color = "" if self._line_wrap: if text[0] != "\n": sys.stdout.write("\n") # Avoid UnicodeEncodeError when printing out test failures if PY3 and IS_WINDOWS: text = text.encode('raw_unicode_escape').decode('utf8', 'ignore') elif PY3 and not sys.stdout.encoding.lower().startswith('utf'): text = text.encode(sys.stdout.encoding, 'backslashreplace' ).decode(sys.stdout.encoding) if color == "": sys.stdout.write(text) else: sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal)) sys.stdout.flush() l = text.rfind("\n") if l == -1: self._write_pos += len(text) else: self._write_pos = len(text) - l - 1 self._line_wrap = self._write_pos >= width self._write_pos %= width def write_center(self, text, delim="="): width = self.terminal_width if text != "": text = " %s " % text idx = (width - len(text)) // 2 t = delim*idx + text + delim*(width - idx - len(text)) self.write(t + "\n") def write_exception(self, e, val, tb): t = traceback.extract_tb(tb) # remove the first item, as that is always runtests.py t = t[1:] t = traceback.format_list(t) self.write("".join(t)) t = traceback.format_exception_only(e, val) self.write("".join(t)) def start(self, seed=None, msg="test process starts"): self.write_center(msg) executable = sys.executable v = tuple(sys.version_info) python_version = "%s.%s.%s-%s-%s" % v implementation = platform.python_implementation() if implementation == 'PyPy': implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info self.write("executable: %s (%s) [%s]\n" % (executable, python_version, implementation)) from .misc import ARCH self.write("architecture: %s\n" % ARCH) from sympy.core.cache import USE_CACHE self.write("cache: %s\n" % USE_CACHE) from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY version = '' if GROUND_TYPES =='gmpy': if HAS_GMPY == 1: import gmpy elif HAS_GMPY == 2: import gmpy2 as gmpy version = gmpy.version() self.write("ground types: %s %s\n" % (GROUND_TYPES, version)) if seed is not None: self.write("random seed: %d\n" % seed) from .misc import HASH_RANDOMIZATION self.write("hash randomization: ") hash_seed = os.getenv("PYTHONHASHSEED") or '0' if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)): self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed) else: self.write("off\n") if self._split: self.write("split: %s\n" % self._split) self.write('\n') self._t_start = clock() def finish(self): self._t_end = clock() self.write("\n") global text, linelen text = "tests finished: %d passed, " % self._passed linelen = len(text) def add_text(mytext): global text, linelen """Break new text if too long.""" if linelen + len(mytext) > self.terminal_width: text += '\n' linelen = 0 text += mytext linelen += len(mytext) if len(self._failed) > 0: add_text("%d failed, " % len(self._failed)) if len(self._failed_doctest) > 0: add_text("%d failed, " % len(self._failed_doctest)) if self._skipped > 0: add_text("%d skipped, " % self._skipped) if self._xfailed > 0: add_text("%d expected to fail, " % self._xfailed) if len(self._xpassed) > 0: add_text("%d expected to fail but passed, " % len(self._xpassed)) if len(self._exceptions) > 0: add_text("%d exceptions, " % len(self._exceptions)) add_text("in %.2f seconds" % (self._t_end - self._t_start)) if len(self._xpassed) > 0: self.write_center("xpassed tests", "_") for e in self._xpassed: 
self.write("%s: %s\n" % (e[0], e[1])) self.write("\n") if self._tb_style != "no" and len(self._exceptions) > 0: for e in self._exceptions: filename, f, (t, val, tb) = e self.write_center("", "_") if f is None: s = "%s" % filename else: s = "%s:%s" % (filename, f.__name__) self.write_center(s, "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed) > 0: for e in self._failed: filename, f, (t, val, tb) = e self.write_center("", "_") self.write_center("%s:%s" % (filename, f.__name__), "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed_doctest) > 0: for e in self._failed_doctest: filename, msg = e self.write_center("", "_") self.write_center("%s" % filename, "_") self.write(msg) self.write("\n") self.write_center(text) ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \ len(self._failed_doctest) == 0 if not ok: self.write("DO *NOT* COMMIT!\n") return ok def entering_filename(self, filename, n): rel_name = filename[len(self._root_dir) + 1:] self._active_file = rel_name self._active_file_error = False self.write(rel_name) self.write("[%d] " % n) def leaving_filename(self): self.write(" ") if self._active_file_error: self.write("[FAIL]", "Red", align="right") else: self.write("[OK]", "Green", align="right") self.write("\n") if self._verbose: self.write("\n") def entering_test(self, f): self._active_f = f if self._verbose: self.write("\n" + f.__name__ + " ") def test_xfail(self): self._xfailed += 1 self.write("f", "Green") def test_xpass(self, v): message = str(v) self._xpassed.append((self._active_file, message)) self.write("X", "Green") def test_fail(self, exc_info): self._failed.append((self._active_file, self._active_f, exc_info)) self.write("F", "Red") self._active_file_error = True def doctest_fail(self, name, error_msg): # the first line contains "******", remove it: error_msg = "\n".join(error_msg.split("\n")[1:]) self._failed_doctest.append((name, error_msg)) self.write("F", "Red") self._active_file_error = True def test_pass(self, char="."): self._passed += 1 if self._verbose: self.write("ok", "Green") else: self.write(char, "Green") def test_skip(self, v=None): char = "s" self._skipped += 1 if v is not None: message = str(v) if message == "KeyboardInterrupt": char = "K" elif message == "Timeout": char = "T" elif message == "Slow": char = "w" self.write(char, "Blue") if self._verbose: self.write(" - ", "Blue") if v is not None: self.write(message, "Blue") def test_exception(self, exc_info): self._exceptions.append((self._active_file, self._active_f, exc_info)) self.write("E", "Red") self._active_file_error = True def import_error(self, filename, exc_info): self._exceptions.append((filename, None, exc_info)) rel_name = filename[len(self._root_dir) + 1:] self.write(rel_name) self.write("[?] Failed to import", "Red") self.write(" ") self.write("[FAIL]", "Red", align="right") self.write("\n") sympy_dir = get_sympy_dir()
bsd-3-clause
gauravmm/Remote-Temperature-Monitor
utilities/colormap/colormaps.py
28
50518
# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt, # and (in the case of viridis) Eric Firing. # # This file and the colormaps in it are released under the CC0 license / # public domain dedication. We would appreciate credit if you use or # redistribute these colormaps, but do not impose any legal restrictions. # # To the extent possible under law, the persons who associated CC0 with # mpl-colormaps have waived all copyright and related or neighboring rights # to mpl-colormaps. # # You should have received a copy of the CC0 legalcode along with this # work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. __all__ = ['magma', 'inferno', 'plasma', 'viridis'] _magma_data = [[0.001462, 0.000466, 0.013866], [0.002258, 0.001295, 0.018331], [0.003279, 0.002305, 0.023708], [0.004512, 0.003490, 0.029965], [0.005950, 0.004843, 0.037130], [0.007588, 0.006356, 0.044973], [0.009426, 0.008022, 0.052844], [0.011465, 0.009828, 0.060750], [0.013708, 0.011771, 0.068667], [0.016156, 0.013840, 0.076603], [0.018815, 0.016026, 0.084584], [0.021692, 0.018320, 0.092610], [0.024792, 0.020715, 0.100676], [0.028123, 0.023201, 0.108787], [0.031696, 0.025765, 0.116965], [0.035520, 0.028397, 0.125209], [0.039608, 0.031090, 0.133515], [0.043830, 0.033830, 0.141886], [0.048062, 0.036607, 0.150327], [0.052320, 0.039407, 0.158841], [0.056615, 0.042160, 0.167446], [0.060949, 0.044794, 0.176129], [0.065330, 0.047318, 0.184892], [0.069764, 0.049726, 0.193735], [0.074257, 0.052017, 0.202660], [0.078815, 0.054184, 0.211667], [0.083446, 0.056225, 0.220755], [0.088155, 0.058133, 0.229922], [0.092949, 0.059904, 0.239164], [0.097833, 0.061531, 0.248477], [0.102815, 0.063010, 0.257854], [0.107899, 0.064335, 0.267289], [0.113094, 0.065492, 0.276784], [0.118405, 0.066479, 0.286321], [0.123833, 0.067295, 0.295879], [0.129380, 0.067935, 0.305443], [0.135053, 0.068391, 0.315000], [0.140858, 0.068654, 0.324538], [0.146785, 0.068738, 0.334011], [0.152839, 0.068637, 0.343404], [0.159018, 0.068354, 0.352688], [0.165308, 0.067911, 0.361816], [0.171713, 0.067305, 0.370771], [0.178212, 0.066576, 0.379497], [0.184801, 0.065732, 0.387973], [0.191460, 0.064818, 0.396152], [0.198177, 0.063862, 0.404009], [0.204935, 0.062907, 0.411514], [0.211718, 0.061992, 0.418647], [0.218512, 0.061158, 0.425392], [0.225302, 0.060445, 0.431742], [0.232077, 0.059889, 0.437695], [0.238826, 0.059517, 0.443256], [0.245543, 0.059352, 0.448436], [0.252220, 0.059415, 0.453248], [0.258857, 0.059706, 0.457710], [0.265447, 0.060237, 0.461840], [0.271994, 0.060994, 0.465660], [0.278493, 0.061978, 0.469190], [0.284951, 0.063168, 0.472451], [0.291366, 0.064553, 0.475462], [0.297740, 0.066117, 0.478243], [0.304081, 0.067835, 0.480812], [0.310382, 0.069702, 0.483186], [0.316654, 0.071690, 0.485380], [0.322899, 0.073782, 0.487408], [0.329114, 0.075972, 0.489287], [0.335308, 0.078236, 0.491024], [0.341482, 0.080564, 0.492631], [0.347636, 0.082946, 0.494121], [0.353773, 0.085373, 0.495501], [0.359898, 0.087831, 0.496778], [0.366012, 0.090314, 0.497960], [0.372116, 0.092816, 0.499053], [0.378211, 0.095332, 0.500067], [0.384299, 0.097855, 0.501002], [0.390384, 0.100379, 0.501864], [0.396467, 0.102902, 0.502658], [0.402548, 0.105420, 0.503386], [0.408629, 0.107930, 0.504052], [0.414709, 0.110431, 0.504662], [0.420791, 0.112920, 0.505215], [0.426877, 0.115395, 0.505714], [0.432967, 0.117855, 0.506160], [0.439062, 0.120298, 0.506555], [0.445163, 0.122724, 0.506901], [0.451271, 0.125132, 0.507198], [0.457386, 0.127522, 0.507448], [0.463508, 0.129893, 
0.507652], [0.469640, 0.132245, 0.507809], [0.475780, 0.134577, 0.507921], [0.481929, 0.136891, 0.507989], [0.488088, 0.139186, 0.508011], [0.494258, 0.141462, 0.507988], [0.500438, 0.143719, 0.507920], [0.506629, 0.145958, 0.507806], [0.512831, 0.148179, 0.507648], [0.519045, 0.150383, 0.507443], [0.525270, 0.152569, 0.507192], [0.531507, 0.154739, 0.506895], [0.537755, 0.156894, 0.506551], [0.544015, 0.159033, 0.506159], [0.550287, 0.161158, 0.505719], [0.556571, 0.163269, 0.505230], [0.562866, 0.165368, 0.504692], [0.569172, 0.167454, 0.504105], [0.575490, 0.169530, 0.503466], [0.581819, 0.171596, 0.502777], [0.588158, 0.173652, 0.502035], [0.594508, 0.175701, 0.501241], [0.600868, 0.177743, 0.500394], [0.607238, 0.179779, 0.499492], [0.613617, 0.181811, 0.498536], [0.620005, 0.183840, 0.497524], [0.626401, 0.185867, 0.496456], [0.632805, 0.187893, 0.495332], [0.639216, 0.189921, 0.494150], [0.645633, 0.191952, 0.492910], [0.652056, 0.193986, 0.491611], [0.658483, 0.196027, 0.490253], [0.664915, 0.198075, 0.488836], [0.671349, 0.200133, 0.487358], [0.677786, 0.202203, 0.485819], [0.684224, 0.204286, 0.484219], [0.690661, 0.206384, 0.482558], [0.697098, 0.208501, 0.480835], [0.703532, 0.210638, 0.479049], [0.709962, 0.212797, 0.477201], [0.716387, 0.214982, 0.475290], [0.722805, 0.217194, 0.473316], [0.729216, 0.219437, 0.471279], [0.735616, 0.221713, 0.469180], [0.742004, 0.224025, 0.467018], [0.748378, 0.226377, 0.464794], [0.754737, 0.228772, 0.462509], [0.761077, 0.231214, 0.460162], [0.767398, 0.233705, 0.457755], [0.773695, 0.236249, 0.455289], [0.779968, 0.238851, 0.452765], [0.786212, 0.241514, 0.450184], [0.792427, 0.244242, 0.447543], [0.798608, 0.247040, 0.444848], [0.804752, 0.249911, 0.442102], [0.810855, 0.252861, 0.439305], [0.816914, 0.255895, 0.436461], [0.822926, 0.259016, 0.433573], [0.828886, 0.262229, 0.430644], [0.834791, 0.265540, 0.427671], [0.840636, 0.268953, 0.424666], [0.846416, 0.272473, 0.421631], [0.852126, 0.276106, 0.418573], [0.857763, 0.279857, 0.415496], [0.863320, 0.283729, 0.412403], [0.868793, 0.287728, 0.409303], [0.874176, 0.291859, 0.406205], [0.879464, 0.296125, 0.403118], [0.884651, 0.300530, 0.400047], [0.889731, 0.305079, 0.397002], [0.894700, 0.309773, 0.393995], [0.899552, 0.314616, 0.391037], [0.904281, 0.319610, 0.388137], [0.908884, 0.324755, 0.385308], [0.913354, 0.330052, 0.382563], [0.917689, 0.335500, 0.379915], [0.921884, 0.341098, 0.377376], [0.925937, 0.346844, 0.374959], [0.929845, 0.352734, 0.372677], [0.933606, 0.358764, 0.370541], [0.937221, 0.364929, 0.368567], [0.940687, 0.371224, 0.366762], [0.944006, 0.377643, 0.365136], [0.947180, 0.384178, 0.363701], [0.950210, 0.390820, 0.362468], [0.953099, 0.397563, 0.361438], [0.955849, 0.404400, 0.360619], [0.958464, 0.411324, 0.360014], [0.960949, 0.418323, 0.359630], [0.963310, 0.425390, 0.359469], [0.965549, 0.432519, 0.359529], [0.967671, 0.439703, 0.359810], [0.969680, 0.446936, 0.360311], [0.971582, 0.454210, 0.361030], [0.973381, 0.461520, 0.361965], [0.975082, 0.468861, 0.363111], [0.976690, 0.476226, 0.364466], [0.978210, 0.483612, 0.366025], [0.979645, 0.491014, 0.367783], [0.981000, 0.498428, 0.369734], [0.982279, 0.505851, 0.371874], [0.983485, 0.513280, 0.374198], [0.984622, 0.520713, 0.376698], [0.985693, 0.528148, 0.379371], [0.986700, 0.535582, 0.382210], [0.987646, 0.543015, 0.385210], [0.988533, 0.550446, 0.388365], [0.989363, 0.557873, 0.391671], [0.990138, 0.565296, 0.395122], [0.990871, 0.572706, 0.398714], [0.991558, 0.580107, 0.402441], [0.992196, 0.587502, 
0.406299], [0.992785, 0.594891, 0.410283], [0.993326, 0.602275, 0.414390], [0.993834, 0.609644, 0.418613], [0.994309, 0.616999, 0.422950], [0.994738, 0.624350, 0.427397], [0.995122, 0.631696, 0.431951], [0.995480, 0.639027, 0.436607], [0.995810, 0.646344, 0.441361], [0.996096, 0.653659, 0.446213], [0.996341, 0.660969, 0.451160], [0.996580, 0.668256, 0.456192], [0.996775, 0.675541, 0.461314], [0.996925, 0.682828, 0.466526], [0.997077, 0.690088, 0.471811], [0.997186, 0.697349, 0.477182], [0.997254, 0.704611, 0.482635], [0.997325, 0.711848, 0.488154], [0.997351, 0.719089, 0.493755], [0.997351, 0.726324, 0.499428], [0.997341, 0.733545, 0.505167], [0.997285, 0.740772, 0.510983], [0.997228, 0.747981, 0.516859], [0.997138, 0.755190, 0.522806], [0.997019, 0.762398, 0.528821], [0.996898, 0.769591, 0.534892], [0.996727, 0.776795, 0.541039], [0.996571, 0.783977, 0.547233], [0.996369, 0.791167, 0.553499], [0.996162, 0.798348, 0.559820], [0.995932, 0.805527, 0.566202], [0.995680, 0.812706, 0.572645], [0.995424, 0.819875, 0.579140], [0.995131, 0.827052, 0.585701], [0.994851, 0.834213, 0.592307], [0.994524, 0.841387, 0.598983], [0.994222, 0.848540, 0.605696], [0.993866, 0.855711, 0.612482], [0.993545, 0.862859, 0.619299], [0.993170, 0.870024, 0.626189], [0.992831, 0.877168, 0.633109], [0.992440, 0.884330, 0.640099], [0.992089, 0.891470, 0.647116], [0.991688, 0.898627, 0.654202], [0.991332, 0.905763, 0.661309], [0.990930, 0.912915, 0.668481], [0.990570, 0.920049, 0.675675], [0.990175, 0.927196, 0.682926], [0.989815, 0.934329, 0.690198], [0.989434, 0.941470, 0.697519], [0.989077, 0.948604, 0.704863], [0.988717, 0.955742, 0.712242], [0.988367, 0.962878, 0.719649], [0.988033, 0.970012, 0.727077], [0.987691, 0.977154, 0.734536], [0.987387, 0.984288, 0.742002], [0.987053, 0.991438, 0.749504]] _inferno_data = [[0.001462, 0.000466, 0.013866], [0.002267, 0.001270, 0.018570], [0.003299, 0.002249, 0.024239], [0.004547, 0.003392, 0.030909], [0.006006, 0.004692, 0.038558], [0.007676, 0.006136, 0.046836], [0.009561, 0.007713, 0.055143], [0.011663, 0.009417, 0.063460], [0.013995, 0.011225, 0.071862], [0.016561, 0.013136, 0.080282], [0.019373, 0.015133, 0.088767], [0.022447, 0.017199, 0.097327], [0.025793, 0.019331, 0.105930], [0.029432, 0.021503, 0.114621], [0.033385, 0.023702, 0.123397], [0.037668, 0.025921, 0.132232], [0.042253, 0.028139, 0.141141], [0.046915, 0.030324, 0.150164], [0.051644, 0.032474, 0.159254], [0.056449, 0.034569, 0.168414], [0.061340, 0.036590, 0.177642], [0.066331, 0.038504, 0.186962], [0.071429, 0.040294, 0.196354], [0.076637, 0.041905, 0.205799], [0.081962, 0.043328, 0.215289], [0.087411, 0.044556, 0.224813], [0.092990, 0.045583, 0.234358], [0.098702, 0.046402, 0.243904], [0.104551, 0.047008, 0.253430], [0.110536, 0.047399, 0.262912], [0.116656, 0.047574, 0.272321], [0.122908, 0.047536, 0.281624], [0.129285, 0.047293, 0.290788], [0.135778, 0.046856, 0.299776], [0.142378, 0.046242, 0.308553], [0.149073, 0.045468, 0.317085], [0.155850, 0.044559, 0.325338], [0.162689, 0.043554, 0.333277], [0.169575, 0.042489, 0.340874], [0.176493, 0.041402, 0.348111], [0.183429, 0.040329, 0.354971], [0.190367, 0.039309, 0.361447], [0.197297, 0.038400, 0.367535], [0.204209, 0.037632, 0.373238], [0.211095, 0.037030, 0.378563], [0.217949, 0.036615, 0.383522], [0.224763, 0.036405, 0.388129], [0.231538, 0.036405, 0.392400], [0.238273, 0.036621, 0.396353], [0.244967, 0.037055, 0.400007], [0.251620, 0.037705, 0.403378], [0.258234, 0.038571, 0.406485], [0.264810, 0.039647, 0.409345], [0.271347, 0.040922, 0.411976], 
[0.277850, 0.042353, 0.414392], [0.284321, 0.043933, 0.416608], [0.290763, 0.045644, 0.418637], [0.297178, 0.047470, 0.420491], [0.303568, 0.049396, 0.422182], [0.309935, 0.051407, 0.423721], [0.316282, 0.053490, 0.425116], [0.322610, 0.055634, 0.426377], [0.328921, 0.057827, 0.427511], [0.335217, 0.060060, 0.428524], [0.341500, 0.062325, 0.429425], [0.347771, 0.064616, 0.430217], [0.354032, 0.066925, 0.430906], [0.360284, 0.069247, 0.431497], [0.366529, 0.071579, 0.431994], [0.372768, 0.073915, 0.432400], [0.379001, 0.076253, 0.432719], [0.385228, 0.078591, 0.432955], [0.391453, 0.080927, 0.433109], [0.397674, 0.083257, 0.433183], [0.403894, 0.085580, 0.433179], [0.410113, 0.087896, 0.433098], [0.416331, 0.090203, 0.432943], [0.422549, 0.092501, 0.432714], [0.428768, 0.094790, 0.432412], [0.434987, 0.097069, 0.432039], [0.441207, 0.099338, 0.431594], [0.447428, 0.101597, 0.431080], [0.453651, 0.103848, 0.430498], [0.459875, 0.106089, 0.429846], [0.466100, 0.108322, 0.429125], [0.472328, 0.110547, 0.428334], [0.478558, 0.112764, 0.427475], [0.484789, 0.114974, 0.426548], [0.491022, 0.117179, 0.425552], [0.497257, 0.119379, 0.424488], [0.503493, 0.121575, 0.423356], [0.509730, 0.123769, 0.422156], [0.515967, 0.125960, 0.420887], [0.522206, 0.128150, 0.419549], [0.528444, 0.130341, 0.418142], [0.534683, 0.132534, 0.416667], [0.540920, 0.134729, 0.415123], [0.547157, 0.136929, 0.413511], [0.553392, 0.139134, 0.411829], [0.559624, 0.141346, 0.410078], [0.565854, 0.143567, 0.408258], [0.572081, 0.145797, 0.406369], [0.578304, 0.148039, 0.404411], [0.584521, 0.150294, 0.402385], [0.590734, 0.152563, 0.400290], [0.596940, 0.154848, 0.398125], [0.603139, 0.157151, 0.395891], [0.609330, 0.159474, 0.393589], [0.615513, 0.161817, 0.391219], [0.621685, 0.164184, 0.388781], [0.627847, 0.166575, 0.386276], [0.633998, 0.168992, 0.383704], [0.640135, 0.171438, 0.381065], [0.646260, 0.173914, 0.378359], [0.652369, 0.176421, 0.375586], [0.658463, 0.178962, 0.372748], [0.664540, 0.181539, 0.369846], [0.670599, 0.184153, 0.366879], [0.676638, 0.186807, 0.363849], [0.682656, 0.189501, 0.360757], [0.688653, 0.192239, 0.357603], [0.694627, 0.195021, 0.354388], [0.700576, 0.197851, 0.351113], [0.706500, 0.200728, 0.347777], [0.712396, 0.203656, 0.344383], [0.718264, 0.206636, 0.340931], [0.724103, 0.209670, 0.337424], [0.729909, 0.212759, 0.333861], [0.735683, 0.215906, 0.330245], [0.741423, 0.219112, 0.326576], [0.747127, 0.222378, 0.322856], [0.752794, 0.225706, 0.319085], [0.758422, 0.229097, 0.315266], [0.764010, 0.232554, 0.311399], [0.769556, 0.236077, 0.307485], [0.775059, 0.239667, 0.303526], [0.780517, 0.243327, 0.299523], [0.785929, 0.247056, 0.295477], [0.791293, 0.250856, 0.291390], [0.796607, 0.254728, 0.287264], [0.801871, 0.258674, 0.283099], [0.807082, 0.262692, 0.278898], [0.812239, 0.266786, 0.274661], [0.817341, 0.270954, 0.270390], [0.822386, 0.275197, 0.266085], [0.827372, 0.279517, 0.261750], [0.832299, 0.283913, 0.257383], [0.837165, 0.288385, 0.252988], [0.841969, 0.292933, 0.248564], [0.846709, 0.297559, 0.244113], [0.851384, 0.302260, 0.239636], [0.855992, 0.307038, 0.235133], [0.860533, 0.311892, 0.230606], [0.865006, 0.316822, 0.226055], [0.869409, 0.321827, 0.221482], [0.873741, 0.326906, 0.216886], [0.878001, 0.332060, 0.212268], [0.882188, 0.337287, 0.207628], [0.886302, 0.342586, 0.202968], [0.890341, 0.347957, 0.198286], [0.894305, 0.353399, 0.193584], [0.898192, 0.358911, 0.188860], [0.902003, 0.364492, 0.184116], [0.905735, 0.370140, 0.179350], [0.909390, 0.375856, 0.174563], 
[0.912966, 0.381636, 0.169755], [0.916462, 0.387481, 0.164924], [0.919879, 0.393389, 0.160070], [0.923215, 0.399359, 0.155193], [0.926470, 0.405389, 0.150292], [0.929644, 0.411479, 0.145367], [0.932737, 0.417627, 0.140417], [0.935747, 0.423831, 0.135440], [0.938675, 0.430091, 0.130438], [0.941521, 0.436405, 0.125409], [0.944285, 0.442772, 0.120354], [0.946965, 0.449191, 0.115272], [0.949562, 0.455660, 0.110164], [0.952075, 0.462178, 0.105031], [0.954506, 0.468744, 0.099874], [0.956852, 0.475356, 0.094695], [0.959114, 0.482014, 0.089499], [0.961293, 0.488716, 0.084289], [0.963387, 0.495462, 0.079073], [0.965397, 0.502249, 0.073859], [0.967322, 0.509078, 0.068659], [0.969163, 0.515946, 0.063488], [0.970919, 0.522853, 0.058367], [0.972590, 0.529798, 0.053324], [0.974176, 0.536780, 0.048392], [0.975677, 0.543798, 0.043618], [0.977092, 0.550850, 0.039050], [0.978422, 0.557937, 0.034931], [0.979666, 0.565057, 0.031409], [0.980824, 0.572209, 0.028508], [0.981895, 0.579392, 0.026250], [0.982881, 0.586606, 0.024661], [0.983779, 0.593849, 0.023770], [0.984591, 0.601122, 0.023606], [0.985315, 0.608422, 0.024202], [0.985952, 0.615750, 0.025592], [0.986502, 0.623105, 0.027814], [0.986964, 0.630485, 0.030908], [0.987337, 0.637890, 0.034916], [0.987622, 0.645320, 0.039886], [0.987819, 0.652773, 0.045581], [0.987926, 0.660250, 0.051750], [0.987945, 0.667748, 0.058329], [0.987874, 0.675267, 0.065257], [0.987714, 0.682807, 0.072489], [0.987464, 0.690366, 0.079990], [0.987124, 0.697944, 0.087731], [0.986694, 0.705540, 0.095694], [0.986175, 0.713153, 0.103863], [0.985566, 0.720782, 0.112229], [0.984865, 0.728427, 0.120785], [0.984075, 0.736087, 0.129527], [0.983196, 0.743758, 0.138453], [0.982228, 0.751442, 0.147565], [0.981173, 0.759135, 0.156863], [0.980032, 0.766837, 0.166353], [0.978806, 0.774545, 0.176037], [0.977497, 0.782258, 0.185923], [0.976108, 0.789974, 0.196018], [0.974638, 0.797692, 0.206332], [0.973088, 0.805409, 0.216877], [0.971468, 0.813122, 0.227658], [0.969783, 0.820825, 0.238686], [0.968041, 0.828515, 0.249972], [0.966243, 0.836191, 0.261534], [0.964394, 0.843848, 0.273391], [0.962517, 0.851476, 0.285546], [0.960626, 0.859069, 0.298010], [0.958720, 0.866624, 0.310820], [0.956834, 0.874129, 0.323974], [0.954997, 0.881569, 0.337475], [0.953215, 0.888942, 0.351369], [0.951546, 0.896226, 0.365627], [0.950018, 0.903409, 0.380271], [0.948683, 0.910473, 0.395289], [0.947594, 0.917399, 0.410665], [0.946809, 0.924168, 0.426373], [0.946392, 0.930761, 0.442367], [0.946403, 0.937159, 0.458592], [0.946903, 0.943348, 0.474970], [0.947937, 0.949318, 0.491426], [0.949545, 0.955063, 0.507860], [0.951740, 0.960587, 0.524203], [0.954529, 0.965896, 0.540361], [0.957896, 0.971003, 0.556275], [0.961812, 0.975924, 0.571925], [0.966249, 0.980678, 0.587206], [0.971162, 0.985282, 0.602154], [0.976511, 0.989753, 0.616760], [0.982257, 0.994109, 0.631017], [0.988362, 0.998364, 0.644924]] _plasma_data = [[0.050383, 0.029803, 0.527975], [0.063536, 0.028426, 0.533124], [0.075353, 0.027206, 0.538007], [0.086222, 0.026125, 0.542658], [0.096379, 0.025165, 0.547103], [0.105980, 0.024309, 0.551368], [0.115124, 0.023556, 0.555468], [0.123903, 0.022878, 0.559423], [0.132381, 0.022258, 0.563250], [0.140603, 0.021687, 0.566959], [0.148607, 0.021154, 0.570562], [0.156421, 0.020651, 0.574065], [0.164070, 0.020171, 0.577478], [0.171574, 0.019706, 0.580806], [0.178950, 0.019252, 0.584054], [0.186213, 0.018803, 0.587228], [0.193374, 0.018354, 0.590330], [0.200445, 0.017902, 0.593364], [0.207435, 0.017442, 0.596333], [0.214350, 
0.016973, 0.599239], [0.221197, 0.016497, 0.602083], [0.227983, 0.016007, 0.604867], [0.234715, 0.015502, 0.607592], [0.241396, 0.014979, 0.610259], [0.248032, 0.014439, 0.612868], [0.254627, 0.013882, 0.615419], [0.261183, 0.013308, 0.617911], [0.267703, 0.012716, 0.620346], [0.274191, 0.012109, 0.622722], [0.280648, 0.011488, 0.625038], [0.287076, 0.010855, 0.627295], [0.293478, 0.010213, 0.629490], [0.299855, 0.009561, 0.631624], [0.306210, 0.008902, 0.633694], [0.312543, 0.008239, 0.635700], [0.318856, 0.007576, 0.637640], [0.325150, 0.006915, 0.639512], [0.331426, 0.006261, 0.641316], [0.337683, 0.005618, 0.643049], [0.343925, 0.004991, 0.644710], [0.350150, 0.004382, 0.646298], [0.356359, 0.003798, 0.647810], [0.362553, 0.003243, 0.649245], [0.368733, 0.002724, 0.650601], [0.374897, 0.002245, 0.651876], [0.381047, 0.001814, 0.653068], [0.387183, 0.001434, 0.654177], [0.393304, 0.001114, 0.655199], [0.399411, 0.000859, 0.656133], [0.405503, 0.000678, 0.656977], [0.411580, 0.000577, 0.657730], [0.417642, 0.000564, 0.658390], [0.423689, 0.000646, 0.658956], [0.429719, 0.000831, 0.659425], [0.435734, 0.001127, 0.659797], [0.441732, 0.001540, 0.660069], [0.447714, 0.002080, 0.660240], [0.453677, 0.002755, 0.660310], [0.459623, 0.003574, 0.660277], [0.465550, 0.004545, 0.660139], [0.471457, 0.005678, 0.659897], [0.477344, 0.006980, 0.659549], [0.483210, 0.008460, 0.659095], [0.489055, 0.010127, 0.658534], [0.494877, 0.011990, 0.657865], [0.500678, 0.014055, 0.657088], [0.506454, 0.016333, 0.656202], [0.512206, 0.018833, 0.655209], [0.517933, 0.021563, 0.654109], [0.523633, 0.024532, 0.652901], [0.529306, 0.027747, 0.651586], [0.534952, 0.031217, 0.650165], [0.540570, 0.034950, 0.648640], [0.546157, 0.038954, 0.647010], [0.551715, 0.043136, 0.645277], [0.557243, 0.047331, 0.643443], [0.562738, 0.051545, 0.641509], [0.568201, 0.055778, 0.639477], [0.573632, 0.060028, 0.637349], [0.579029, 0.064296, 0.635126], [0.584391, 0.068579, 0.632812], [0.589719, 0.072878, 0.630408], [0.595011, 0.077190, 0.627917], [0.600266, 0.081516, 0.625342], [0.605485, 0.085854, 0.622686], [0.610667, 0.090204, 0.619951], [0.615812, 0.094564, 0.617140], [0.620919, 0.098934, 0.614257], [0.625987, 0.103312, 0.611305], [0.631017, 0.107699, 0.608287], [0.636008, 0.112092, 0.605205], [0.640959, 0.116492, 0.602065], [0.645872, 0.120898, 0.598867], [0.650746, 0.125309, 0.595617], [0.655580, 0.129725, 0.592317], [0.660374, 0.134144, 0.588971], [0.665129, 0.138566, 0.585582], [0.669845, 0.142992, 0.582154], [0.674522, 0.147419, 0.578688], [0.679160, 0.151848, 0.575189], [0.683758, 0.156278, 0.571660], [0.688318, 0.160709, 0.568103], [0.692840, 0.165141, 0.564522], [0.697324, 0.169573, 0.560919], [0.701769, 0.174005, 0.557296], [0.706178, 0.178437, 0.553657], [0.710549, 0.182868, 0.550004], [0.714883, 0.187299, 0.546338], [0.719181, 0.191729, 0.542663], [0.723444, 0.196158, 0.538981], [0.727670, 0.200586, 0.535293], [0.731862, 0.205013, 0.531601], [0.736019, 0.209439, 0.527908], [0.740143, 0.213864, 0.524216], [0.744232, 0.218288, 0.520524], [0.748289, 0.222711, 0.516834], [0.752312, 0.227133, 0.513149], [0.756304, 0.231555, 0.509468], [0.760264, 0.235976, 0.505794], [0.764193, 0.240396, 0.502126], [0.768090, 0.244817, 0.498465], [0.771958, 0.249237, 0.494813], [0.775796, 0.253658, 0.491171], [0.779604, 0.258078, 0.487539], [0.783383, 0.262500, 0.483918], [0.787133, 0.266922, 0.480307], [0.790855, 0.271345, 0.476706], [0.794549, 0.275770, 0.473117], [0.798216, 0.280197, 0.469538], [0.801855, 0.284626, 0.465971], [0.805467, 
0.289057, 0.462415], [0.809052, 0.293491, 0.458870], [0.812612, 0.297928, 0.455338], [0.816144, 0.302368, 0.451816], [0.819651, 0.306812, 0.448306], [0.823132, 0.311261, 0.444806], [0.826588, 0.315714, 0.441316], [0.830018, 0.320172, 0.437836], [0.833422, 0.324635, 0.434366], [0.836801, 0.329105, 0.430905], [0.840155, 0.333580, 0.427455], [0.843484, 0.338062, 0.424013], [0.846788, 0.342551, 0.420579], [0.850066, 0.347048, 0.417153], [0.853319, 0.351553, 0.413734], [0.856547, 0.356066, 0.410322], [0.859750, 0.360588, 0.406917], [0.862927, 0.365119, 0.403519], [0.866078, 0.369660, 0.400126], [0.869203, 0.374212, 0.396738], [0.872303, 0.378774, 0.393355], [0.875376, 0.383347, 0.389976], [0.878423, 0.387932, 0.386600], [0.881443, 0.392529, 0.383229], [0.884436, 0.397139, 0.379860], [0.887402, 0.401762, 0.376494], [0.890340, 0.406398, 0.373130], [0.893250, 0.411048, 0.369768], [0.896131, 0.415712, 0.366407], [0.898984, 0.420392, 0.363047], [0.901807, 0.425087, 0.359688], [0.904601, 0.429797, 0.356329], [0.907365, 0.434524, 0.352970], [0.910098, 0.439268, 0.349610], [0.912800, 0.444029, 0.346251], [0.915471, 0.448807, 0.342890], [0.918109, 0.453603, 0.339529], [0.920714, 0.458417, 0.336166], [0.923287, 0.463251, 0.332801], [0.925825, 0.468103, 0.329435], [0.928329, 0.472975, 0.326067], [0.930798, 0.477867, 0.322697], [0.933232, 0.482780, 0.319325], [0.935630, 0.487712, 0.315952], [0.937990, 0.492667, 0.312575], [0.940313, 0.497642, 0.309197], [0.942598, 0.502639, 0.305816], [0.944844, 0.507658, 0.302433], [0.947051, 0.512699, 0.299049], [0.949217, 0.517763, 0.295662], [0.951344, 0.522850, 0.292275], [0.953428, 0.527960, 0.288883], [0.955470, 0.533093, 0.285490], [0.957469, 0.538250, 0.282096], [0.959424, 0.543431, 0.278701], [0.961336, 0.548636, 0.275305], [0.963203, 0.553865, 0.271909], [0.965024, 0.559118, 0.268513], [0.966798, 0.564396, 0.265118], [0.968526, 0.569700, 0.261721], [0.970205, 0.575028, 0.258325], [0.971835, 0.580382, 0.254931], [0.973416, 0.585761, 0.251540], [0.974947, 0.591165, 0.248151], [0.976428, 0.596595, 0.244767], [0.977856, 0.602051, 0.241387], [0.979233, 0.607532, 0.238013], [0.980556, 0.613039, 0.234646], [0.981826, 0.618572, 0.231287], [0.983041, 0.624131, 0.227937], [0.984199, 0.629718, 0.224595], [0.985301, 0.635330, 0.221265], [0.986345, 0.640969, 0.217948], [0.987332, 0.646633, 0.214648], [0.988260, 0.652325, 0.211364], [0.989128, 0.658043, 0.208100], [0.989935, 0.663787, 0.204859], [0.990681, 0.669558, 0.201642], [0.991365, 0.675355, 0.198453], [0.991985, 0.681179, 0.195295], [0.992541, 0.687030, 0.192170], [0.993032, 0.692907, 0.189084], [0.993456, 0.698810, 0.186041], [0.993814, 0.704741, 0.183043], [0.994103, 0.710698, 0.180097], [0.994324, 0.716681, 0.177208], [0.994474, 0.722691, 0.174381], [0.994553, 0.728728, 0.171622], [0.994561, 0.734791, 0.168938], [0.994495, 0.740880, 0.166335], [0.994355, 0.746995, 0.163821], [0.994141, 0.753137, 0.161404], [0.993851, 0.759304, 0.159092], [0.993482, 0.765499, 0.156891], [0.993033, 0.771720, 0.154808], [0.992505, 0.777967, 0.152855], [0.991897, 0.784239, 0.151042], [0.991209, 0.790537, 0.149377], [0.990439, 0.796859, 0.147870], [0.989587, 0.803205, 0.146529], [0.988648, 0.809579, 0.145357], [0.987621, 0.815978, 0.144363], [0.986509, 0.822401, 0.143557], [0.985314, 0.828846, 0.142945], [0.984031, 0.835315, 0.142528], [0.982653, 0.841812, 0.142303], [0.981190, 0.848329, 0.142279], [0.979644, 0.854866, 0.142453], [0.977995, 0.861432, 0.142808], [0.976265, 0.868016, 0.143351], [0.974443, 0.874622, 0.144061], [0.972530, 
0.881250, 0.144923], [0.970533, 0.887896, 0.145919], [0.968443, 0.894564, 0.147014], [0.966271, 0.901249, 0.148180], [0.964021, 0.907950, 0.149370], [0.961681, 0.914672, 0.150520], [0.959276, 0.921407, 0.151566], [0.956808, 0.928152, 0.152409], [0.954287, 0.934908, 0.152921], [0.951726, 0.941671, 0.152925], [0.949151, 0.948435, 0.152178], [0.946602, 0.955190, 0.150328], [0.944152, 0.961916, 0.146861], [0.941896, 0.968590, 0.140956], [0.940015, 0.975158, 0.131326]] _viridis_data = [[0.267004, 0.004874, 0.329415], [0.268510, 0.009605, 0.335427], [0.269944, 0.014625, 0.341379], [0.271305, 0.019942, 0.347269], [0.272594, 0.025563, 0.353093], [0.273809, 0.031497, 0.358853], [0.274952, 0.037752, 0.364543], [0.276022, 0.044167, 0.370164], [0.277018, 0.050344, 0.375715], [0.277941, 0.056324, 0.381191], [0.278791, 0.062145, 0.386592], [0.279566, 0.067836, 0.391917], [0.280267, 0.073417, 0.397163], [0.280894, 0.078907, 0.402329], [0.281446, 0.084320, 0.407414], [0.281924, 0.089666, 0.412415], [0.282327, 0.094955, 0.417331], [0.282656, 0.100196, 0.422160], [0.282910, 0.105393, 0.426902], [0.283091, 0.110553, 0.431554], [0.283197, 0.115680, 0.436115], [0.283229, 0.120777, 0.440584], [0.283187, 0.125848, 0.444960], [0.283072, 0.130895, 0.449241], [0.282884, 0.135920, 0.453427], [0.282623, 0.140926, 0.457517], [0.282290, 0.145912, 0.461510], [0.281887, 0.150881, 0.465405], [0.281412, 0.155834, 0.469201], [0.280868, 0.160771, 0.472899], [0.280255, 0.165693, 0.476498], [0.279574, 0.170599, 0.479997], [0.278826, 0.175490, 0.483397], [0.278012, 0.180367, 0.486697], [0.277134, 0.185228, 0.489898], [0.276194, 0.190074, 0.493001], [0.275191, 0.194905, 0.496005], [0.274128, 0.199721, 0.498911], [0.273006, 0.204520, 0.501721], [0.271828, 0.209303, 0.504434], [0.270595, 0.214069, 0.507052], [0.269308, 0.218818, 0.509577], [0.267968, 0.223549, 0.512008], [0.266580, 0.228262, 0.514349], [0.265145, 0.232956, 0.516599], [0.263663, 0.237631, 0.518762], [0.262138, 0.242286, 0.520837], [0.260571, 0.246922, 0.522828], [0.258965, 0.251537, 0.524736], [0.257322, 0.256130, 0.526563], [0.255645, 0.260703, 0.528312], [0.253935, 0.265254, 0.529983], [0.252194, 0.269783, 0.531579], [0.250425, 0.274290, 0.533103], [0.248629, 0.278775, 0.534556], [0.246811, 0.283237, 0.535941], [0.244972, 0.287675, 0.537260], [0.243113, 0.292092, 0.538516], [0.241237, 0.296485, 0.539709], [0.239346, 0.300855, 0.540844], [0.237441, 0.305202, 0.541921], [0.235526, 0.309527, 0.542944], [0.233603, 0.313828, 0.543914], [0.231674, 0.318106, 0.544834], [0.229739, 0.322361, 0.545706], [0.227802, 0.326594, 0.546532], [0.225863, 0.330805, 0.547314], [0.223925, 0.334994, 0.548053], [0.221989, 0.339161, 0.548752], [0.220057, 0.343307, 0.549413], [0.218130, 0.347432, 0.550038], [0.216210, 0.351535, 0.550627], [0.214298, 0.355619, 0.551184], [0.212395, 0.359683, 0.551710], [0.210503, 0.363727, 0.552206], [0.208623, 0.367752, 0.552675], [0.206756, 0.371758, 0.553117], [0.204903, 0.375746, 0.553533], [0.203063, 0.379716, 0.553925], [0.201239, 0.383670, 0.554294], [0.199430, 0.387607, 0.554642], [0.197636, 0.391528, 0.554969], [0.195860, 0.395433, 0.555276], [0.194100, 0.399323, 0.555565], [0.192357, 0.403199, 0.555836], [0.190631, 0.407061, 0.556089], [0.188923, 0.410910, 0.556326], [0.187231, 0.414746, 0.556547], [0.185556, 0.418570, 0.556753], [0.183898, 0.422383, 0.556944], [0.182256, 0.426184, 0.557120], [0.180629, 0.429975, 0.557282], [0.179019, 0.433756, 0.557430], [0.177423, 0.437527, 0.557565], [0.175841, 0.441290, 0.557685], [0.174274, 0.445044, 
0.557792], [0.172719, 0.448791, 0.557885], [0.171176, 0.452530, 0.557965], [0.169646, 0.456262, 0.558030], [0.168126, 0.459988, 0.558082], [0.166617, 0.463708, 0.558119], [0.165117, 0.467423, 0.558141], [0.163625, 0.471133, 0.558148], [0.162142, 0.474838, 0.558140], [0.160665, 0.478540, 0.558115], [0.159194, 0.482237, 0.558073], [0.157729, 0.485932, 0.558013], [0.156270, 0.489624, 0.557936], [0.154815, 0.493313, 0.557840], [0.153364, 0.497000, 0.557724], [0.151918, 0.500685, 0.557587], [0.150476, 0.504369, 0.557430], [0.149039, 0.508051, 0.557250], [0.147607, 0.511733, 0.557049], [0.146180, 0.515413, 0.556823], [0.144759, 0.519093, 0.556572], [0.143343, 0.522773, 0.556295], [0.141935, 0.526453, 0.555991], [0.140536, 0.530132, 0.555659], [0.139147, 0.533812, 0.555298], [0.137770, 0.537492, 0.554906], [0.136408, 0.541173, 0.554483], [0.135066, 0.544853, 0.554029], [0.133743, 0.548535, 0.553541], [0.132444, 0.552216, 0.553018], [0.131172, 0.555899, 0.552459], [0.129933, 0.559582, 0.551864], [0.128729, 0.563265, 0.551229], [0.127568, 0.566949, 0.550556], [0.126453, 0.570633, 0.549841], [0.125394, 0.574318, 0.549086], [0.124395, 0.578002, 0.548287], [0.123463, 0.581687, 0.547445], [0.122606, 0.585371, 0.546557], [0.121831, 0.589055, 0.545623], [0.121148, 0.592739, 0.544641], [0.120565, 0.596422, 0.543611], [0.120092, 0.600104, 0.542530], [0.119738, 0.603785, 0.541400], [0.119512, 0.607464, 0.540218], [0.119423, 0.611141, 0.538982], [0.119483, 0.614817, 0.537692], [0.119699, 0.618490, 0.536347], [0.120081, 0.622161, 0.534946], [0.120638, 0.625828, 0.533488], [0.121380, 0.629492, 0.531973], [0.122312, 0.633153, 0.530398], [0.123444, 0.636809, 0.528763], [0.124780, 0.640461, 0.527068], [0.126326, 0.644107, 0.525311], [0.128087, 0.647749, 0.523491], [0.130067, 0.651384, 0.521608], [0.132268, 0.655014, 0.519661], [0.134692, 0.658636, 0.517649], [0.137339, 0.662252, 0.515571], [0.140210, 0.665859, 0.513427], [0.143303, 0.669459, 0.511215], [0.146616, 0.673050, 0.508936], [0.150148, 0.676631, 0.506589], [0.153894, 0.680203, 0.504172], [0.157851, 0.683765, 0.501686], [0.162016, 0.687316, 0.499129], [0.166383, 0.690856, 0.496502], [0.170948, 0.694384, 0.493803], [0.175707, 0.697900, 0.491033], [0.180653, 0.701402, 0.488189], [0.185783, 0.704891, 0.485273], [0.191090, 0.708366, 0.482284], [0.196571, 0.711827, 0.479221], [0.202219, 0.715272, 0.476084], [0.208030, 0.718701, 0.472873], [0.214000, 0.722114, 0.469588], [0.220124, 0.725509, 0.466226], [0.226397, 0.728888, 0.462789], [0.232815, 0.732247, 0.459277], [0.239374, 0.735588, 0.455688], [0.246070, 0.738910, 0.452024], [0.252899, 0.742211, 0.448284], [0.259857, 0.745492, 0.444467], [0.266941, 0.748751, 0.440573], [0.274149, 0.751988, 0.436601], [0.281477, 0.755203, 0.432552], [0.288921, 0.758394, 0.428426], [0.296479, 0.761561, 0.424223], [0.304148, 0.764704, 0.419943], [0.311925, 0.767822, 0.415586], [0.319809, 0.770914, 0.411152], [0.327796, 0.773980, 0.406640], [0.335885, 0.777018, 0.402049], [0.344074, 0.780029, 0.397381], [0.352360, 0.783011, 0.392636], [0.360741, 0.785964, 0.387814], [0.369214, 0.788888, 0.382914], [0.377779, 0.791781, 0.377939], [0.386433, 0.794644, 0.372886], [0.395174, 0.797475, 0.367757], [0.404001, 0.800275, 0.362552], [0.412913, 0.803041, 0.357269], [0.421908, 0.805774, 0.351910], [0.430983, 0.808473, 0.346476], [0.440137, 0.811138, 0.340967], [0.449368, 0.813768, 0.335384], [0.458674, 0.816363, 0.329727], [0.468053, 0.818921, 0.323998], [0.477504, 0.821444, 0.318195], [0.487026, 0.823929, 0.312321], [0.496615, 0.826376, 
0.306377], [0.506271, 0.828786, 0.300362], [0.515992, 0.831158, 0.294279], [0.525776, 0.833491, 0.288127], [0.535621, 0.835785, 0.281908], [0.545524, 0.838039, 0.275626], [0.555484, 0.840254, 0.269281], [0.565498, 0.842430, 0.262877], [0.575563, 0.844566, 0.256415], [0.585678, 0.846661, 0.249897], [0.595839, 0.848717, 0.243329], [0.606045, 0.850733, 0.236712], [0.616293, 0.852709, 0.230052], [0.626579, 0.854645, 0.223353], [0.636902, 0.856542, 0.216620], [0.647257, 0.858400, 0.209861], [0.657642, 0.860219, 0.203082], [0.668054, 0.861999, 0.196293], [0.678489, 0.863742, 0.189503], [0.688944, 0.865448, 0.182725], [0.699415, 0.867117, 0.175971], [0.709898, 0.868751, 0.169257], [0.720391, 0.870350, 0.162603], [0.730889, 0.871916, 0.156029], [0.741388, 0.873449, 0.149561], [0.751884, 0.874951, 0.143228], [0.762373, 0.876424, 0.137064], [0.772852, 0.877868, 0.131109], [0.783315, 0.879285, 0.125405], [0.793760, 0.880678, 0.120005], [0.804182, 0.882046, 0.114965], [0.814576, 0.883393, 0.110347], [0.824940, 0.884720, 0.106217], [0.835270, 0.886029, 0.102646], [0.845561, 0.887322, 0.099702], [0.855810, 0.888601, 0.097452], [0.866013, 0.889868, 0.095953], [0.876168, 0.891125, 0.095250], [0.886271, 0.892374, 0.095374], [0.896320, 0.893616, 0.096335], [0.906311, 0.894855, 0.098125], [0.916242, 0.896091, 0.100717], [0.926106, 0.897330, 0.104071], [0.935904, 0.898570, 0.108131], [0.945636, 0.899815, 0.112838], [0.955300, 0.901065, 0.118128], [0.964894, 0.902323, 0.123941], [0.974417, 0.903590, 0.130215], [0.983868, 0.904867, 0.136897], [0.993248, 0.906157, 0.143936]] from matplotlib.colors import ListedColormap cmaps = {} for (name, data) in (('magma', _magma_data), ('inferno', _inferno_data), ('plasma', _plasma_data), ('viridis', _viridis_data)): cmaps[name] = ListedColormap(data, name=name) magma = cmaps['magma'] inferno = cmaps['inferno'] plasma = cmaps['plasma'] viridis = cmaps['viridis']
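# --- Illustrative usage sketch (not part of the colormap module above) -------
# Each of the four ListedColormap objects built above can be passed anywhere
# matplotlib accepts a colormap. This sketch assumes the module is importable
# as `colormaps` (the record lists it at utilities/colormap/colormaps.py); the
# synthetic array is made up purely for illustration.
import numpy as np
import matplotlib.pyplot as plt

from colormaps import viridis  # the ListedColormap built from _viridis_data

# A small synthetic 2-D field to colour-map.
data = np.sin(np.linspace(0, 4 * np.pi, 200)).reshape(10, 20)

fig, ax = plt.subplots()
im = ax.imshow(data, cmap=viridis)  # magma/inferno/plasma work the same way
fig.colorbar(im, ax=ax)
plt.show()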
mit
vermouthmjl/scikit-learn
sklearn/metrics/classification.py
1
69294
"""Metrics to assess performance on classification task given class prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Jatin Shah <jatindshah@gmail.com> # Saurabh Jha <saurabh.jhaa@gmail.com> # Bernardo Stein <bernardovstein@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from ..preprocessing import LabelBinarizer, label_binarize from ..preprocessing import LabelEncoder from ..utils import check_array from ..utils import check_consistent_length from ..utils import column_or_1d from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples from ..utils.sparsefuncs import count_nonzero from ..utils.fixes import bincount from ..exceptions import UndefinedMetricWarning def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target`` y_true : array or indicator matrix y_pred : array or indicator matrix """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true) type_pred = type_of_target(y_pred) y_type = set([type_true, type_pred]) if y_type == set(["binary", "multiclass"]): y_type = set(["multiclass"]) if len(y_type) > 1: raise ValueError("Can't handle mix of {0} and {1}" "".format(type_true, type_pred)) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if (y_type not in ["binary", "multiclass", "multilabel-indicator"]): raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' return y_type, y_true, y_pred def _weighted_sum(sample_score, sample_weight, normalize=False): if normalize: return np.average(sample_score, weights=sample_weight) elif sample_weight is not None: return np.dot(sample_score, sample_weight) else: return sample_score.sum() def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the correctly classified samples (float), else it returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_similarity_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_similarity_score`` function. Examples -------- >>> import numpy as np >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` but predicted to be in group :math:`j`. Read more in the :ref:`User Guide <confusion_matrix>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- C : array, shape = [n_classes, n_classes] Confusion matrix References ---------- .. 
[1] `Wikipedia entry for the Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_ Examples -------- >>> from sklearn.metrics import confusion_matrix >>> y_true = [2, 0, 2, 2, 0, 1] >>> y_pred = [0, 0, 2, 2, 0, 2] >>> confusion_matrix(y_true, y_pred) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"]) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type not in ("binary", "multiclass"): raise ValueError("%s is not supported" % y_type) if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) if sample_weight is None: sample_weight = np.ones(y_true.shape[0], dtype=np.int) else: sample_weight = np.asarray(sample_weight) check_consistent_length(sample_weight, y_true, y_pred) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] # also eliminate weights of eliminated items sample_weight = sample_weight[ind] CM = coo_matrix((sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels) ).toarray() return CM def cohen_kappa_score(y1, y2, labels=None): """Cohen's kappa: a statistic that measures inter-annotator agreement. This function computes Cohen's kappa [1], a score that expresses the level of agreement between two annotators on a classification problem. It is defined as .. math:: \kappa = (p_o - p_e) / (1 - p_e) where :math:`p_o` is the empirical probability of agreement on the label assigned to any sample (the observed agreement ratio), and :math:`p_e` is the expected agreement when both annotators assign labels randomly. :math:`p_e` is estimated using a per-annotator empirical prior over the class labels [2]. Parameters ---------- y1 : array, shape = [n_samples] Labels assigned by the first annotator. y2 : array, shape = [n_samples] Labels assigned by the second annotator. The kappa statistic is symmetric, so swapping ``y1`` and ``y2`` doesn't change the value. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to select a subset of labels. If None, all labels that appear at least once in ``y1`` or ``y2`` are used. Returns ------- kappa : float The kappa statistic, which is a number between -1 and 1. The maximum value means complete agreement; zero or lower means chance agreement. References ---------- .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104. .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for computational linguistics". Computational Linguistic 34(4):555-596. 
""" confusion = confusion_matrix(y1, y2, labels=labels) P = confusion / float(confusion.sum()) p_observed = np.trace(P) p_expected = np.dot(P.sum(axis=0), P.sum(axis=1)) return (p_observed - p_expected) / (1 - p_expected) def jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None): """Jaccard similarity coefficient score The Jaccard index [1], or Jaccard similarity coefficient, defined as the size of the intersection divided by the size of the union of two label sets, is used to compare set of predicted labels for a sample to the corresponding set of labels in ``y_true``. Read more in the :ref:`User Guide <jaccard_similarity_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the sum of the Jaccard similarity coefficient over the sample set. Otherwise, return the average of Jaccard similarity coefficient. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the average Jaccard similarity coefficient, else it returns the sum of the Jaccard similarity coefficient over the sample set. The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equivalent to the ``accuracy_score``. It differs in the multilabel classification problem. References ---------- .. [1] `Wikipedia entry for the Jaccard index <https://en.wikipedia.org/wiki/Jaccard_index>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import jaccard_similarity_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> jaccard_similarity_score(y_true, y_pred) 0.5 >>> jaccard_similarity_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\ np.ones((2, 2))) 0.75 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): with np.errstate(divide='ignore', invalid='ignore'): # oddly, we may get an "invalid" rather than a "divide" error here pred_or_true = count_nonzero(y_true + y_pred, axis=1) pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1) score = pred_and_true / pred_or_true # If there is no label, it results in a Nan instead, we set # the jaccard to 1: lim_{x->0} x/x = 1 # Note with py2.6 and np 1.3: we can't check safely for nan. score[pred_or_true == 0.0] = 1.0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def matthews_corrcoef(y_true, y_pred, sample_weight=None): """Compute the Matthews correlation coefficient (MCC) for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. 
The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. Read more in the :ref:`User Guide <matthews_corrcoef>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. sample_weight : array-like of shape = [n_samples], default None Sample weights. Returns ------- mcc : float The Matthews correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the accuracy of prediction algorithms for classification: an overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_ .. [2] `Wikipedia entry for the Matthews Correlation Coefficient <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ Examples -------- >>> from sklearn.metrics import matthews_corrcoef >>> y_true = [+1, +1, +1, -1] >>> y_pred = [+1, -1, +1, +1] >>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS -0.33... """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type != "binary": raise ValueError("%s is not supported" % y_type) lb = LabelEncoder() lb.fit(np.hstack([y_true, y_pred])) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) mean_yt = np.average(y_true, weights=sample_weight) mean_yp = np.average(y_pred, weights=sample_weight) y_true_u_cent = y_true - mean_yt y_pred_u_cent = y_pred - mean_yp cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight) var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight) var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight) mcc = cov_ytyp / np.sqrt(var_yt * var_yp) if np.isnan(mcc): return 0. else: return mcc def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None): """Zero-one classification loss. If normalize is ``True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). The best performance is 0. Read more in the :ref:`User Guide <zero_one_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of misclassifications. Otherwise, return the fraction of misclassifications. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, If ``normalize == True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). Notes ----- In multilabel classification, the zero_one_loss function corresponds to the subset zero-one loss: for each sample, the entire set of labels must be correctly predicted, otherwise the loss for that sample is equal to one. 
See also -------- accuracy_score, hamming_loss, jaccard_similarity_score Examples -------- >>> from sklearn.metrics import zero_one_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> zero_one_loss(y_true, y_pred) 0.25 >>> zero_one_loss(y_true, y_pred, normalize=False) 1 In the multilabel case with binary label indicators: >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ score = accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight) if normalize: return 1 - score else: if sample_weight is not None: n_samples = np.sum(sample_weight) else: n_samples = _num_samples(y_true) return n_samples - score def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F1 score, also known as balanced F-score or F-measure The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) In the multi-class and multi-label case, this is the weighted average of the F1 score of each class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. 
This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- f1_score : float or array of float, shape = [n_unique_labels] F1 score of the positive class in binary classification or weighted average of the F1 scores of each class for the multiclass task. References ---------- .. [1] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import f1_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average=None) array([ 0.8, 0. , 0. ]) """ return fbeta_score(y_true, y_pred, 1, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F-beta score The F-beta score is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0. The `beta` parameter determines the weight of precision in the combined score. ``beta < 1`` lends more weight to precision, while ``beta > 1`` favors recall (``beta -> 0`` considers only precision, ``beta -> inf`` only recall). Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta: float Weight of precision in harmonic mean. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. 
``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] F-beta score of the positive class in binary classification or weighted average of the F-beta score of each class for the multiclass task. References ---------- .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 327-328. .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import fbeta_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) ... # doctest: +ELLIPSIS 0.33... >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) ... # doctest: +ELLIPSIS array([ 0.71..., 0. , 0. ]) """ _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label, average=average, warn_for=('f-score',), sample_weight=sample_weight) return f def _prf_divide(numerator, denominator, metric, modifier, average, warn_for): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero and raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning. """ result = numerator / denominator mask = denominator == 0.0 if not np.any(mask): return result # remove infs result[mask] = 0.0 # build appropriate warning # E.g. "Precision and F-score are ill-defined and being set to 0.0 in # labels with no predicted samples" axis0 = 'sample' axis1 = 'label' if average == 'samples': axis0, axis1 = axis1, axis0 if metric in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format(metric.title()) elif metric in warn_for: msg_start = '{0} is'.format(metric.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: return result msg = ('{0} ill-defined and being set to 0.0 {{0}} ' 'no {1} {2}s.'.format(msg_start, modifier, axis0)) if len(mask) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) return result def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None): """Compute precision, recall, F-measure and support for each class The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. 
The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, 1.0 by default The strength of recall versus precision in the F-score. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \ 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. warn_for : tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision: float (if average is not None) or array of float, shape =\ [n_unique_labels] recall: float (if average is not None) or array of float, , shape =\ [n_unique_labels] fbeta_score: float (if average is not None) or array of float, shape =\ [n_unique_labels] support: int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``y_true``. References ---------- .. 
[1] `Wikipedia entry for the Precision and recall <https://en.wikipedia.org/wiki/Precision_and_recall>`_ .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>` Examples -------- >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') ... # doctest: +ELLIPSIS (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE (array([ 0. , 0. , 0.66...]), array([ 0., 0., 1.]), array([ 0. , 0. , 0.8]), array([2, 2, 2])) """ average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) if beta <= 0: raise ValueError("beta should be >0 in the F-beta score") y_type, y_true, y_pred = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred) if average == 'binary' and (y_type != 'binary' or pos_label is None): warnings.warn('The default `weighted` averaging is deprecated, ' 'and from version 0.18, use of precision, recall or ' 'F-score with multiclass or multilabel data or ' 'pos_label=None will result in an exception. ' 'Please set an explicit value for `average`, one of ' '%s. In cross validation use, for instance, ' 'scoring="f1_weighted" instead of scoring="f1".' % str(average_options), DeprecationWarning, stacklevel=2) average = 'weighted' if y_type == 'binary' and pos_label is not None and average is not None: if average != 'binary': warnings.warn('From version 0.18, binary input will not be ' 'handled specially when using averaged ' 'precision/recall/F-score. ' 'Please use average=\'binary\' to report only the ' 'positive class performance.', DeprecationWarning) if labels is None or len(labels) <= 2: if pos_label not in present_labels: if len(present_labels) < 2: # Only negative labels return (0., 0., 0., 0) else: raise ValueError("pos_label=%r is not a valid label: %r" % (pos_label, present_labels)) labels = [pos_label] if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) # Calculate tp_sum, pred_sum, true_sum ### if y_type.startswith('multilabel'): sum_axis = 1 if average == 'samples' else 0 # All labels are index integers for multilabel. # Select labels: if not np.all(labels == present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels). ' 'Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels). 
' 'Got %d < 0' % np.min(labels)) y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) elif average == 'samples': raise ValueError("Sample-based precision, recall, fscore is " "not meaningful outside multilabel " "classification. See the accuracy_score instead.") else: le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = bincount(y_true, weights=sample_weight, minlength=len(labels)) # Retain only selected labels indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) # Finally, we have all our sufficient statistics. Divide! # beta2 = beta ** 2 with np.errstate(divide='ignore', invalid='ignore'): # Divide, and on zero-division, set scores to 0 and warn: # Oddly, we may get an "invalid" rather than a "divide" error # here. precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for) recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for) # Don't need to warn for F: either P or R warned, or tp == 0 where pos # and true are nonzero, in which case, F is well-defined and zero f_score = ((1 + beta2) * precision * recall / (beta2 * precision + recall)) f_score[tp_sum == 0] = 0.0 # Average the results if average == 'weighted': weights = true_sum if weights.sum() == 0: return 0, 0, 0, None elif average == 'samples': weights = sample_weight else: weights = None if average is not None: assert average != 'binary' or len(precision) == 1 precision = np.average(precision, weights=weights) recall = np.average(recall, weights=weights) f_score = np.average(f_score, weights=weights) true_sum = None # return no support return precision, recall, f_score, true_sum def precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the precision The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. 
labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : float (if average is not None) or array of float, shape =\ [n_unique_labels] Precision of the positive class in binary classification or weighted average of the precision of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import precision_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> precision_score(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS array([ 0.66..., 0. , 0. ]) """ p, _, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('precision',), sample_weight=sample_weight) return p def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the recall The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- recall : float (if average is not None) or array of float, shape =\ [n_unique_labels] Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average=None) array([ 1., 0., 0.]) """ _, r, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('recall',), sample_weight=sample_weight) return r def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2): """Build a text report showing the main classification metrics Read more in the :ref:`User Guide <classification_report>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. 
y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). sample_weight : array-like of shape = [n_samples], optional Sample weights. digits : int Number of digits for formatting output floating point values. Returns ------- report : string Text summary of the precision, recall, F1 score for each class. Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> avg / total 0.70 0.60 0.61 5 <BLANKLINE> """ if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) last_line_heading = 'avg / total' if target_names is None: target_names = ['%s' % l for l in labels] name_width = max(len(cn) for cn in target_names) width = max(name_width, len(last_line_heading), digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first column: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight) for i, label in enumerate(labels): values = [target_names[i]] for v in (p[i], r[i], f1[i]): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s[i])] report += fmt % tuple(values) report += '\n' # compute averages values = [last_line_heading] for v in (np.average(p, weights=s), np.average(r, weights=s), np.average(f1, weights=s)): values += ["{0:0.{1}f}".format(v, digits)] values += ['{0}'.format(np.sum(s))] report += fmt % tuple(values) return report def hamming_loss(y_true, y_pred, classes=None, sample_weight=None): """Compute the average Hamming loss. The Hamming loss is the fraction of labels that are incorrectly predicted. Read more in the :ref:`User Guide <hamming_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. classes : array, shape = [n_labels], optional Integer array of labels. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, Return the average Hamming loss between elements of ``y_true`` and ``y_pred``. See Also -------- accuracy_score, jaccard_similarity_score, zero_one_loss Notes ----- In multiclass classification, the Hamming loss corresponds to the Hamming distance between ``y_true`` and ``y_pred``, which is equivalent to the subset ``zero_one_loss`` function. In multilabel classification, the Hamming loss is different from the subset zero-one loss. The zero-one loss considers the entire set of labels for a given sample incorrect if it does not entirely match the true set of labels. Hamming loss is more forgiving in that it penalizes only the individual labels. The Hamming loss is upper bounded by the subset zero-one loss. When normalized over samples, the Hamming loss is always between 0 and 1. References ---------- .. 
[1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification: An Overview. International Journal of Data Warehousing & Mining, 3(3), 1-13, July-September 2007. .. [2] `Wikipedia entry on the Hamming distance <https://en.wikipedia.org/wiki/Hamming_distance>`_ Examples -------- >>> from sklearn.metrics import hamming_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> hamming_loss(y_true, y_pred) 0.25 In the multilabel case with binary label indicators: >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) 0.75 """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if classes is None: classes = unique_labels(y_true, y_pred) else: classes = np.asarray(classes) if sample_weight is None: weight_average = 1. else: weight_average = np.mean(sample_weight) if y_type.startswith('multilabel'): n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight) return (n_differences / (y_true.shape[0] * len(classes) * weight_average)) elif y_type in ["binary", "multiclass"]: return _weighted_sum(y_true != y_pred, sample_weight, normalize=True) else: raise ValueError("{0} is not supported".format(y_type)) def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Read more in the :ref:`User Guide <log_loss>`. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float Examples -------- >>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ lb = LabelBinarizer() T = lb.fit_transform(y_true) if T.shape[1] == 1: T = np.append(1 - T, T, axis=1) y_pred = check_array(y_pred, ensure_2d=False) # Clipping Y = np.clip(y_pred, eps, 1 - eps) # This happens in cases when elements in y_pred have type "str". if not isinstance(Y, np.ndarray): raise ValueError("y_pred should be an array of floats.") # If y_pred is of single dimension, assume y_true to be binary # and then check. if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) # Check if dimensions are consistent. 
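    # (T holds the binarized true labels and Y the clipped predicted
    # probabilities; both must end up with one column per class. Y is
    # renormalized below so each row sums to 1 before the per-sample loss
    # -sum(T * log(Y)) is taken.)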
check_consistent_length(T, Y) T = check_array(T) Y = check_array(Y) if T.shape[1] != Y.shape[1]: raise ValueError("y_true and y_pred have different number of classes " "%d, %d" % (T.shape[1], Y.shape[1])) # Renormalize Y /= Y.sum(axis=1)[:, np.newaxis] loss = -(T * np.log(Y)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None): """Average hinge loss (non-regularized) In binary class case, assuming labels in y_true are encoded with +1 and -1, when a prediction mistake is made, ``margin = y_true * pred_decision`` is always negative (since the signs disagree), implying ``1 - margin`` is always greater than 1. The cumulated hinge loss is therefore an upper bound of the number of mistakes made by the classifier. In multiclass case, the function expects that either all the labels are included in y_true or an optional labels argument is provided which contains all the labels. The multilabel margin is calculated according to Crammer-Singer's method. As in the binary case, the cumulated hinge loss is an upper bound of the number of mistakes made by the classifier. Read more in the :ref:`User Guide <hinge_loss>`. Parameters ---------- y_true : array, shape = [n_samples] True target, consisting of integers of two values. The positive label must be greater than the negative label. pred_decision : array, shape = [n_samples] or [n_samples, n_classes] Predicted decisions, as output by decision_function (floats). labels : array, optional, default None Contains all the labels for the problem. Used in multiclass hinge loss. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] `Wikipedia entry on the Hinge loss <https://en.wikipedia.org/wiki/Hinge_loss>`_ .. [2] Koby Crammer, Yoram Singer. On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines. Journal of Machine Learning Research 2, (2001), 265-292 .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models by Robert C. Moore, John DeNero. <http://www.ttic.edu/sigml/symposium2011/papers/ Moore+DeNero_Regularization.pdf>`_ Examples -------- >>> from sklearn import svm >>> from sklearn.metrics import hinge_loss >>> X = [[0], [1]] >>> y = [-1, 1] >>> est = svm.LinearSVC(random_state=0) >>> est.fit(X, y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=0, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-2], [3], [0.5]]) >>> pred_decision # doctest: +ELLIPSIS array([-2.18..., 2.36..., 0.09...]) >>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS 0.30... In the multiclass case: >>> X = np.array([[0], [1], [2], [3]]) >>> Y = np.array([0, 1, 2, 3]) >>> labels = np.array([0, 1, 2, 3]) >>> est = svm.LinearSVC() >>> est.fit(X, Y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=None, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-1], [2], [3]]) >>> y_true = [0, 2, 3] >>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS 0.56... 
""" check_consistent_length(y_true, pred_decision, sample_weight) pred_decision = check_array(pred_decision, ensure_2d=False) y_true = column_or_1d(y_true) y_true_unique = np.unique(y_true) if y_true_unique.size > 2: if (labels is None and pred_decision.ndim > 1 and (np.size(y_true_unique) != pred_decision.shape[1])): raise ValueError("Please include all labels in y_true " "or pass labels as third argument") if labels is None: labels = y_true_unique le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) mask = np.ones_like(pred_decision, dtype=bool) mask[np.arange(y_true.shape[0]), y_true] = False margin = pred_decision[~mask] margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1) else: # Handles binary class case # this code assumes that positive and negative labels # are encoded as +1 and -1 respectively pred_decision = column_or_1d(pred_decision) pred_decision = np.ravel(pred_decision) lbin = LabelBinarizer(neg_label=-1) y_true = lbin.fit_transform(y_true)[:, 0] try: margin = y_true * pred_decision except TypeError: raise TypeError("pred_decision should be an array of floats.") losses = 1 - margin # The hinge_loss doesn't penalize good enough predictions. losses[losses <= 0] = 0 return np.average(losses, weights=sample_weight) def _check_binary_probabilistic_predictions(y_true, y_prob): """Check that y_true is binary and y_prob contains valid probabilities""" check_consistent_length(y_true, y_prob) labels = np.unique(y_true) if len(labels) != 2: raise ValueError("Only binary classification is supported. " "Provided labels %s." % labels) if y_prob.max() > 1: raise ValueError("y_prob contains values greater than 1.") if y_prob.min() < 0: raise ValueError("y_prob contains values less than 0.") return label_binarize(y_true, labels)[:, 0] def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None): """Compute the Brier score. The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Which label is considered to be the positive label is controlled via the parameter pos_label, which defaults to 1. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. sample_weight : array-like of shape = [n_samples], optional Sample weights. pos_label : int (default: None) Label of the positive class. 
If None, the maximum label is used as positive class Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from sklearn.metrics import brier_score_loss >>> y_true = np.array([0, 1, 1, 0]) >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"]) >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3]) >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true_categorical, y_prob, \ pos_label="ham") # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- https://en.wikipedia.org/wiki/Brier_score """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if pos_label is None: pos_label = y_true.max() y_true = np.array(y_true == pos_label, int) y_true = _check_binary_probabilistic_predictions(y_true, y_prob) return np.average((y_true - y_prob) ** 2, weights=sample_weight)
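
# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the module API): a quick, hedged
# check of how the averaging modes defined above relate to plain accuracy on
# a toy multiclass problem. It assumes only the public ``sklearn.metrics``
# entry points already used in this file.
if __name__ == "__main__":
    from sklearn.metrics import accuracy_score, precision_recall_fscore_support

    y_true = [0, 1, 2, 0, 1, 2]
    y_pred = [0, 2, 1, 0, 0, 1]

    # For single-label multiclass input, micro-averaging counts every
    # prediction exactly once, so micro precision, recall and F-score all
    # reduce to accuracy.
    p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 average='micro')
    acc = accuracy_score(y_true, y_pred)
    assert abs(p - acc) < 1e-12
    assert abs(r - acc) < 1e-12
    assert abs(f - acc) < 1e-12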
bsd-3-clause
eegroopm/pyLATTICE
gui/pyLATTICE.py
1
74321
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ pyLATTICE is... """ from __future__ import division #necessary for python2 from __future__ import unicode_literals # define authorship information __authors__ = ['Evan Groopman', 'Thomas Bernatowicz'] __author__ = ','.join(__authors__) __credits__ = [] __copyright__ = 'Copyright (c) 2011-2014' __license__ = 'GPL' # maintanence information __maintainer__ = 'Evan Groopman' __email__ = 'eegroopm@gmail.com' """ Created on Wed Apr 11 14:46:56 2012 @author: Evan Groopman """ #Main imports from PyQt4 import QtCore, QtGui, uic import os, sys import numpy as np import pandas as pd import re #Matplotlib imports import matplotlib as mpl mpl.use('Qt4Agg') from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar # Local files in the resource directory import gui from resources.TableWidget import TableWidget from resources.Diffraction import Diffraction #from resources.pyqtresizer import logit,slResizer,Resizer from resources.IPythonConsole import IPythonConsole from resources.common import common from resources.matplotlibwidget import matplotlibWidget from resources.Dialogs import (MineralListDialog, NewMineralDialog, ManualConditionsDialog, SettingsDialog) try: from resources.dspace import DSpace print('Importing compiled "DSpace"') except ImportError as error: # Attempt autocompilation. import pyximport pyximport.install() from resources._dspace import DSpace print('Building "DSpace"') try: from resources.diffspot import CalcSpots, CalcSpotsHCP print('Importing compiled "DiffSpot"') except ImportError as error: # Attempt autocompilation. import pyximport pyximport.install() from resources._diffspot import CalcSpots, CalcSpotsHCP print('Building "DiffSpot"') #need different compiled versions of Cython modules depending on python version #if sys.version_info[0] == 3: # #from resources.dspace import DSpace#Cython function for calculating d-spaces # #from resources.diffspot import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates # from resources.pyqtresizer import logit,slResizer,Resizer #elif sys.version_info[0] == 2: # #from resources.dspace_py2 import DSpace#Cython function for calculating d-spaces # #from resources.diffspot_py2 import CalcSpots, CalcSpotsHCP#Cython function for calculating diffraction spot coordinates # from resources.pyqtresizer_py2 import logit,slResizer,Resizer #from Wulff_net import WULFF #dealing with unicode characters in windows, which breaks compiled linux rendering if sys.platform == 'win32': mpl.rc('font', **{'sans-serif' : 'Arial Unicode MS','family' : 'sans-serif'}) #elif sys.platform == 'linux':# and os.path.isfile('pyLATTICE'): #on linux AND executable file exists. 
Does nothing if running from source # print('Adjusting font') # mpl.rc('font',**{'sans-serif' : 'Bitstream Vera Sans','family' : 'sans-serif'}) #use LaTeX to render symbols #plt.rc('text', usetex=True) mpl.rcParams['mathtext.default'] = 'regular' #mpl.rcParams['text.latex.preamble'] = [r'\usepackage{textcomp}'] #mpl.rcParams['text.latex.unicode'] = True #Other try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s ################################################################################ ## Gui file ## # Set up window class pyLATTICE_GUI(QtGui.QMainWindow): def __init__(self, parent=None): super(pyLATTICE_GUI, self).__init__(parent) # load the ui gui.loadUi(__file__,self) self.version = gui.__version__ self._grabCommon() self.Diffraction = Diffraction() self.DiffWidget = matplotlibWidget(self.common,self.Diffraction) #mplwidget can access common data #self.DiffWidget.setStyleSheet("font-family: 'Arial Unicode MS', Arial, sans-serif; font-size: 15px;") self.verticalLayout.addWidget(self.DiffWidget) self.DiffWidget.distances.connect(self.on_distances_sent) #self.DiffWidget = self.MplWidget self.Plot = self.DiffWidget.canvas.ax self.Plot.axis('equal') #locks aspect ratio 1:1, even when zooming #matplotlibWidget.setupToolbar(self.DiffWidget.canvas, self.DiffTab) # Create the navigation toolbar, tied to the canvas self.mpl_toolbar = NavigationToolbar(self.DiffWidget.canvas, self.DiffTab) #add widgets to toolbar self.comboBox_rotate = QtGui.QComboBox() self.checkBox_labels = QtGui.QCheckBox("Labels") self.checkBox_labels.setChecked(True) self.mpl_toolbar.addWidget(self.comboBox_rotate) self.mpl_toolbar.addWidget(self.checkBox_labels) #add toolbar to tabs self.verticalLayout.addWidget(self.mpl_toolbar) #Plot initial zero spot self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black') self.Plot.set_xlim([-5,5]) self.Plot.set_ylim([-5,5]) #self.Plot.annotate('0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom', #bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01)) #Initialize Metric tensor Tables self.Gtable_size = (200,200) self.Gtable = TableWidget() self.G_inv_table = TableWidget() self.tensorlayout.addWidget(self.Gtable,2,0) #third row, first column self.tensorlayout.addWidget(self.G_inv_table,2,1) self.Gtable.resize(self.Gtable_size[0],self.Gtable_size[1]) self.G_inv_table.resize(self.Gtable_size[0],self.Gtable_size[1]) self.Gtable.setData(np.eye(3)) self.G_inv_table.setData(np.eye(3)) for i in range(3): self.Gtable.setColumnWidth(i,self.Gtable_size[0]/4) self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5) self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35) self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5) self.a = 1; self.b=1; self.c=1 #Initialize parameter tables self.param_table_size = (200,200) self.Gparam_table = TableWidget() self.Gparam_inv_table = TableWidget() self.Gparam_table.resize(self.param_table_size[0],self.param_table_size[1]) self.Gparam_inv_table.resize(self.param_table_size[0],self.param_table_size[1]) initdat = np.transpose(np.array([[1,1,1,90,90,90]])) self.Gparam_table.setData(initdat) self.Gparam_inv_table.setData(initdat) self.Gparam_table.setHorizontalHeaderLabels(['Parameters']) self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters']) self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3']) self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*']) 
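        # The direct- and reciprocal-lattice parameter tables sit side by side
        # in the layout row just below the metric tensor tables.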
self.tensorlayout.addWidget(self.Gparam_table,3,0) self.tensorlayout.addWidget(self.Gparam_inv_table,3,1) for i in range(0,6): self.Gparam_table.setColumnWidth(i,self.param_table_size[0]) self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7) self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0]) self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7) #D-spacing table self.dspace_table_size = (400,630) self.dspace_table = TableWidget() self.dspace_table.resize(self.dspace_table_size[0],self.dspace_table_size[1]) self.dspace_table.setData(np.array([[0,0,0,0]])) self.dspace_table.setHorizontalHeaderLabels(['d-space','h','k','l']) self.dspacelayout.addWidget(self.dspace_table) self.dspace_table.setColumnWidth(0,80) for i in range(1,4): self.dspace_table.setColumnWidth(i,50) # Set miller indices self.miller_indices = [str(x) for x in range(-6,7)] self.comboBox_hmin.addItems(self.miller_indices) self.comboBox_kmin.addItems(self.miller_indices) self.comboBox_lmin.addItems(self.miller_indices) self.comboBox_hmin.setCurrentIndex(4) self.comboBox_kmin.setCurrentIndex(4) self.comboBox_lmin.setCurrentIndex(4) # Miller max indices set to be 1 greater than selected min index self.comboBox_hmax.addItems(self.miller_indices) self.comboBox_kmax.addItems(self.miller_indices) self.comboBox_lmax.addItems(self.miller_indices) #self.setMillerMax_h() #self.setMillerMax_k() #self.setMillerMax_l() self.comboBox_hmax.setCurrentIndex(8) self.comboBox_kmax.setCurrentIndex(8) self.comboBox_lmax.setCurrentIndex(8) #Set zone axis parameters #by default set as [0 0 1] zone_indices = [str(x) for x in range(-5,6)] self.comboBox_u.addItems(zone_indices) self.comboBox_v.addItems(zone_indices) self.comboBox_w.addItems(zone_indices) self.comboBox_u.setCurrentIndex(5) self.comboBox_v.setCurrentIndex(5) self.comboBox_w.setCurrentIndex(6) #set calculator comboboxes self.comboBox_h1.addItems(self.miller_indices) self.comboBox_h2.addItems(self.miller_indices) self.comboBox_k1.addItems(self.miller_indices) self.comboBox_k2.addItems(self.miller_indices) self.comboBox_l1.addItems(self.miller_indices) self.comboBox_l2.addItems(self.miller_indices) self.comboBox_h1.setCurrentIndex(7) self.comboBox_h2.setCurrentIndex(8) self.comboBox_k1.setCurrentIndex(6) self.comboBox_k2.setCurrentIndex(6) self.comboBox_l1.setCurrentIndex(6) self.comboBox_l2.setCurrentIndex(6) #Initialize mineral database combobox self.setMineralList() #Initialize rotation of diffraction pattern. #Will only offer 0,90,180,270 degrees rotate_items = ['-180','-150','-120','-90','-60','-30','0','30','60','90','120','150','180'] self.comboBox_rotate.addItems(rotate_items) self.comboBox_rotate.setCurrentIndex(6) #zero by default #get values in energy, cam legnth, cam const. 
combo boxes self.spinBox_beamenergy.setValue(int(self.common.beamenergy)) self.spinBox_camlength.setValue(int(self.common.camlength)) self.doubleSpinBox_camconst.setValue(self.common.camconst) #Initialize signals and slots #This needs to go here after setting Miller indices #When initializing, it runs Recalculate to do metric tensor and d-spacings #must go before setting crystal types, but after setting all of the combo boxes #combo boxes recalculate each time their index changes once the signals/slots set up #if signals/slots placed before, will recalc d-spacings every time you initialize a combobox value self.signals_slots() # Set crystal type combo box items: self.crystaltypes = ['Cubic','Tetragonal','Orthorhombic','Trigonal', 'Hexagonal','Monoclinic','Triclinic'] self.comboBox_crystaltype.addItems(self.crystaltypes) #Redo some labels in unicode/greek characters self.label_alpha.setText(u'\u03B1') self.label_beta.setText(u'\u03B2') self.label_gamma.setText(u'\u03B3') self.label_dist_recip.setText(u'Reciprocal Distance (\u212B\u207B\u00B9 )') self.label_dist_real.setText(u'Real Distance (\u212B)') self.label_dist_film.setText(u'Film Distance (cm)') self.label_angle.setText(u'Angle (\u00B0)') v = self.version.split('.') pv = v[0] + '.' + v[1] #only major/minor versions. not bugfixes self.label_pyLATTICE.setText(u'pyLATTICE %s' % pv) #initialize popup IPython console #can interact with specific data self._initIPython(self.common) def _grabCommon(self): """Get all common variables.""" self.common = common() self._overline_strings = self.common._overline_strings self.DSpaces = self.common.DSpaces self.ZoneAxis = self.common.ZoneAxis self.u = self.common.u self.v = self.common.v self.w = self.common.w #lattice parameters and angles self.a = self.common.a self.b = self.common.b self.c = self.common.c self.astar = self.common.astar self.bstar = self.common.bstar self.cstar = self.common.cstar self.alpha = self.common.alpha self.beta = self.common.beta self.gamma = self.common.gamma self.alphastar = self.common.alphastar self.betastar = self.common.betastar self.gammastar = self.common.gammastar #TEM params self.beamenergy = self.common.beamenergy self.camlength = self.common.camlength self.camconst = self.common.camconst self.wavelength = self.common.wavelength #SpaceGroup data self.sg = self.common.sg self.sghex = self.common.sghex self.mineraldb = self.common.mineraldb self.manualConds = self.common.manualConds #manual space group conditions def updateCommon(self): """Update all of the common variables and push these to the IPython console""" self.common.DSpaces = self.DSpaces self.common.ZoneAxis = self.ZoneAxis self.common.u = self.u self.common.v = self.v self.common.w = self.w self.common.a = self.a self.common.b = self.b self.common.c = self.c self.common.astar = self.astar self.common.bstar = self.bstar self.common.cstar = self.cstar self.common.alpha = self.alpha self.common.beta = self.beta self.common.gamma = self.gamma self.common.alphastar = self.alphastar self.common.betastar = self.betastar self.common.gammastar = self.gammastar #mineral database and manual conditions #self.mineraldb = self.common.mineraldb self.common.manualConds = self.manualConds #manual space group conditions self.common.beamenergy = self.beamenergy = self.spinBox_beamenergy.value() self.common.camlength = self.camlength = self.spinBox_camlength.value() self.common.camconst = self.camconst = self.doubleSpinBox_camconst.value() self.common.wavelength = self.wavelength = self.common.Wavelength(self.beamenergy) 
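        # Push the refreshed parameters into the embedded IPython console's
        # namespace so it stays in sync with the GUI.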
self.updateIPY(self.common) @QtCore.pyqtSlot(str,str,str,str) def on_distances_sent(self,recip_d, real_d, film_d, angle): self.lineEdit_recip_2.setText(recip_d) self.lineEdit_real_2.setText(real_d) self.lineEdit_film_2.setText(film_d) self.lineEdit_angle_3.setText(angle) def Recalculate(self): """Run MetricTensor() and D_Spacigns(). For use when slider hasn't changed""" self.MetricTensor() self.D_Spacings() def ReplotDiffraction(self): self.Recalculate() try: self.PlotDiffraction() except UnboundLocalError: pass # def Print(self): # """test print fn""" # print(self.comboBox_spacegroup.currentIndex()) def signals_slots(self): """All of the signals and slots not in .ui file""" #Testing #QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),WULFF) ### Menu actions QtCore.QObject.connect(self.actionClose, QtCore.SIGNAL(_fromUtf8("triggered()")), self.close) QtCore.QObject.connect(self.actionAbout, QtCore.SIGNAL(_fromUtf8("triggered()")), self.About) QtCore.QObject.connect(self.actionHow_to, QtCore.SIGNAL(_fromUtf8("triggered()")), self.HowTo) QtCore.QObject.connect(self.actionSave_D_spacings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveDSpace) QtCore.QObject.connect(self.actionRemove_DB_Minerals, QtCore.SIGNAL(_fromUtf8("triggered()")), self.removeMinerals) QtCore.QObject.connect(self.actionSave_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.SaveMineralDB) QtCore.QObject.connect(self.actionLoad_Mineral_Database, QtCore.SIGNAL(_fromUtf8("triggered()")), self.LoadMineralDB) QtCore.QObject.connect(self.actionAppendMineral, QtCore.SIGNAL(_fromUtf8("triggered()")), self.AppendMineral) QtCore.QObject.connect(self.actionIPython_Console, QtCore.SIGNAL(_fromUtf8("triggered()")), self.IPY) QtCore.QObject.connect(self.actionManualCond, QtCore.SIGNAL(_fromUtf8("triggered()")), self.ManualConditions) QtCore.QObject.connect(self.actionSettings, QtCore.SIGNAL(_fromUtf8("triggered()")), self.setSettings) ### Command buttons QtCore.QObject.connect(self.command_Plot, QtCore.SIGNAL(_fromUtf8("clicked()")),self.PlotDiffraction) QtCore.QObject.connect(self.command_recalculate, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Recalculate) #QtCore.QObject.connect(self.command_Wulff, QtCore.SIGNAL(_fromUtf8("clicked()")),self.updateIPY) ### crystal and cell type actions QtCore.QObject.connect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType) QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions) QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup) QtCore.QObject.connect(self.checkBox_obverse, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.D_Spacings) QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) #QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings) ### Navigation Toolbar buttons QtCore.QObject.connect(self.comboBox_rotate, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.PlotDiffraction) QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction) #labels checkbox ### Checkboxes and Miller indices QtCore.QObject.connect(self.checkBox_samemin, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.sameMin) QtCore.QObject.connect(self.checkBox_samemax, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), 
self.sameMax) QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.checkBox_showforbidden, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.PlotDiffraction) ### Sliders/spin boxes: lattice parameters QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble) QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble) QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.slider_to_spindouble) QtCore.QObject.connect(self.hSlider_a, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) QtCore.QObject.connect(self.hSlider_b, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) QtCore.QObject.connect(self.hSlider_c, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider) QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider) QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.spindouble_to_slider) QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor) QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor) QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.MetricTensor) #QtCore.QObject.connect(self.doubleSpinBox_a, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings) #QtCore.QObject.connect(self.doubleSpinBox_b, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings) #QtCore.QObject.connect(self.doubleSpinBox_c, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.D_Spacings) QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor) QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor) QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.MetricTensor) QtCore.QObject.connect(self.hSlider_alpha, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) QtCore.QObject.connect(self.hSlider_beta, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) QtCore.QObject.connect(self.hSlider_gamma, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.D_Spacings) #Spinboxes beam energy, cam length, camconst 
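        # Editing any of these values triggers updateCommon(), which also
        # recomputes the electron wavelength from the new beam energy.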
QtCore.QObject.connect(self.spinBox_beamenergy,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon) QtCore.QObject.connect(self.spinBox_camlength,QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),self.updateCommon) QtCore.QObject.connect(self.doubleSpinBox_camconst,QtCore.SIGNAL(_fromUtf8("valueChanged(double)")),self.updateCommon) #Instances to recalculate metric tensor and d-spacings #only enable these once you get miller maxes sorted out so they don't change QtCore.QObject.connect(self.checkBox_zoneaxis, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.DiffWidget, QtCore.SLOT(_fromUtf8("setEnabled(bool)"))) QtCore.QObject.connect(self.comboBox_u, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction) QtCore.QObject.connect(self.comboBox_v, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction) QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction) QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings) QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.D_Spacings) #QtCore.QObject.connect(self.checkBox_labels, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.UpdatePlot) #QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax) #QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax) #QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.TempMax) #QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate) #QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate) #QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.Recalculate) #QtCore.QObject.connect(self.comboBox_w, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.ReplotDiffraction) #Calculator Tab QtCore.QObject.connect(self.checkBox_normals, QtCore.SIGNAL(_fromUtf8("toggled(bool)")),self.CalcLabels) QtCore.QObject.connect(self.command_angle, QtCore.SIGNAL(_fromUtf8("clicked()")),self.Calculator) def _initIPython(self,common): """Initialize IPython console from which the user can interact with data/files""" banner = """Welcome to the pyLATTICE IPython Qt4 Console. You are here to interact with data and parameters - Python command line knowledge required. Use the 'whos' command for a list of available variables. Sometimes this does not work the first time. Imported packages include: pylab (including numpy modules) as 'pl'; pandas as 'pd' \n""" self.ipywidget = IPythonConsole(common,banner=banner) def IPY(self): self.ipywidget.SHOW() def updateIPY(self,common): self.ipyvars = common.__dict__ self.ipywidget.pushVariables(self.ipyvars) def slider_to_spindouble(self,slider): """Sliders only send/receive int data. 
Converts int to double by dividing by 100.""" if self.hSlider_a.isSliderDown(): self.a = self.hSlider_a.value() / 100 self.doubleSpinBox_a.setValue(self.a) elif self.hSlider_b.isSliderDown(): self.b = self.hSlider_b.value() / 100 self.doubleSpinBox_b.setValue(self.b) elif self.hSlider_c.isSliderDown(): self.c = self.hSlider_c.value() / 100 self.doubleSpinBox_c.setValue(self.c) def spindouble_to_slider(self,spinbox): """Converts spindouble entry into int for slider (multiply by 100)""" #There may be some redundancy in the connections setting values. #hopefully this does not slow the program down. #without these, some aspect often lags and gives the wrong value if self.comboBox_crystaltype.currentText() == 'Cubic': self.a = self.doubleSpinBox_a.value() self.hSlider_a.setValue(self.a * 100) self.hSlider_b.setValue(self.a * 100);self.doubleSpinBox_b.setValue(self.a) self.hSlider_c.setValue(self.a * 100);self.doubleSpinBox_c.setValue(self.a) elif self.comboBox_crystaltype.currentText() == 'Tetragonal': self.a = self.doubleSpinBox_a.value() self.hSlider_a.setValue(self.a * 100) self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a) elif self.comboBox_crystaltype.currentText() == 'Trigonal': self.a = self.doubleSpinBox_a.value() self.hSlider_a.setValue(self.a * 100) self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a) elif self.comboBox_crystaltype.currentText() == 'Hexagonal': self.a = self.doubleSpinBox_a.value() self.hSlider_a.setValue(self.a * 100) self.hSlider_b.setValue(self.a * 100); self.doubleSpinBox_b.setValue(self.a) else: self.a = self.doubleSpinBox_a.value() self.hSlider_a.setValue(self.a * 100) self.b = self.doubleSpinBox_b.value() self.hSlider_b.setValue(self.b * 100) self.c = self.doubleSpinBox_c.value() self.hSlider_c.setValue(self.c * 100) def setMillerMax_h(self): """Sets the items available for the max miller indices to include everything greater than the selected min index""" self.miller_max_h = [str(x) for x in range(int(self.comboBox_hmin.currentText()) + 1,7)] self.comboBox_hmax.clear() self.comboBox_hmax.addItems(self.miller_max_h) def setMillerMax_k(self): """Sets the items available for the max miller indices to include everything greater than the selected min index""" self.miller_max_k = [str(x) for x in range(int(self.comboBox_kmin.currentText()) + 1,7)] self.comboBox_kmax.clear() self.comboBox_kmax.addItems(self.miller_max_k) def setMillerMax_l(self): """Sets the items available for the max miller indices to include everything greater than the selected min index""" self.miller_max_l = [str(x) for x in range(int(self.comboBox_lmin.currentText()) + 1,7)] self.comboBox_lmax.clear() self.comboBox_lmax.addItems(self.miller_max_l) def sameMin(self): if not self.checkBox_samemin.isChecked(): #change to value_changed, not index changed. 
lengths may be different if checkboxes aren't clicked QtCore.QObject.disconnect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.disconnect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.disconnect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) elif self.checkBox_samemin.isChecked(): QtCore.QObject.connect(self.comboBox_hmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_kmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_lmin, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmin,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) def sameMax(self): if not self.checkBox_samemax.isChecked(): QtCore.QObject.disconnect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.disconnect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.disconnect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) elif self.checkBox_samemax.isChecked(): QtCore.QObject.connect(self.comboBox_hmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_kmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_kmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_lmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) QtCore.QObject.connect(self.comboBox_lmax, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(int)")), self.comboBox_hmax,QtCore.SLOT(_fromUtf8("setCurrentIndex(int)"))) def setMineral(self): i = self.comboBox_mineraldb.currentIndex() if i == 0: pass else: #disconnect d-space calculations till the end QtCore.QObject.disconnect(self.comboBox_crystaltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType) QtCore.QObject.disconnect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions) QtCore.QObject.disconnect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup) m = self.mineraldb.loc[i] ind = ['Cubic','Tetragonal','Orthorhombic','Trigonal','Hexagonal','Monoclinic','Triclinic'].index(m.Crystal) self.comboBox_crystaltype.setCurrentIndex(ind) self.setCellType() ind = self.celltypes.index(m.UnitCell) self.comboBox_celltype.setCurrentIndex(ind) self.setConditions() ind = self.sgnumbers.index(m.SpaceGroup) self.comboBox_spacegroup.setCurrentIndex(ind) #now a,b,c paramters #print(self.sgnumbers) self.doubleSpinBox_a.setValue(m.a) self.a = m.a if not np.isnan(m.b): self.doubleSpinBox_b.setValue(m.b) self.b = m.b if not np.isnan(m.c): self.doubleSpinBox_c.setValue(m.c) self.c = m.c try: self.manualConds = m.SpecialConditions.split(';') except AttributeError: #ignore floats or nans self.manualConds = '' self.MetricTensor() #reconnect and calculate QtCore.QObject.connect(self.comboBox_crystaltype, 
QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setCellType) QtCore.QObject.connect(self.comboBox_celltype, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setConditions) QtCore.QObject.connect(self.spinBox_spacegroup, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.SpaceGroupLookup) self.D_Spacings() def setCellType(self): """Sets the unit cell possibilities based upon the crystal type selected""" self.comboBox_celltype.clear() self.comboBox_spacegroup.clear() self.celltypes = [] if self.comboBox_crystaltype.currentText() == 'Cubic': self.celltypes = ['Primitive','Face Centered','Body Centered'] self.length_label = u' a = b = c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(True) self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(True) self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 90 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Tetragonal': self.celltypes = ['Primitive','Body Centered'] self.length_label = u' a = b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False) self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 90 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Orthorhombic': self.celltypes = ['Primitive','Face Centered','Body Centered','(001) Base Centered','(100) Base Centered'] self.length_label = u' a ≠ b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False) self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u' \u03B1 = \u03B2 = \u03B3 = 90°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 90 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Trigonal': self.celltypes = ['Primitive','Rhombohedral','Rhombohedral, Hexagonal Axes','Hexagonal'] self.length_label = u' a = b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False) 
self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 120 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Hexagonal': self.celltypes = ['Primitive'] self.length_label = u' a = b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(True); self.hSlider_c.setDisabled(False) self.doubleSpinBox_b.setDisabled(True); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u' \u03B1 = \u03B2 = 90°, \u03B3 = 120°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 120 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Monoclinic': self.celltypes = ['Primitive','(001) Base Centered'] self.length_label = u' a ≠ b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False) self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u' \u03B1 = \u03B3 = 90°' self.label_angle_equality.setText(self.angles_label) self.alpha = 90; self.beta = 90; self.gamma = 90 self.hSlider_alpha.setValue(self.alpha); self.hSlider_beta.setValue(self.beta); self.hSlider_gamma.setValue(self.gamma) #disable editing sliders and spinboxes self.hSlider_alpha.setDisabled(True); self.hSlider_beta.setDisabled(True); self.hSlider_gamma.setDisabled(True) self.spinBox_alpha.setDisabled(True); self.spinBox_beta.setDisabled(True); self.spinBox_gamma.setDisabled(True) elif self.comboBox_crystaltype.currentText() == 'Triclinic': self.celltypes = ['Primitive'] self.length_label = u' a ≠ b ≠ c' self.label_lattice_equality.setText(self.length_label) self.hSlider_b.setDisabled(False); self.hSlider_c.setDisabled(False) self.doubleSpinBox_b.setDisabled(False); self.doubleSpinBox_c.setDisabled(False) self.angles_label = u'' self.label_angle_equality.setText(self.angles_label) #Enable editing sliders and spinboxes self.hSlider_alpha.setDisabled(False); self.hSlider_beta.setDisabled(False); self.hSlider_gamma.setDisabled(False) self.spinBox_alpha.setDisabled(False); self.spinBox_beta.setDisabled(False); self.spinBox_gamma.setDisabled(False) self.comboBox_celltype.addItems(self.celltypes) #self.Recalculate() def setConditions(self): """Sets conditions based upon which unit cell type is chosen. 
Store equations in strings and then evaluate and solve with eval()""" geom = self.comboBox_crystaltype.currentText() unit = self.comboBox_celltype.currentText() if unit in ['Rhombohedral','Rhombohedral, Hexagonal Axes']: self.checkBox_obverse.setDisabled(False) else: self.checkBox_obverse.setDisabled(True) try: #there is a loop I cant find where this tries to calculate conditions before unit cell type is set resulting in index error. #this simply supresses the error, as another pass is always fine. if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']: rhomhex=True self.conditions = np.unique(self.sghex[self.sghex['Unit Cell'] == unit]['Conditions'])[0] else: rhomhex=False self.conditions = np.unique(self.sg[self.sg['Unit Cell'] == unit]['Conditions'])[0] #grab individual condition b/c of repetition self.setSpaceGroups(geom,unit,rhomhex) #QtCore.QObject.disconnect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings) self.comboBox_spacegroup.clear() self.comboBox_spacegroup.addItems(self.sgnumlist) #QtCore.QObject.connect(self.comboBox_spacegroup, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.D_Spacings) self.Recalculate() except IndexError: pass def setSpaceGroups(self,geom,unit,rhomhex=False): """Sets the space group options based upon crystal geometry and unit cell type""" if rhomhex: sg = self.sghex elif not rhomhex: sg = self.sg self.sgnumbers = list(sg[(sg['Geometry'] == geom) & (sg['Unit Cell'] == unit)].index) self.sglist = list(sg.loc[self.sgnumbers,'Patterson']) self.sgnumlist = [str(x) + ': ' + y for x,y in zip(self.sgnumbers,self.sglist)] def SpaceGroupConditions(self,number): """Looks up the space-group-specific allowed diffraction spots. number is the specific space group number to look up.""" if not self.checkBox_spacegroup.isChecked(): sg_conditions = 'True' elif self.checkBox_spacegroup.isChecked(): #make sure number is an integer #something is wrong with some FCC crystals number = int(number) unit = self.comboBox_celltype.currentText() if unit in ['Rhombohedral, Hexagonal Axes','Hexagonal']: sg = self.sghex else: sg = self.sg sg_conditions = sg.loc[number,'SG Conditions'] return sg_conditions def SpaceGroupLookup(self): """Takes input from slider/spinbox and outputs sapcegroup info into text line""" index = self.spinBox_spacegroup.value() c = self.sg.loc[index,'Geometry'] #u = self.sg.loc[index,'Unit Cell'] sg = self.sg.loc[index,'Patterson'] text = ': '.join([c,sg]) self.label_spacegrouplookup.setText(text) def MetricTensor(self): """Calculate and show G, the metric tensor, and G*, the inverse metric tensor. 
Also call function that outputs parameters into tables.""" self.G = np.zeros([3,3]) #self.G_inv #remember, indices start at 0 #metric tensor is axially symmetric self.a = self.doubleSpinBox_a.value() self.b = self.doubleSpinBox_b.value() self.c = self.doubleSpinBox_c.value() self.G[0,0] = self.a**2 self.G[0,1] = round(self.a * self.b * np.cos(np.radians(self.spinBox_gamma.value())),6) self.G[1,0] = self.G[0,1] self.G[1,1] = self.b**2 self.G[0,2] = round(self.a * self.c * np.cos(np.radians(self.spinBox_beta.value())),6) self.G[2,0] = self.G[0,2] self.G[2,2] = self.doubleSpinBox_c.value()**2 self.G[1,2] = round(self.c * self.b * np.cos(np.radians(self.spinBox_alpha.value())),6) self.G[2,1] = self.G[1,2] # calc G inverse, G* self.G_inv = np.linalg.inv(self.G) self.Gtable.setData(self.G) #self.Gtable.resizeColumnsToContents() self.G_inv_table.setData(self.G_inv) #self.G_inv_table.resizeColumnsToContents() for i in range(0,3): self.Gtable.setColumnWidth(i,self.Gtable_size[0]/3.35) self.Gtable.setRowHeight(i,self.Gtable_size[1]/3.5) self.G_inv_table.setColumnWidth(i,self.Gtable_size[0]/3.35) self.G_inv_table.setRowHeight(i,self.Gtable_size[1]/3.5) self.Parameters() def Parameters(self): """Grabs current parameters and outputs them in tables. Calculates reciprocal lattice parameters as well. Must make it deal with complex numbers, but really only necessary for Triclinic...""" self.parameters_direct = np.transpose(np.array([[self.doubleSpinBox_a.value(),self.doubleSpinBox_b.value(),self.doubleSpinBox_c.value(),self.spinBox_alpha.value(),self.spinBox_beta.value(),self.spinBox_gamma.value()]])) self.astar = np.sqrt(self.G_inv[0,0]); self.bstar = np.sqrt(self.G_inv[1,1]); self.cstar = np.sqrt(self.G_inv[2,2]) self.gammastar = np.arccos(self.G_inv[0,1] / (self.astar * self.bstar))*180 / np.pi self.betastar = np.arccos(self.G_inv[0,2] / (self.astar * self.cstar))*180 / np.pi self.alphastar = np.arccos(self.G_inv[1,2] / (self.cstar * self.bstar))*180 / np.pi self.parameters_reciprocal = np.transpose(np.array([[self.astar,self.bstar,self.cstar,self.alphastar,self.betastar,self.gammastar]])) self.Gparam_table.setData(self.parameters_direct) self.Gparam_inv_table.setData(self.parameters_reciprocal) self.Gparam_table.setHorizontalHeaderLabels(['Parameters']) self.Gparam_inv_table.setHorizontalHeaderLabels(['Parameters']) self.Gparam_table.setVerticalHeaderLabels([u'a',u'b',u'c',u'\u03B1',u'\u03B2',u'\u03B3']) self.Gparam_inv_table.setVerticalHeaderLabels([u'a*',u'b*',u'c*',u'\u03B1*',u'\u03B2*',u'\u03B3*']) for i in range(0,6): self.Gparam_table.setColumnWidth(i,self.param_table_size[0]) self.Gparam_table.setRowHeight(i,self.param_table_size[0]/6.7) self.Gparam_inv_table.setColumnWidth(i,self.param_table_size[0]) self.Gparam_inv_table.setRowHeight(i,self.param_table_size[0]/6.7) def D_Spacings(self): """Calculates D-spacings using the metric tensor and places them in a table (sorted?)""" #grab spacegroup conditions #multiple different spacegroup conditions. e.g. 
eval('h==1 or k==1') returns a True if on is satisfied #add all conditions together into one string #full_conditions = self.conditions + ' and ' + sg_conditions if self.checkBox_zoneaxis.isChecked(): try: self.u = int(self.comboBox_u.currentText()) except ValueError: self.u = 0 try: self.v = int(self.comboBox_v.currentText()) except ValueError: self.v = 0 try: self.w = int(self.comboBox_w.currentText()) except ValueError: self.w = 0 #set "q" for rhombohedral obserse/reverse if self.checkBox_obverse.isChecked(): q = 1 elif not self.checkBox_obverse.isChecked(): q = -1 else: q = 0 #make pandas dataframe with multiindex h,k,l #reinitialize dataframe self.DSpaces = pd.DataFrame(columns = ['d-space','h','k','l']) self.Forbidden = pd.DataFrame(columns = ['d-space','h','k','l']) #maybe implement masking instead of loops hmin = int(self.comboBox_hmin.currentText()) hmax = int(self.comboBox_hmax.currentText()) kmin = int(self.comboBox_kmin.currentText()) kmax = int(self.comboBox_kmax.currentText()) lmin = int(self.comboBox_lmin.currentText()) lmax = int(self.comboBox_lmax.currentText()) gen_conditions = str(self.conditions) #needs to deal with possibility of conditional special statements, will update dspace.pyx #first calculate all general conditions self.DSpaces = DSpace(self.G_inv,self.u,self.v,self.w,hmin,hmax,kmin,kmax,lmin,lmax,gen_conditions,q) #now deal with special spacegroup conditions by removing invalid spots sg_conditions = self.SpaceGroupConditions(self.sgnumbers[self.comboBox_spacegroup.currentIndex()]) if self.manualConds != []: if sg_conditions == 'True': sg_conditions = '' for c in self.manualConds: sg_conditions += ';%s' % c sg_conditions = sg_conditions.lstrip(';') self.DSpaces = self.RemoveForbidden(self.DSpaces,sg_conditions) #sort in descending Dspace order, then by h values, then k, then l... 
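        # (Added note, environment assumption.)  DataFrame.sort(columns=...) is the
        # pre-0.17 pandas API; on newer pandas releases the equivalent call would be
        #     self.DSpaces.sort_values(by=['d-space','h','k','l'], ascending=False, inplace=True)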
self.DSpaces.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True) #reset indices for convenience later self.DSpaces.index = [x for x in range(len(self.DSpaces))] self.common.DSpaces = self.DSpaces #update DSpaces self.dspace_table.setData(self.DSpaces) self.dspace_table.setColumnWidth(0,80) #make d-space column a bit wider for i in range(1,4): self.dspace_table.setColumnWidth(i,45) elif not self.checkBox_zoneaxis.isChecked(): pass try: self.updateCommon() except AttributeError: #first go round ipython console hasn't been initialized yet pass def RemoveForbidden(self,d,sgconditions): #h = d['h']; k = d['k']; l = d['l'] f = pd.DataFrame(columns = ['d-space','h','k','l']) try: if eval(sgconditions): return(d) except (KeyError,SyntaxError): #if sgconditions not 'True' #d[(h==k) & ~(l%2==0)] #d = d.drop(r.index) #split multiple conditions up conds = sgconditions.split(';') for c in conds: #these should be if:then statements, so remove the if:~thens c = c.strip() if not c.startswith('if'): r = d[eval(c)] d = d.drop(r.index) else: c = c.lstrip('if').strip() iff, then = c.split(':') #eval doesnt care about spaces #needed for eval h = d.h; k = d.k; l = d.l r = d[eval('(' + iff + ')& ~(' + then + ')')] d = d.drop(r.index) f = pd.concat([f,r]) f.sort(columns=['d-space','h','k','l'],ascending=False,inplace=True) f.index = [x for x in range(len(f))] self.common.Forbidden = f self.Forbidden = self.common.Forbidden return(d) def setMineralList(self): self.comboBox_mineraldb.clear() self.minlist = list(self.mineraldb['Chemical'] + ': ' + self.mineraldb['Name']) self.comboBox_mineraldb.addItems(self.minlist) def removeMinerals(self): QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) mindiag = MineralListDialog() model = QtGui.QStandardItemModel(mindiag.listView) #mindiag.buttonBox.accepted.connect(mindiag.accept) #mindiag.buttonBox.rejected.connect(mindiag.reject) for mineral in self.minlist[1:]: item = QtGui.QStandardItem(mineral) item.setCheckable(True) item.setEditable(False) model.appendRow(item) mindiag.listView.setModel(model) if mindiag.exec_(): i=1 l=[] while model.item(i): if model.item(i).checkState(): l.append(i) i += 1 self.mineraldb = self.mineraldb.drop(self.mineraldb.index[l]) self.mineraldb.index = list(range(len(self.mineraldb))) self.setMineralList() QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) def AppendMineral(self): QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) dial = NewMineralDialog() if dial.exec_(): name = dial.lineEdit_name.text() sym = dial.lineEdit_sym.text() if name == '': name = 'Mineral' if sym == '': sym = 'XX' #set special conditions to a bunch of strings or as a NaN if self.manualConds == []: SCs = np.nan else: SCs = ';'.join(self.manualConds) params = {'Name':name, 'Chemical':sym, 'Crystal':self.comboBox_crystaltype.currentText(), 'UnitCell':self.comboBox_celltype.currentText(), 'SpaceGroup':int(self.comboBox_spacegroup.currentText().split(':')[0]), 'a':self.doubleSpinBox_a.value(), 'b':self.doubleSpinBox_b.value(), 'c':self.doubleSpinBox_c.value(), 'SpecialConditions':SCs} self.mineraldb = self.mineraldb.append(params,ignore_index=True) self.setMineralList() QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) def setSettings(self): """Raise SettingsDialog 
and pass values to pyLATTICE parameters. Grab current settings first.""" current = {'a max':self.doubleSpinBox_a.maximum(), 'b max':self.doubleSpinBox_b.maximum(), 'c max':self.doubleSpinBox_c.maximum()} dial = SettingsDialog(current) if dial.exec_(): amax = dial.maxa.value() bmax = dial.maxb.value() cmax = dial.maxc.value() #set slider and spinbox maxima self.doubleSpinBox_a.setMaximum(amax) self.doubleSpinBox_b.setMaximum(bmax) self.doubleSpinBox_c.setMaximum(cmax) self.hSlider_a.setMaximum(int(10*amax)) self.hSlider_b.setMaximum(int(10*bmax)) self.hSlider_c.setMaximum(int(10*cmax)) def ManualConditions(self): """Raise the manual space group conditions dialog""" dial = ManualConditionsDialog(conditions= self.manualConds) if dial.exec_(): num = dial.manualCondList.count() self.manualConds = [dial.manualCondList.item(i).text() for i in range(num)] def SaveDSpace(self): self.Save(self.DSpaces) def SaveMineralDB(self): self.Save(self.mineraldb) def LoadMineralDB(self): QtCore.QObject.disconnect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)' fname,ffilter = QtGui.QFileDialog.getOpenFileNameAndFilter(self,caption='Load Mineral Database',directory=self.common.path,filter=ftypes) fname = str(fname) ffilter=str(ffilter) #print(fname,ffilter) name, ext = os.path.splitext(fname) self.common.path = os.path.dirname(fname) if ffilter.startswith('HDF'): item = pd.read_hdf(name + '.h5','table') elif ffilter.startswith('CSV'): item = pd.read_csv(name + '.csv',sep=',') elif ffilter.startswith('Excel'): #allow for different excel formats sheetname,ok = QtGui.QInputDialog.getText(self,'Input Sheetname','Sheetname') if ok and sheetname != '': if ext == '.xlsx' or ext == '': item = pd.read_excel(name + '.xlsx',str(sheetname)) elif ext == '.xls': item = pd.read_excel(name + '.xls',str(sheetname)) self.mineraldb = item else: QtGui.QMessageBox.information(self, "Warning!", 'You must specify a sheet name!') self.setMineralList() QtCore.QObject.connect(self.comboBox_mineraldb, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), self.setMineral) def Save(self,item): #item should be pandas dataframe object ftypes = 'HDF (*.h5);;CSV (*.csv);;Excel (*.xlsx)' fname,ffilter = QtGui.QFileDialog.getSaveFileNameAndFilter(self,caption='Save D-Spacings',directory=self.common.path,filter=ftypes) fname = str(fname) ffilter=str(ffilter) #print(fname,ffilter) name, ext = os.path.splitext(fname) self.common.path = os.path.dirname(fname) print(name + ext) if ffilter.startswith('HDF'): item.to_hdf(name + '.h5','table') elif ffilter.startswith('CSV'): item.to_csv(name + '.csv',sep=',') elif ffilter.startswith('Excel'): #allow for different excel formats if ext == '.xlsx' or ext == '': item.to_excel(name + '.xlsx') elif ext == '.xls': item.to_excel(name + '.xls') ################################################################################ ############################### Plotting ####################################### ################################################################################ def PlotDiffraction(self): """Plots the current list of spots and d-spacings. For each point in self.DSpaces [d-space,h,k,l], determines anlges for plotting.""" #initialize plot with center spot only self.Plot.clear() self.common._x2 = False self.Plot.set_xlabel(u'Distance (\u212B\u207B\u00B9)')#angstrom^-1 self.Plot.set_ylabel(u'Distance (\u212B\u207B\u00B9)') #get values in energy, cam legnth, cam const. 
combo boxes self.energy = self.spinBox_beamenergy.value() self.camlength = self.spinBox_camlength.value() self.camconst = self.doubleSpinBox_camconst.value() #self.Plot.plot(0,0, linestyle = '', marker='o', markersize = 10, color = 'black',picker=5, label = u'0 0 0') #add some labels if self.checkBox_labels.isChecked() == True: #center spot #self.Plot.annotate(u'0 0 0', xy = (0,0), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom', # bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01)) #add crystal structure information in an annotation #grab current values self.a = self.doubleSpinBox_a.value(); self.b = self.doubleSpinBox_b.value(); self.c = self.doubleSpinBox_c.value() alph = self.spinBox_alpha.value(); beta = self.spinBox_beta.value(); gam = self.spinBox_gamma.value() plot_label = r'''%s: %s; a = %.2f, b = %.2f, c = %.2f; $\alpha$ = %d$^o$, $\beta$ = %d$^o$, $\gamma$ = %d$^o$''' % (self.comboBox_crystaltype.currentText(),self.comboBox_celltype.currentText(),self.a,self.b,self.c,alph,beta,gam) ann = self.Plot.annotate(plot_label, xy=(0.02, 1.02), xycoords='axes fraction',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01)) ann.draggable() #make annotation draggable #need to choose a reference point with smallest sum of absolute miller indices #since self.DSpaces is sorted with largest d-space first, this will be the smallest sum of abs miller indices #first point rotation = np.radians(float(self.comboBox_rotate.currentText())) d = np.array(self.DSpaces['d-space'],dtype=np.float) ref = np.array(self.DSpaces.loc[0,['h','k','l']],dtype=np.int) Q2 = np.array(self.DSpaces[['h','k','l']],dtype=np.int) recip_vec = np.array([self.astar,self.bstar,self.cstar],dtype=np.float) dir_vec = np.array([self.u,self.v,self.w],dtype=np.int) #add extra factor if hcp unit cell t = self.comboBox_crystaltype.currentText() #must check that Forbidden dataframe isn't empty for a spcific zone axis showf = self.checkBox_showforbidden.isChecked() and not self.Forbidden.empty if t in ['Hexagonal','Trigonal']: #print('Hexagonal') #change dtypes ref = np.array(ref,dtype=np.float) Q2 = np.array(Q2,dtype=np.float) lam = np.sqrt(2/3)*(self.c/self.a) ref[2] = ref[2]/lam ref = np.hstack([ref,-(ref[0]+ref[1])]) #add i direction, but to the end b/c it doesnt matter Q2[:,2] = Q2[:,2]/lam Q2 = np.append(Q2,np.array([-Q2[:,0]-Q2[:,1]]).T,axis=1) theta,x,y = CalcSpotsHCP(d,Q2,ref,recip_vec,dir_vec,rotation) if showf: df = np.array(self.Forbidden['d-space'],dtype=np.float) Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int) Q2f = np.array(Q2f,dtype=np.float) Q2f[:,2] = Q2f[:,2]/lam Q2f = np.append(Q2f,np.array([-Q2f[:,0]-Q2f[:,1]]).T,axis=1) thetaf,xf,yf = CalcSpotsHCP(df,Q2f,ref,recip_vec,dir_vec,rotation) else: theta,x,y = CalcSpots(d,Q2,ref,recip_vec,self.G_inv,dir_vec,rotation) if showf: df = np.array(self.Forbidden['d-space'],dtype=np.float) Q2f = np.array(self.Forbidden[['h','k','l']],dtype=np.int) thetaf,xf,yf = CalcSpots(df,Q2f,ref,recip_vec,self.G_inv,dir_vec,rotation) self.DSpaces['theta'] = np.degrees(theta).round(2); self.DSpaces['x'] = x; self.DSpaces['y'] = y if showf: self.Forbidden['theta'] = np.degrees(thetaf).round(2); self.Forbidden['x'] = xf; self.Forbidden['y'] = yf for i in range(len(self.Forbidden)): label = ' '.join([str(int(x)) for x in self.Forbidden.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates #convert negative numbers to overline numbers for visual effect for j,num in 
enumerate(self._overline_strings): match = re.search(u'-%d' % (j+1),label) if match: label = re.sub(match.group(),num,label) #add each label and coordinate to DSpace dataframe # self.DSpaces.loc[i,'x'] = coords[0] # self.DSpaces.loc[i,'y'] = coords[1] self.Forbidden.loc[i,'label'] = label #print(self.DSpaces) #make label for each spot for i in range(len(self.DSpaces)): label = r' '.join([str(int(x)) for x in self.DSpaces.loc[i,['h','k','l']]]) #this is a bit dense, but makes a list of str() hkl values, then concatenates #convert negative numbers to overline numbers for visual effect for j,num in enumerate(self._overline_strings): match = re.search(u'-%d' % (j+1),label) if match: label = re.sub(match.group(),num,label) #add each label and coordinate to DSpace dataframe # self.DSpaces.loc[i,'x'] = coords[0] # self.DSpaces.loc[i,'y'] = coords[1] label = r'$%s$' % label.replace(' ','\ ') #convert to mathtex string and add spaces self.DSpaces.loc[i,'label'] = label #add 000 spot self.DSpaces.loc[len(self.DSpaces),['d-space','h','k','l','x','y','label']] = [0,0,0,0,0,0,r'$0\ 0\ 0$'] #print(self.DSpaces) #scatterplots make it difficult to get data back in matplotlibwidget # for i in range(len(self.DSpaces)): self.Plot.plot(self.DSpaces['x'],self.DSpaces['y'],ls='',marker='o',markersize=10,color='k',picker=5)#,label='%i %i %i' % (self.DSpaces.loc[i,['h']],self.DSpaces.loc[i,['k']],self.DSpaces.loc[i,['l']])) if showf: self.Plot.plot(self.Forbidden['x'],self.Forbidden['y'],ls='',marker='o',markersize=7,color='gray', alpha=.7) #xmax = max(self.DSpaces['x']); xmin = min(self.DSpaces['x']) #ymax = max(self.DSpaces['y']); ymin = min(self.DSpaces['y']) #self.Plot.set_xlim([1.5*xmin,1.5*xmax]) #self.Plot.set_ylim([1.5*ymin,1.5*ymax]) if self.checkBox_labels.isChecked() == True: for i in range(len(self.DSpaces)): #label = self.MathLabels(i) label = self.DSpaces.loc[i,'label'] self.Plot.annotate(label, xy = (self.DSpaces.loc[i,'x'],self.DSpaces.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom', bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01)) if showf: for i in range(len(self.Forbidden)): #label = self.MathLabels(i) label = self.Forbidden.loc[i,'label'] self.Plot.annotate(label, xy = (self.Forbidden.loc[i,'x'],self.Forbidden.loc[i,'y']), xytext=(0,10),textcoords = 'offset points', ha = 'center', va = 'bottom',color='gray', bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.01)) if showf: self.common.Forbidden = self.Forbidden self.common.DSpaces = self.DSpaces self.common.a = self.doubleSpinBox_a.value()#for determining arrow size in plot self.DiffWidget.canvas.draw() def MathLabels(self,i): '''Make labels with overlines instead of minus signs for plotting with matplotlib. 
i is the index for DSpaces''' label = r'' if self.DSpaces.loc[i,'h'] < 0: label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'h']) else: label+=r'%i ' % self.DSpaces.loc[i,'h'] if self.DSpaces.loc[i,'k'] < 0: label+=r'$\bar %i$ ' % abs(self.DSpaces.loc[i,'k']) else: label+=r'%i ' % self.DSpaces.loc[i,'k'] if self.DSpaces.loc[i,'l'] < 0: label+=r'$\bar %i$' % abs(self.DSpaces.loc[i,'l']) else: label+=r'%i' % self.DSpaces.loc[i,'l'] return(label) ################################################################################ ############################### Calculator ##################################### ################################################################################ def Calculator(self): """Grabs current miller indices or zone directions and calls AngleCalc""" h1 = int(self.comboBox_h1.currentText()) h2 = int(self.comboBox_h2.currentText()) k1 = int(self.comboBox_k1.currentText()) k2 = int(self.comboBox_k2.currentText()) l1 = int(self.comboBox_l1.currentText()) l2 = int(self.comboBox_l2.currentText()) i1 = -(h1+k1) i2 = -(h2+k2) hex = self.checkBox_hexagonal.isChecked() angle = round(np.degrees(self.Diffraction.PlaneAngle(p1=np.array([h1,k1,l1]),p2=np.array([h2,k2,l2]),hex=hex)),2) if np.isnan(angle): QtGui.QMessageBox.information(self, "Uh, Oh!", 'There is no [0 0 0] direction/plane!') else: if self.checkBox_normals.isChecked(): self.lineEdit_angle.setText(u'φ = %.2f°' % angle) bra = u'(' ket = u')' elif not self.checkBox_normals.isChecked(): self.lineEdit_angle.setText(u'ρ = %.2f°' % angle) bra = u'[' ket = u']' if hex == False: hkls = [bra,h1,k1,l1,ket,bra,h2,k2,l2,ket] for j,it in enumerate(hkls): if type(it) == int and it < 0: hkls[j] = self._overline_strings[abs(it)-1] self.lineEdit_dirs.setText(u'%s%s%s%s%s \u2220 %s%s%s%s%s' % tuple(hkls)) else: hkls = [bra,h1,k1,i1,l1,ket,bra,h2,k2,i2,l2,ket] for j,it in enumerate(hkls): if type(it) == int and it < 0: hkls[j] = self._overline_strings[abs(it)-1] self.lineEdit_dirs.setText(u'%s%s%s%s%s%s \u2220 %s%s%s%s%s%s' % tuple(hkls)) def CalcLabels(self): """Rewrite labels for aesthetics""" if self.checkBox_normals.isChecked(): self.label_h2.setText(u'h') self.label_k2.setText(u'k') self.label_l2.setText(u'l') #self.label_h2.setAlignment(0x0004) #self.label_k2.setAlignment(0x0004) #self.label_l2.setAlignment(0x0004) elif not self.checkBox_normals.isChecked(): self.label_h2.setText(u'u') self.label_k2.setText(u'v') self.label_l2.setText(u'w') #self.label_h2.setAlignment(0x0004) #self.label_k2.setAlignment(0x0004) #self.label_l2.setAlignment(0x0004) ################################################################################ ############################### Other ########################################## ################################################################################ def About(self): """Displays the About message""" QtGui.QMessageBox.information(self, "About", """pyLATTICE %s: Written by Evan Groopman Based upon LATTICE (DOS) by Thomas Bernatowicz c. 2011-2014 For help contact: eegroopm@gmail.com""" % self.version) def HowTo(self): """How-to dialog box""" howtomessage = ( """ - Select crystal type, unit cell type, and lattice parameters to calculate the metric tensor. - OR select a mineral from the database. - D-spacings will be calculated between the selected Miller indices. - Select zone axis and press "Plot" to show diffraction pattern. - Select two diffraction spots to measure distance and angle. Note: pyLATTICE only includes general reflection conditions for each space group. 
It does not include special conditions based upon multiplicity, site symmetry, etc. EXCEPT for FCC diamond #227. """) QtGui.QMessageBox.information(self, "How to use", howtomessage)
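# --- Illustrative sketch (added; not part of pyLATTICE) -----------------------------
# The MetricTensor/D_Spacings machinery above reduces to the standard relation
#     1/d^2 = (h k l) . G* . (h k l)^T ,  with G* = G^-1 the reciprocal metric tensor,
# where G is built from the direct lattice parameters.  A minimal standalone version
# of that calculation is sketched below; the function name and the example values
# (a cubic cell with a = 5.431 Angstrom) are illustrative assumptions only.
import numpy as np

def _dspacing_sketch(hkl, a, b, c, alpha, beta, gamma):
    """Return the d-spacing (same length units as a, b, c) for Miller indices hkl."""
    al, be, ga = np.radians([alpha, beta, gamma])
    # direct metric tensor (symmetric)
    G = np.array([[a * a,              a * b * np.cos(ga),  a * c * np.cos(be)],
                  [a * b * np.cos(ga), b * b,               b * c * np.cos(al)],
                  [a * c * np.cos(be), b * c * np.cos(al),  c * c]])
    G_inv = np.linalg.inv(G)                      # reciprocal metric tensor G*
    h = np.asarray(hkl, dtype=float)
    return 1.0 / np.sqrt(h.dot(G_inv).dot(h))

# Example: d(111) of a cubic cell with a = 5.431 -> a/sqrt(3) ~ 3.136
#     _dspacing_sketch((1, 1, 1), 5.431, 5.431, 5.431, 90, 90, 90)
# -------------------------------------------------------------------------------------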
gpl-2.0
tawsifkhan/scikit-learn
sklearn/linear_model/omp.py
127
30417
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause import warnings from distutils.version import LooseVersion import numpy as np from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel, _pre_fit from ..base import RegressorMixin from ..utils import as_float_array, check_array, check_X_y from ..cross_validation import check_cv from ..externals.joblib import Parallel, delayed import scipy solve_triangular_args = {} if LooseVersion(scipy.__version__) >= LooseVersion('0.12'): # check_finite=False is an optimization available only in scipy >=0.12 solve_triangular_args = {'check_finite': False} premature = """ Orthogonal matching pursuit ended prematurely due to linear dependence in the dictionary. The requested precision might not have been met. """ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : array, shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : array, shape (n_samples,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coef : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy('F') else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) potrs, = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=X.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=X.dtype) L[0, 0] = 1. 
if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = np.sqrt(1 - v) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the the Cholesky decomposition method. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data matrix Xy : array, shape (n_features,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol_0 : float Squared norm of y, required if tol is not None. tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coefs : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. 
""" Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram) if copy_Xy: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,)) potrs, = get_lapack_funcs(('potrs',), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=Gram.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = np.sqrt(1 - v) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array, shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : array, shape (n_samples,) or (n_samples, n_targets) Input targets n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : {True, False, 'auto'}, Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. 
Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp_gram lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError("The number of atoms cannot be more than the number " "of features") if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y ** 2), axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False): """Gram Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. 
Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data: X.T * X Xy : array, shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. norms_squared : array-like, shape (n_targets,) Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order='F', copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError('Gram OMP needs the precomputed norms in order ' 'to evaluate the error sum of squares.') if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError("The number of atoms cannot be more than the number " "of features") if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram))) else: coef = np.zeros((len(Gram), Xy.shape[1])) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=copy_Xy, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) class OrthogonalMatchingPursuit(LinearModel, RegressorMixin): """Orthogonal Matching Pursuit model (OMP) Parameters ---------- n_nonzero_coefs : int, optional Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, optional Maximum norm of the residual. If not None, overrides n_nonzero_coefs. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. precompute : {True, False, 'auto'}, default 'auto' Whether to use a precomputed Gram and Xy matrix to speed up calculations. Improves performance when `n_targets` or `n_samples` is very large. Note that if you already have such matrices, you can pass them directly to the fit method. Read more in the :ref:`User Guide <omp>`. Attributes ---------- coef_ : array, shape (n_features,) or (n_features, n_targets) parameter vector (w in the formula) intercept_ : float or array, shape (n_targets,) independent term in decision function. n_iter_ : int or array-like Number of active features across every target. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars decomposition.sparse_encode """ def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto'): self.n_nonzero_coefs = n_nonzero_coefs self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_mean, y_mean, X_std, Gram, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, self.n_nonzero_coefs_, self.tol, precompute=False, copy_X=True, return_n_iter=True) else: norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True) self.coef_ = coef_.T self._set_intercept(X_mean, y_mean, X_std) return self def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, normalize=True, max_iter=100): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. 
Returns ------- residues: array, shape (n_samples, max_features) Residues of the prediction on the test data """ if copy: X_train = X_train.copy() y_train = y_train.copy() X_test = X_test.copy() y_test = y_test.copy() if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True) if coefs.ndim == 1: coefs = coefs[:, np.newaxis] if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] return np.dot(coefs.T, X_test.T) - y_test class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin): """Cross-validated Orthogonal Matching Pursuit model (OMP) Parameters ---------- copy : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 10% of ``n_features`` but at least 5 if available. cv : cross-validation generator, optional see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to a 5-fold strategy n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Read more in the :ref:`User Guide <omp>`. Attributes ---------- intercept_ : float or array, shape (n_targets,) Independent term in decision function. coef_ : array, shape (n_features,) or (n_features, n_targets) Parameter vector (w in the problem formulation). n_nonzero_coefs_ : int Estimated number of non-zero coefficients giving the best mean squared error over the cross-validation folds. n_iter_ : int or array-like Number of active features across every target for the model refit with the best hyperparameters got by cross-validating across all folds. See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars OrthogonalMatchingPursuit LarsCV LassoLarsCV decomposition.sparse_encode """ def __init__(self, copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=1, verbose=False): self.copy = copy self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.cv = cv self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Returns ------- self : object returns an instance of self. 
""" X, y = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=False, force_all_finite=False) cv = check_cv(self.cv, X, y, classifier=False) max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, self.normalize, max_iter) for train, test in cv) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, normalize=self.normalize) omp.fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
bsd-3-clause
cbertinato/pandas
pandas/tests/frame/test_axis_select_reindex.py
1
44030
from datetime import datetime import numpy as np import pytest from pandas.errors import PerformanceWarning import pandas as pd from pandas import ( Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna) from pandas.tests.frame.common import TestData import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal class TestDataFrameSelectReindex(TestData): # These are specific reindex-based tests; other indexing tests should go in # test_indexing def test_drop_names(self): df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], index=['a', 'b', 'c'], columns=['d', 'e', 'f']) df.index.name, df.columns.name = 'first', 'second' df_dropped_b = df.drop('b') df_dropped_e = df.drop('e', axis=1) df_inplace_b, df_inplace_e = df.copy(), df.copy() df_inplace_b.drop('b', inplace=True) df_inplace_e.drop('e', axis=1, inplace=True) for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e): assert obj.index.name == 'first' assert obj.columns.name == 'second' assert list(df.columns) == ['d', 'e', 'f'] msg = r"\['g'\] not found in axis" with pytest.raises(KeyError, match=msg): df.drop(['g']) with pytest.raises(KeyError, match=msg): df.drop(['g'], 1) # errors = 'ignore' dropped = df.drop(['g'], errors='ignore') expected = Index(['a', 'b', 'c'], name='first') tm.assert_index_equal(dropped.index, expected) dropped = df.drop(['b', 'g'], errors='ignore') expected = Index(['a', 'c'], name='first') tm.assert_index_equal(dropped.index, expected) dropped = df.drop(['g'], axis=1, errors='ignore') expected = Index(['d', 'e', 'f'], name='second') tm.assert_index_equal(dropped.columns, expected) dropped = df.drop(['d', 'g'], axis=1, errors='ignore') expected = Index(['e', 'f'], name='second') tm.assert_index_equal(dropped.columns, expected) # GH 16398 dropped = df.drop([], errors='ignore') expected = Index(['a', 'b', 'c'], name='first') tm.assert_index_equal(dropped.index, expected) def test_drop_col_still_multiindex(self): arrays = [['a', 'b', 'c', 'top'], ['', '', '', 'OD'], ['', '', '', 'wx']] tuples = sorted(zip(*arrays)) index = MultiIndex.from_tuples(tuples) df = DataFrame(np.random.randn(3, 4), columns=index) del df[('a', '', '')] assert(isinstance(df.columns, MultiIndex)) def test_drop(self): simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}) assert_frame_equal(simple.drop("A", axis=1), simple[['B']]) assert_frame_equal(simple.drop(["A", "B"], axis='columns'), simple[[]]) assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :]) assert_frame_equal(simple.drop( [0, 3], axis='index'), simple.loc[[1, 2], :]) with pytest.raises(KeyError, match=r"\[5\] not found in axis"): simple.drop(5) with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): simple.drop('C', 1) with pytest.raises(KeyError, match=r"\[5\] not found in axis"): simple.drop([1, 5]) with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): simple.drop(['A', 'C'], 1) # errors = 'ignore' assert_frame_equal(simple.drop(5, errors='ignore'), simple) assert_frame_equal(simple.drop([0, 5], errors='ignore'), simple.loc[[1, 2, 3], :]) assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple) assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'), simple[['B']]) # non-unique - wheee! 
nu_df = DataFrame(list(zip(range(3), range(-3, 1), list('abc'))), columns=['a', 'a', 'b']) assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']]) assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a']) assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398 nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X'])) nu_df.columns = list('abc') assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :]) assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :]) # inplace cache issue # GH 5628 df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc')) expected = df[~(df.b > 0)] df.drop(labels=df[df.b > 0].index, inplace=True) assert_frame_equal(df, expected) def test_drop_multiindex_not_lexsorted(self): # GH 11640 # define the lexsorted version lexsorted_mi = MultiIndex.from_tuples( [('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c']) lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi) assert lexsorted_df.columns.is_lexsorted() # define the non-lexsorted version not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'], data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]]) not_lexsorted_df = not_lexsorted_df.pivot_table( index='a', columns=['b', 'c'], values='d') not_lexsorted_df = not_lexsorted_df.reset_index() assert not not_lexsorted_df.columns.is_lexsorted() # compare the results tm.assert_frame_equal(lexsorted_df, not_lexsorted_df) expected = lexsorted_df.drop('a', axis=1) with tm.assert_produces_warning(PerformanceWarning): result = not_lexsorted_df.drop('a', axis=1) tm.assert_frame_equal(result, expected) def test_drop_api_equivalence(self): # equivalence of the labels/axis and index/columns API's (GH12392) df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], index=['a', 'b', 'c'], columns=['d', 'e', 'f']) res1 = df.drop('a') res2 = df.drop(index='a') tm.assert_frame_equal(res1, res2) res1 = df.drop('d', 1) res2 = df.drop(columns='d') tm.assert_frame_equal(res1, res2) res1 = df.drop(labels='e', axis=1) res2 = df.drop(columns='e') tm.assert_frame_equal(res1, res2) res1 = df.drop(['a'], axis=0) res2 = df.drop(index=['a']) tm.assert_frame_equal(res1, res2) res1 = df.drop(['a'], axis=0).drop(['d'], axis=1) res2 = df.drop(index=['a'], columns=['d']) tm.assert_frame_equal(res1, res2) with pytest.raises(ValueError): df.drop(labels='a', index='b') with pytest.raises(ValueError): df.drop(labels='a', columns='b') with pytest.raises(ValueError): df.drop(axis=1) def test_merge_join_different_levels(self): # GH 9455 # first dataframe df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]]) # second dataframe columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')]) df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]]) # merge columns = ['a', 'b', ('c', 'c1')] expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]]) with tm.assert_produces_warning(UserWarning): result = pd.merge(df1, df2, on='a') tm.assert_frame_equal(result, expected) # join, see discussion in GH 12219 columns = ['a', 'b', ('a', ''), ('c', 'c1')] expected = DataFrame(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]]) with tm.assert_produces_warning(UserWarning): result = df1.join(df2, on='a') tm.assert_frame_equal(result, expected) def test_reindex(self): newFrame = self.frame.reindex(self.ts1.index) for col in newFrame.columns: for idx, val in newFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) else: assert val == self.frame[col][idx] else: assert np.isnan(val) for col, series in newFrame.items(): assert 
tm.equalContents(series.index, newFrame.index) emptyFrame = self.frame.reindex(Index([])) assert len(emptyFrame.index) == 0 # Cython code should be unit-tested directly nonContigFrame = self.frame.reindex(self.ts1.index[::2]) for col in nonContigFrame.columns: for idx, val in nonContigFrame[col].items(): if idx in self.frame.index: if np.isnan(val): assert np.isnan(self.frame[col][idx]) else: assert val == self.frame[col][idx] else: assert np.isnan(val) for col, series in nonContigFrame.items(): assert tm.equalContents(series.index, nonContigFrame.index) # corner cases # Same index, copies values but not index if copy=False newFrame = self.frame.reindex(self.frame.index, copy=False) assert newFrame.index is self.frame.index # length zero newFrame = self.frame.reindex([]) assert newFrame.empty assert len(newFrame.columns) == len(self.frame.columns) # length zero with columns reindexed with non-empty index newFrame = self.frame.reindex([]) newFrame = newFrame.reindex(self.frame.index) assert len(newFrame.index) == len(self.frame.index) assert len(newFrame.columns) == len(self.frame.columns) # pass non-Index newFrame = self.frame.reindex(list(self.ts1.index)) tm.assert_index_equal(newFrame.index, self.ts1.index) # copy with no axes result = self.frame.reindex() assert_frame_equal(result, self.frame) assert result is not self.frame def test_reindex_nan(self): df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]], index=[2, np.nan, 1, 5], columns=['joe', 'jim']) i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1] assert_frame_equal(df.reindex(i), df.iloc[j]) df.index = df.index.astype('object') assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False) # GH10388 df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'], 'date': ['2015-03-22', np.nan, '2012-01-08', np.nan], 'amount': [2, 3, 4, 5]}) df['date'] = pd.to_datetime(df.date) df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1) left = df.set_index(['delta', 'other', 'date']).reset_index() right = df.reindex(columns=['delta', 'other', 'date', 'amount']) assert_frame_equal(left, right) def test_reindex_name_remains(self): s = Series(np.random.rand(10)) df = DataFrame(s, index=np.arange(len(s))) i = Series(np.arange(10), name='iname') df = df.reindex(i) assert df.index.name == 'iname' df = df.reindex(Index(np.arange(10), name='tmpname')) assert df.index.name == 'tmpname' s = Series(np.random.rand(10)) df = DataFrame(s.T, index=np.arange(len(s))) i = Series(np.arange(10), name='iname') df = df.reindex(columns=i) assert df.columns.name == 'iname' def test_reindex_int(self): smaller = self.intframe.reindex(self.intframe.index[::2]) assert smaller['A'].dtype == np.int64 bigger = smaller.reindex(self.intframe.index) assert bigger['A'].dtype == np.float64 smaller = self.intframe.reindex(columns=['A', 'B']) assert smaller['A'].dtype == np.int64 def test_reindex_like(self): other = self.frame.reindex(index=self.frame.index[:10], columns=['C', 'B']) assert_frame_equal(other, self.frame.reindex_like(other)) def test_reindex_columns(self): new_frame = self.frame.reindex(columns=['A', 'B', 'E']) tm.assert_series_equal(new_frame['B'], self.frame['B']) assert np.isnan(new_frame['E']).all() assert 'C' not in new_frame # Length zero new_frame = self.frame.reindex(columns=[]) assert new_frame.empty def test_reindex_columns_method(self): # GH 14992, reindexing over columns ignored method df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]], index=[1, 2, 4], columns=[1, 2, 4], dtype=float) # default method result = 
df.reindex(columns=range(6)) expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan], [np.nan, 21, 22, np.nan, 23, np.nan], [np.nan, 31, 32, np.nan, 33, np.nan]], index=[1, 2, 4], columns=range(6), dtype=float) assert_frame_equal(result, expected) # method='ffill' result = df.reindex(columns=range(6), method='ffill') expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13], [np.nan, 21, 22, 22, 23, 23], [np.nan, 31, 32, 32, 33, 33]], index=[1, 2, 4], columns=range(6), dtype=float) assert_frame_equal(result, expected) # method='bfill' result = df.reindex(columns=range(6), method='bfill') expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan], [21, 21, 22, 23, 23, np.nan], [31, 31, 32, 33, 33, np.nan]], index=[1, 2, 4], columns=range(6), dtype=float) assert_frame_equal(result, expected) def test_reindex_axes(self): # GH 3317, reindexing by both axes loses freq of the index df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c']) time_freq = date_range('2012-01-01', '2012-01-03', freq='d') some_cols = ['a', 'b'] index_freq = df.reindex(index=time_freq).index.freq both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq seq_freq = df.reindex(index=time_freq).reindex( columns=some_cols).index.freq assert index_freq == both_freq assert index_freq == seq_freq def test_reindex_fill_value(self): df = DataFrame(np.random.randn(10, 4)) # axis=0 result = df.reindex(list(range(15))) assert np.isnan(result.values[-5:]).all() result = df.reindex(range(15), fill_value=0) expected = df.reindex(range(15)).fillna(0) assert_frame_equal(result, expected) # axis=1 result = df.reindex(columns=range(5), fill_value=0.) expected = df.copy() expected[4] = 0. assert_frame_equal(result, expected) result = df.reindex(columns=range(5), fill_value=0) expected = df.copy() expected[4] = 0 assert_frame_equal(result, expected) result = df.reindex(columns=range(5), fill_value='foo') expected = df.copy() expected[4] = 'foo' assert_frame_equal(result, expected) # reindex_axis with tm.assert_produces_warning(FutureWarning): result = df.reindex_axis(range(15), fill_value=0., axis=0) expected = df.reindex(range(15)).fillna(0) assert_frame_equal(result, expected) with tm.assert_produces_warning(FutureWarning): result = df.reindex_axis(range(5), fill_value=0., axis=1) expected = df.reindex(columns=range(5)).fillna(0) assert_frame_equal(result, expected) # other dtypes df['foo'] = 'foo' result = df.reindex(range(15), fill_value=0) expected = df.reindex(range(15)).fillna(0) assert_frame_equal(result, expected) def test_reindex_dups(self): # GH4746, reindex on duplicate index error messages arr = np.random.randn(10) df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]) # set index is ok result = df.copy() result.index = list(range(len(df))) expected = DataFrame(arr, index=list(range(len(df)))) assert_frame_equal(result, expected) # reindex fails msg = "cannot reindex from a duplicate axis" with pytest.raises(ValueError, match=msg): df.reindex(index=list(range(len(df)))) def test_reindex_axis_style(self): # https://github.com/pandas-dev/pandas/issues/12392 df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]}, index=[0, 1, 3]) result = df.reindex([0, 1, 3]) assert_frame_equal(result, expected) result = df.reindex([0, 1, 3], axis=0) assert_frame_equal(result, expected) result = df.reindex([0, 1, 3], axis='index') assert_frame_equal(result, expected) def 
test_reindex_positional_warns(self): # https://github.com/pandas-dev/pandas/issues/12392 df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5], "C": [np.nan, np.nan]}) with tm.assert_produces_warning(FutureWarning): result = df.reindex([0, 1], ['A', 'B', 'C']) assert_frame_equal(result, expected) def test_reindex_axis_style_raises(self): # https://github.com/pandas-dev/pandas/issues/12392 df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]}) with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex([0, 1], ['A'], axis=1) with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex([0, 1], ['A'], axis='index') with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(index=[0, 1], axis='index') with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(index=[0, 1], axis='columns') with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(columns=[0, 1], axis='columns') with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(index=[0, 1], columns=[0, 1], axis='columns') with pytest.raises(TypeError, match='Cannot specify all'): df.reindex([0, 1], [0], ['A']) # Mixing styles with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(index=[0, 1], axis='index') with pytest.raises(TypeError, match="Cannot specify both 'axis'"): df.reindex(index=[0, 1], axis='columns') # Duplicates with pytest.raises(TypeError, match="multiple values"): df.reindex([0, 1], labels=[0, 1]) def test_reindex_single_named_indexer(self): # https://github.com/pandas-dev/pandas/issues/12392 df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]}) result = df.reindex([0, 1], columns=['A']) expected = pd.DataFrame({"A": [1, 2]}) assert_frame_equal(result, expected) def test_reindex_api_equivalence(self): # https://github.com/pandas-dev/pandas/issues/12392 # equivalence of the labels/axis and index/columns API's df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]], index=['a', 'b', 'c'], columns=['d', 'e', 'f']) res1 = df.reindex(['b', 'a']) res2 = df.reindex(index=['b', 'a']) res3 = df.reindex(labels=['b', 'a']) res4 = df.reindex(labels=['b', 'a'], axis=0) res5 = df.reindex(['b', 'a'], axis=0) for res in [res2, res3, res4, res5]: tm.assert_frame_equal(res1, res) res1 = df.reindex(columns=['e', 'd']) res2 = df.reindex(['e', 'd'], axis=1) res3 = df.reindex(labels=['e', 'd'], axis=1) for res in [res2, res3]: tm.assert_frame_equal(res1, res) with tm.assert_produces_warning(FutureWarning) as m: res1 = df.reindex(['b', 'a'], ['e', 'd']) assert 'reindex' in str(m[0].message) res2 = df.reindex(columns=['e', 'd'], index=['b', 'a']) res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'], axis=1) for res in [res2, res3]: tm.assert_frame_equal(res1, res) def test_align(self): af, bf = self.frame.align(self.frame) assert af._data is not self.frame._data af, bf = self.frame.align(self.frame, copy=False) assert af._data is self.frame._data # axis = 0 other = self.frame.iloc[:-5, :3] af, bf = self.frame.align(other, axis=0, fill_value=-1) tm.assert_index_equal(bf.columns, other.columns) # test fill value join_idx = self.frame.index.join(other.index) diff_a = self.frame.index.difference(join_idx) diff_b = other.index.difference(join_idx) diff_a_vals = af.reindex(diff_a).values diff_b_vals = bf.reindex(diff_b).values assert (diff_a_vals == -1).all() af, bf = self.frame.align(other, join='right', axis=0) tm.assert_index_equal(bf.columns, other.columns) 
tm.assert_index_equal(bf.index, other.index) tm.assert_index_equal(af.index, other.index) # axis = 1 other = self.frame.iloc[:-5, :3].copy() af, bf = self.frame.align(other, axis=1) tm.assert_index_equal(bf.columns, self.frame.columns) tm.assert_index_equal(bf.index, other.index) # test fill value join_idx = self.frame.index.join(other.index) diff_a = self.frame.index.difference(join_idx) diff_b = other.index.difference(join_idx) diff_a_vals = af.reindex(diff_a).values # TODO(wesm): unused? diff_b_vals = bf.reindex(diff_b).values # noqa assert (diff_a_vals == -1).all() af, bf = self.frame.align(other, join='inner', axis=1) tm.assert_index_equal(bf.columns, other.columns) af, bf = self.frame.align(other, join='inner', axis=1, method='pad') tm.assert_index_equal(bf.columns, other.columns) # test other non-float types af, bf = self.intframe.align(other, join='inner', axis=1, method='pad') tm.assert_index_equal(bf.columns, other.columns) af, bf = self.mixed_frame.align(self.mixed_frame, join='inner', axis=1, method='pad') tm.assert_index_equal(bf.columns, self.mixed_frame.columns) af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=None) tm.assert_index_equal(bf.index, Index([])) af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) tm.assert_index_equal(bf.index, Index([])) # mixed floats/ints af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) tm.assert_index_equal(bf.index, Index([])) af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) tm.assert_index_equal(bf.index, Index([])) # Try to align DataFrame to Series along bad axis with pytest.raises(ValueError): self.frame.align(af.iloc[0, :3], join='inner', axis=2) # align dataframe to series with broadcast or not idx = self.frame.index s = Series(range(len(idx)), index=idx) left, right = self.frame.align(s, axis=0) tm.assert_index_equal(left.index, self.frame.index) tm.assert_index_equal(right.index, self.frame.index) assert isinstance(right, Series) left, right = self.frame.align(s, broadcast_axis=1) tm.assert_index_equal(left.index, self.frame.index) expected = {c: s for c in self.frame.columns} expected = DataFrame(expected, index=self.frame.index, columns=self.frame.columns) tm.assert_frame_equal(right, expected) # see gh-9558 df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) result = df[df['a'] == 2] expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b']) tm.assert_frame_equal(result, expected) result = df.where(df['a'] == 2, 0) expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]}) tm.assert_frame_equal(result, expected) def _check_align(self, a, b, axis, fill_axis, how, method, limit=None): aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis) join_index, join_columns = None, None ea, eb = a, b if axis is None or axis == 0: join_index = a.index.join(b.index, how=how) ea = ea.reindex(index=join_index) eb = eb.reindex(index=join_index) if axis is None or axis == 1: join_columns = a.columns.join(b.columns, how=how) ea = ea.reindex(columns=join_columns) eb = eb.reindex(columns=join_columns) ea = ea.fillna(axis=fill_axis, method=method, limit=limit) eb = eb.fillna(axis=fill_axis, method=method, limit=limit) assert_frame_equal(aa, ea) assert_frame_equal(ab, eb) @pytest.mark.parametrize('meth', ['pad', 'bfill']) @pytest.mark.parametrize('ax', [0, 1, None]) @pytest.mark.parametrize('fax', [0, 1]) @pytest.mark.parametrize('how', ['inner', 
'outer', 'left', 'right']) def test_align_fill_method(self, how, meth, ax, fax): self._check_align_fill(how, meth, ax, fax) def _check_align_fill(self, kind, meth, ax, fax): left = self.frame.iloc[0:4, :10] right = self.frame.iloc[2:, 6:] empty = self.frame.iloc[:0, :0] self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1) # empty left self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1) # empty right self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1) # both empty self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1) def test_align_int_fill_bug(self): # GH #910 X = np.arange(10 * 10, dtype='float64').reshape(10, 10) Y = np.ones((10, 1), dtype=int) df1 = DataFrame(X) df1['0.X'] = Y.squeeze() df2 = df1.astype(float) result = df1 - df1.mean() expected = df2 - df2.mean() assert_frame_equal(result, expected) def test_align_multiindex(self): # GH 10665 # same test cases as test_align_multiindex in test_series.py midx = pd.MultiIndex.from_product([range(2), range(3), range(2)], names=('a', 'b', 'c')) idx = pd.Index(range(2), name='b') df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx) df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx) # these must be the same results (but flipped) res1l, res1r = df1.align(df2, join='left') res2l, res2r = df2.align(df1, join='right') expl = df1 assert_frame_equal(expl, res1l) assert_frame_equal(expl, res2r) expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) assert_frame_equal(expr, res1r) assert_frame_equal(expr, res2l) res1l, res1r = df1.align(df2, join='right') res2l, res2r = df2.align(df1, join='left') exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)], names=('a', 'b', 'c')) expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) assert_frame_equal(expl, res1l) assert_frame_equal(expl, res2r) expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx) assert_frame_equal(expr, res1r) assert_frame_equal(expr, res2l) def test_align_series_combinations(self): df = pd.DataFrame({'a': [1, 3, 5], 'b': [1, 3, 5]}, index=list('ACE')) s = pd.Series([1, 2, 4], index=list('ABD'), name='x') # frame + series res1, res2 = df.align(s, axis=0) exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5], 'b': [1, np.nan, 3, np.nan, 5]}, index=list('ABCDE')) exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list('ABCDE'), name='x') tm.assert_frame_equal(res1, exp1) tm.assert_series_equal(res2, exp2) # series + frame res1, res2 = s.align(df) tm.assert_series_equal(res1, exp2) tm.assert_frame_equal(res2, exp1) def test_filter(self): # Items filtered = self.frame.filter(['A', 'B', 'E']) assert len(filtered.columns) == 2 assert 'E' not in filtered filtered = self.frame.filter(['A', 'B', 'E'], axis='columns') assert len(filtered.columns) == 2 assert 'E' not in filtered # Other axis idx = self.frame.index[0:4] filtered = self.frame.filter(idx, axis='index') expected = self.frame.reindex(index=idx) tm.assert_frame_equal(filtered, expected) # like fcopy = self.frame.copy() fcopy['AA'] = 1 filtered = fcopy.filter(like='A') assert len(filtered.columns) == 2 assert 'AA' in filtered # like with ints in 
column names df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B']) filtered = df.filter(like='_') assert len(filtered.columns) == 2 # regex with ints in column names # from PR #10384 df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C']) expected = DataFrame( 0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)) filtered = df.filter(regex='^[0-9]+$') tm.assert_frame_equal(filtered, expected) expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1']) # shouldn't remove anything filtered = expected.filter(regex='^[0-9]+$') tm.assert_frame_equal(filtered, expected) # pass in None with pytest.raises(TypeError, match='Must pass'): self.frame.filter() with pytest.raises(TypeError, match='Must pass'): self.frame.filter(items=None) with pytest.raises(TypeError, match='Must pass'): self.frame.filter(axis=1) # test mutually exclusive arguments with pytest.raises(TypeError, match='mutually exclusive'): self.frame.filter(items=['one', 'three'], regex='e$', like='bbi') with pytest.raises(TypeError, match='mutually exclusive'): self.frame.filter(items=['one', 'three'], regex='e$', axis=1) with pytest.raises(TypeError, match='mutually exclusive'): self.frame.filter(items=['one', 'three'], regex='e$') with pytest.raises(TypeError, match='mutually exclusive'): self.frame.filter(items=['one', 'three'], like='bbi', axis=0) with pytest.raises(TypeError, match='mutually exclusive'): self.frame.filter(items=['one', 'three'], like='bbi') # objects filtered = self.mixed_frame.filter(like='foo') assert 'foo' in filtered # unicode columns, won't ascii-encode df = self.frame.rename(columns={'B': '\u2202'}) filtered = df.filter(like='C') assert 'C' in filtered def test_filter_regex_search(self): fcopy = self.frame.copy() fcopy['AA'] = 1 # regex filtered = fcopy.filter(regex='[A]+') assert len(filtered.columns) == 2 assert 'AA' in filtered # doesn't have to be at beginning df = DataFrame({'aBBa': [1, 2], 'BBaBB': [1, 2], 'aCCa': [1, 2], 'aCCaBB': [1, 2]}) result = df.filter(regex='BB') exp = df[[x for x in df.columns if 'BB' in x]] assert_frame_equal(result, exp) @pytest.mark.parametrize('name,expected', [ ('a', DataFrame({'a': [1, 2]})), ('a', DataFrame({'a': [1, 2]})), ('あ', DataFrame({'あ': [3, 4]})) ]) def test_filter_unicode(self, name, expected): # GH13101 df = DataFrame({'a': [1, 2], 'あ': [3, 4]}) assert_frame_equal(df.filter(like=name), expected) assert_frame_equal(df.filter(regex=name), expected) @pytest.mark.parametrize('name', ['a', 'a']) def test_filter_bytestring(self, name): # GH13101 df = DataFrame({b'a': [1, 2], b'b': [3, 4]}) expected = DataFrame({b'a': [1, 2]}) assert_frame_equal(df.filter(like=name), expected) assert_frame_equal(df.filter(regex=name), expected) def test_filter_corner(self): empty = DataFrame() result = empty.filter([]) assert_frame_equal(result, empty) result = empty.filter(like='foo') assert_frame_equal(result, empty) def test_take(self): # homogeneous order = [3, 1, 2, 0] for df in [self.frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ['D', 'B', 'C', 'A']] assert_frame_equal(result, expected, check_names=False) # negative indices order = [2, 1, -1] for df in [self.frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) with tm.assert_produces_warning(FutureWarning): result = df.take(order, convert=True, axis=0) assert_frame_equal(result, 
expected) with tm.assert_produces_warning(FutureWarning): result = df.take(order, convert=False, axis=0) assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ['C', 'B', 'D']] assert_frame_equal(result, expected, check_names=False) # illegal indices msg = "indices are out-of-bounds" with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, 30], axis=0) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, -31], axis=0) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, 5], axis=1) with pytest.raises(IndexError, match=msg): df.take([3, 1, 2, -5], axis=1) # mixed-dtype order = [4, 1, 2, 0, 3] for df in [self.mixed_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']] assert_frame_equal(result, expected) # negative indices order = [4, 1, -2] for df in [self.mixed_frame]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ['foo', 'B', 'D']] assert_frame_equal(result, expected) # by dtype order = [1, 2, 0, 3] for df in [self.mixed_float, self.mixed_int]: result = df.take(order, axis=0) expected = df.reindex(df.index.take(order)) assert_frame_equal(result, expected) # axis = 1 result = df.take(order, axis=1) expected = df.loc[:, ['B', 'C', 'A', 'D']] assert_frame_equal(result, expected) def test_reindex_boolean(self): frame = DataFrame(np.ones((10, 2), dtype=bool), index=np.arange(0, 20, 2), columns=[0, 2]) reindexed = frame.reindex(np.arange(10)) assert reindexed.values.dtype == np.object_ assert isna(reindexed[0][1]) reindexed = frame.reindex(columns=range(3)) assert reindexed.values.dtype == np.object_ assert isna(reindexed[1]).all() def test_reindex_objects(self): reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B']) assert 'foo' in reindexed reindexed = self.mixed_frame.reindex(columns=['A', 'B']) assert 'foo' not in reindexed def test_reindex_corner(self): index = Index(['a', 'b', 'c']) dm = self.empty.reindex(index=[1, 2, 3]) reindexed = dm.reindex(columns=index) tm.assert_index_equal(reindexed.columns, index) # ints are weird smaller = self.intframe.reindex(columns=['A', 'B', 'E']) assert smaller['E'].dtype == np.float64 def test_reindex_axis(self): cols = ['A', 'B', 'E'] with tm.assert_produces_warning(FutureWarning) as m: reindexed1 = self.intframe.reindex_axis(cols, axis=1) assert 'reindex' in str(m[0].message) reindexed2 = self.intframe.reindex(columns=cols) assert_frame_equal(reindexed1, reindexed2) rows = self.intframe.index[0:5] with tm.assert_produces_warning(FutureWarning) as m: reindexed1 = self.intframe.reindex_axis(rows, axis=0) assert 'reindex' in str(m[0].message) reindexed2 = self.intframe.reindex(index=rows) assert_frame_equal(reindexed1, reindexed2) msg = ("No axis named 2 for object type" " <class 'pandas.core.frame.DataFrame'>") with pytest.raises(ValueError, match=msg): self.intframe.reindex_axis(rows, axis=2) # no-op case cols = self.frame.columns.copy() with tm.assert_produces_warning(FutureWarning) as m: newFrame = self.frame.reindex_axis(cols, axis=1) assert 'reindex' in str(m[0].message) assert_frame_equal(newFrame, self.frame) def test_reindex_with_nans(self): df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]], columns=['a', 'b'], index=[100.0, 101.0, np.nan, 102.0, 103.0]) result = 
df.reindex(index=[101.0, 102.0, 103.0]) expected = df.iloc[[1, 3, 4]] assert_frame_equal(result, expected) result = df.reindex(index=[103.0]) expected = df.iloc[[4]] assert_frame_equal(result, expected) result = df.reindex(index=[101.0]) expected = df.iloc[[1]] assert_frame_equal(result, expected) def test_reindex_multi(self): df = DataFrame(np.random.randn(3, 3)) result = df.reindex(index=range(4), columns=range(4)) expected = df.reindex(list(range(4))).reindex(columns=range(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) result = df.reindex(index=range(4), columns=range(4)) expected = df.reindex(list(range(4))).reindex(columns=range(4)) assert_frame_equal(result, expected) df = DataFrame(np.random.randint(0, 10, (3, 3))) result = df.reindex(index=range(2), columns=range(2)) expected = df.reindex(range(2)).reindex(columns=range(2)) assert_frame_equal(result, expected) df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c']) result = df.reindex(index=[0, 1], columns=['a', 'b']) expected = df.reindex([0, 1]).reindex(columns=['a', 'b']) assert_frame_equal(result, expected) def test_reindex_multi_categorical_time(self): # https://github.com/pandas-dev/pandas/issues/21390 midx = pd.MultiIndex.from_product( [Categorical(['a', 'b', 'c']), Categorical(date_range("2012-01-01", periods=3, freq='H'))]) df = pd.DataFrame({'a': range(len(midx))}, index=midx) df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]] result = df2.reindex(midx) expected = pd.DataFrame( {'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx) assert_frame_equal(result, expected) data = [[1, 2, 3], [1, 2, 3]] @pytest.mark.parametrize('actual', [ DataFrame(data=data, index=['a', 'a']), DataFrame(data=data, index=['a', 'b']), DataFrame(data=data, index=['a', 'b']).set_index([0, 1]), DataFrame(data=data, index=['a', 'a']).set_index([0, 1]) ]) def test_raise_on_drop_duplicate_index(self, actual): # issue 19186 level = 0 if isinstance(actual.index, MultiIndex) else None with pytest.raises(KeyError): actual.drop('c', level=level, axis=0) with pytest.raises(KeyError): actual.T.drop('c', level=level, axis=1) expected_no_err = actual.drop('c', axis=0, level=level, errors='ignore') assert_frame_equal(expected_no_err, actual) expected_no_err = actual.T.drop('c', axis=1, level=level, errors='ignore') assert_frame_equal(expected_no_err.T, actual) @pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]]) @pytest.mark.parametrize('drop_labels', [[], [1], [2]]) def test_drop_empty_list(self, index, drop_labels): # GH 21494 expected_index = [i for i in index if i not in drop_labels] frame = pd.DataFrame(index=index).drop(drop_labels) tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index)) @pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]]) @pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]]) def test_drop_non_empty_list(self, index, drop_labels): # GH 21494 with pytest.raises(KeyError, match='not found in axis'): pd.DataFrame(index=index).drop(drop_labels)
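# Illustrative sketch: a small stand-alone example of the labels/axis vs. index/columns
# equivalence exercised by test_drop_api_equivalence and test_reindex_api_equivalence
# above (GH12392); the frame used here is assumed only for illustration.
import pandas as pd

df = pd.DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
                  index=['a', 'b', 'c'], columns=['d', 'e', 'f'])

# Dropping a column: positional axis argument vs. the keyword form.
assert df.drop('d', axis=1).equals(df.drop(columns='d'))

# Reindexing rows and columns in one call vs. two chained calls.
left = df.reindex(index=['b', 'a'], columns=['e', 'd'])
right = df.reindex(['b', 'a']).reindex(columns=['e', 'd'])
assert left.equals(right)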
bsd-3-clause
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/feature_extraction/text.py
1
49725
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # Robert Layton <robertlayton@gmail.com> # Jochen Wersdörfer <jochen@wersdoerfer.de> # Roman Sinayev <roman.sinayev@gmail.com> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ from __future__ import unicode_literals import array from collections import Mapping, defaultdict import numbers from operator import itemgetter import re import unicodedata import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals.six.moves import xrange from ..preprocessing import normalize from .hashing import FeatureHasher from .stop_words import ENGLISH_STOP_WORDS from ..utils import deprecated from ..externals import six __all__ = ['CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. See also -------- strip_accents_ascii Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. """ return ''.join([c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. See also -------- strip_accents_unicode Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) else: # assume it's a collection return stop class VectorizerMixin(object): """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. 
""" if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens tokens = [] n_original_tokens = len(original_tokens) for n in xrange(min_n, min(max_n + 1, n_original_tokens + 1)): for i in xrange(n_original_tokens - n + 1): tokens.append(" ".join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) ngrams = [] min_n, max_n = self.ngram_range for n in xrange(min_n, min(max_n + 1, text_len + 1)): for i in xrange(text_len - n + 1): ngrams.append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams excluding any whitespace (operating only inside word boundaries)""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in xrange(min_n, max_n + 1): offset = 0 ngrams.append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams.append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" if self.preprocessor is not None: return self.preprocessor # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the cost of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) if self.lowercase: return lambda x: strip_accents(x.lower()) else: return strip_accents def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens""" if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) return lambda doc: token_pattern.findall(doc) def get_stop_words(self): """Build or fetch the effective stop words list""" return _check_stop_list(self.stop_words) def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if self.analyzer == 'char': return lambda doc: self._char_ngrams(preprocess(self.decode(doc))) elif self.analyzer == 'char_wb': return lambda doc: self._char_wb_ngrams( preprocess(self.decode(doc))) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return lambda doc: self._word_ngrams( tokenize(preprocess(self.decode(doc))), stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _check_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(six.itervalues(vocabulary)) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in xrange(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False @property @deprecated("The `fixed_vocabulary` attribute is deprecated and will be " "removed in 0.18. Please use `fixed_vocabulary_` instead.") def fixed_vocabulary(self): return self.fixed_vocabulary_ class HashingVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. 
There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Parameters ---------- input: string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents: {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer: string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor: callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer: callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range: tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words: string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. lowercase: boolean, default True Convert all characters to lowercase before tokenizing. token_pattern: string Regular expression denoting what constitutes a "token", only used if `analyzer == 'word'`. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). n_features : integer, optional, (2 ** 20) by default The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. binary: boolean, False by default. 
If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype: type, optional Type of the matrix returned by fit_transform() or transform(). non_negative : boolean, optional Whether output matrices should contain non-negative values only; effectively calls abs on the matrix prior to returning it. When True, output values can be interpreted as frequencies. When False, output values will have expected value zero. See also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, input='content', charset=None, encoding='utf-8', decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', non_negative=False, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error if charset is not None: warnings.warn("The charset parameter is deprecated as of version " "0.14 and will be removed in 0.16. Use encoding " "instead.", DeprecationWarning) self.encoding = charset if charset_error is not None: warnings.warn("The charset_error parameter is deprecated as of " "version 0.14 and will be removed in 0.16. Use " "decode_error instead.", DeprecationWarning) self.decode_error = charset_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.non_negative = non_negative self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless.""" # triggers a parameter validation self._get_hasher().fit(X, y=y) return self def transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : (ignored) Returns ------- X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix. """ analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X # Alias transform to fit_transform for convenience fit_transform = transform def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, non_negative=self.non_negative) def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) else: return np.diff(sp.csc_matrix(X, copy=False).indptr) class CountVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.coo_matrix. 
If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, True by default Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if `tokenize == 'word'`. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, optional, 1 by default When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. 
If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : optional, None by default If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : boolean, False by default. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). Attributes ---------- `vocabulary_` : dict A mapping of terms to feature indices. `stop_words_` : set Terms that were ignored because they occurred in either too many (`max_df`) or in too few (`min_df`) documents. This is only available if no vocabulary was given. See also -------- HashingVectorizer, TfidfVectorizer """ def __init__(self, input='content', encoding='utf-8', charset=None, decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error if charset is not None: warnings.warn("The charset parameter is deprecated as of version " "0.14 and will be removed in 0.16. Use encoding " "instead.", DeprecationWarning) self.encoding = charset if charset_error is not None: warnings.warn("The charset_error parameter is deprecated as of " "version 0.14 and will be removed in 0.16. Use " "decode_error instead.", DeprecationWarning) self.decode_error = charset_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df of min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for new_val, (term, old_val) in enumerate(sorted_features): map_index[new_val] = old_val vocabulary[term] = new_val return X[:, map_index] def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. 
""" if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = _make_int_array() indptr = _make_int_array() indptr.append(0) for doc in raw_documents: for feature in analyze(doc): try: j_indices.append(vocabulary[feature]) except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") # some Python/Scipy versions won't accept an array.array: if j_indices: j_indices = np.frombuffer(j_indices, dtype=np.intc) else: j_indices = np.array([], dtype=np.int32) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.ones(len(j_indices)) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sum_duplicates() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array, [n_samples, n_features] Document-term matrix. """ # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. 
self._check_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix, [n_samples, n_features] Document-term matrix. """ if not hasattr(self, 'vocabulary_'): self._check_vocabulary() if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array, sparse matrix}, shape = [n_samples, n_features] Returns ------- X_inv : list of arrays, len = n_samples List of arrays of terms. """ if sp.issparse(X): # We need CSR format for fast row manipulations. X = X.tocsr() else: # We need to convert X to a matrix, so that the indexing # returns 2D objects X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name""" if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") return [t for t, i in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))] def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(BaseEstimator, TransformerMixin): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf, instead of tf * idf. The effect of this is that terms with zero idf, i.e. that occur in all documents of a training set, will not be entirely ignored. 
The formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR, as follows: Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True. Idf is "t" when use_idf is given, "n" (none) otherwise. Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None. Parameters ---------- norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, optional Enable inverse-document-frequency reweighting. smooth_idf : boolean, optional Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, optional Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). References ---------- .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74.` .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120.` """ def __init__(self, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights) Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts """ if not sp.issparse(X): X = sp.csc_matrix(X) if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log1p instead of log makes sure terms with zero idf don't get # suppressed entirely idf = np.log(float(n_samples) / df) + 1.0 self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features) return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts Returns ------- vectors : sparse matrix, [n_samples, n_features] """ if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float): # preserve float family dtype X = sp.csr_matrix(X, copy=copy) else: # convert counts or binary occurrences to floats X = sp.csr_matrix(X, dtype=np.float64, copy=copy) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: if not hasattr(self, "_idf_diag"): raise ValueError("idf vector not fitted") expected_n_features = self._idf_diag.shape[0] if n_features != expected_n_features: raise ValueError("Input has n_features=%d while the model" " has been trained with n_features=%d" % ( n_features, expected_n_features)) # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): if hasattr(self, "_idf_diag"): return np.ravel(self._idf_diag.sum(axis=0)) else: return None class TfidfVectorizer(CountVectorizer): """Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. 
Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char'} or callable Whether the feature should be made of word or character n-grams. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, default True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if `analyzer == 'word'`. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default When building the vocabulary ignore terms that have a term frequency strictly higher than the given threshold (corpus specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, optional, 1 by default When building the vocabulary ignore terms that have a term frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : optional, None by default If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. binary : boolean, False by default. 
If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs.) dtype : type, optional Type of the matrix returned by fit_transform() or transform(). norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, optional Enable inverse-document-frequency reweighting. smooth_idf : boolean, optional Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, optional Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- ``idf_`` : array, shape = [n_features], or None The learned idf vector (global term weights) when ``use_idf`` is set to True, None otherwise. See also -------- CountVectorizer Tokenize the documents and count the occurrences of token and return them as a sparse matrix TfidfTransformer Apply Term Frequency Inverse Document Frequency normalization to a sparse matrix of occurrence counts. """ def __init__(self, input='content', encoding='utf-8', charset=None, decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super(TfidfVectorizer, self).__init__( input=input, charset=charset, charset_error=charset_error, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- self : TfidfVectorizer """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. 
""" X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents, copy=True): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
apache-2.0
cbertinato/pandas
pandas/tests/frame/test_combine_concat.py
1
34741
from datetime import datetime import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, Series, Timestamp, date_range import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal class TestDataFrameConcatCommon: def test_concat_multiple_frames_dtypes(self): # GH 2759 A = DataFrame(data=np.ones((10, 2)), columns=[ 'foo', 'bar'], dtype=np.float64) B = DataFrame(data=np.ones((10, 2)), dtype=np.float32) results = pd.concat((A, B), axis=1).get_dtype_counts() expected = Series(dict(float64=2, float32=2)) assert_series_equal(results, expected) @pytest.mark.parametrize('data', [ pd.date_range('2000', periods=4), pd.date_range('2000', periods=4, tz="US/Central"), pd.period_range('2000', periods=4), pd.timedelta_range(0, periods=4), ]) def test_combine_datetlike_udf(self, data): # https://github.com/pandas-dev/pandas/issues/23079 df = pd.DataFrame({"A": data}) other = df.copy() df.iloc[1, 0] = None def combiner(a, b): return b result = df.combine(other, combiner) tm.assert_frame_equal(result, other) def test_concat_multiple_tzs(self): # GH 12467 # combining datetime tz-aware and naive DataFrames ts1 = Timestamp('2015-01-01', tz=None) ts2 = Timestamp('2015-01-01', tz='UTC') ts3 = Timestamp('2015-01-01', tz='EST') df1 = DataFrame(dict(time=[ts1])) df2 = DataFrame(dict(time=[ts2])) df3 = DataFrame(dict(time=[ts3])) results = pd.concat([df1, df2]).reset_index(drop=True) expected = DataFrame(dict(time=[ts1, ts2]), dtype=object) assert_frame_equal(results, expected) results = pd.concat([df1, df3]).reset_index(drop=True) expected = DataFrame(dict(time=[ts1, ts3]), dtype=object) assert_frame_equal(results, expected) results = pd.concat([df2, df3]).reset_index(drop=True) expected = DataFrame(dict(time=[ts2, ts3])) assert_frame_equal(results, expected) @pytest.mark.parametrize( 't1', [ '2015-01-01', pytest.param(pd.NaT, marks=pytest.mark.xfail( reason='GH23037 incorrect dtype when concatenating'))]) def test_concat_tz_NaT(self, t1): # GH 22796 # Concating tz-aware multicolumn DataFrames ts1 = Timestamp(t1, tz='UTC') ts2 = Timestamp('2015-01-01', tz='UTC') ts3 = Timestamp('2015-01-01', tz='UTC') df1 = DataFrame([[ts1, ts2]]) df2 = DataFrame([[ts3]]) result = pd.concat([df1, df2]) expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0]) assert_frame_equal(result, expected) def test_concat_tz_not_aligned(self): # GH 22796 ts = pd.to_datetime([1, 2]).tz_localize("UTC") a = pd.DataFrame({"A": ts}) b = pd.DataFrame({"A": ts, "B": ts}) result = pd.concat([a, b], sort=True, ignore_index=True) expected = pd.DataFrame({"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}) assert_frame_equal(result, expected) def test_concat_tuple_keys(self): # GH 14438 df1 = pd.DataFrame(np.ones((2, 2)), columns=list('AB')) df2 = pd.DataFrame(np.ones((3, 2)) * 2, columns=list('AB')) results = pd.concat((df1, df2), keys=[('bee', 'bah'), ('bee', 'boo')]) expected = pd.DataFrame( {'A': {('bee', 'bah', 0): 1.0, ('bee', 'bah', 1): 1.0, ('bee', 'boo', 0): 2.0, ('bee', 'boo', 1): 2.0, ('bee', 'boo', 2): 2.0}, 'B': {('bee', 'bah', 0): 1.0, ('bee', 'bah', 1): 1.0, ('bee', 'boo', 0): 2.0, ('bee', 'boo', 1): 2.0, ('bee', 'boo', 2): 2.0}}) assert_frame_equal(results, expected) def test_append_series_dict(self): df = DataFrame(np.random.randn(5, 4), columns=['foo', 'bar', 'baz', 'qux']) series = df.loc[4] msg = 'Indexes have overlapping values' with pytest.raises(ValueError, match=msg): df.append(series, verify_integrity=True) series.name = None msg = 'Can 
only append a Series if ignore_index=True' with pytest.raises(TypeError, match=msg): df.append(series, verify_integrity=True) result = df.append(series[::-1], ignore_index=True) expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True) assert_frame_equal(result, expected) # dict result = df.append(series.to_dict(), ignore_index=True) assert_frame_equal(result, expected) result = df.append(series[::-1][:3], ignore_index=True) expected = df.append(DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True) assert_frame_equal(result, expected.loc[:, result.columns]) # can append when name set row = df.loc[4] row.name = 5 result = df.append(row) expected = df.append(df[-1:], ignore_index=True) assert_frame_equal(result, expected) def test_append_list_of_series_dicts(self): df = DataFrame(np.random.randn(5, 4), columns=['foo', 'bar', 'baz', 'qux']) dicts = [x.to_dict() for idx, x in df.iterrows()] result = df.append(dicts, ignore_index=True) expected = df.append(df, ignore_index=True) assert_frame_equal(result, expected) # different columns dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4}, {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}] result = df.append(dicts, ignore_index=True, sort=True) expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) assert_frame_equal(result, expected) def test_append_missing_cols(self): # GH22252 # exercise the conditional branch in append method where the data # to be appended is a list and does not contain all columns that are in # the target DataFrame df = DataFrame(np.random.randn(5, 4), columns=['foo', 'bar', 'baz', 'qux']) dicts = [{'foo': 9}, {'bar': 10}] with tm.assert_produces_warning(None): result = df.append(dicts, ignore_index=True, sort=True) expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) assert_frame_equal(result, expected) def test_append_empty_dataframe(self): # Empty df append empty df df1 = DataFrame() df2 = DataFrame() result = df1.append(df2) expected = df1.copy() assert_frame_equal(result, expected) # Non-empty df append empty df df1 = DataFrame(np.random.randn(5, 2)) df2 = DataFrame() result = df1.append(df2) expected = df1.copy() assert_frame_equal(result, expected) # Empty df with columns append empty df df1 = DataFrame(columns=['bar', 'foo']) df2 = DataFrame() result = df1.append(df2) expected = df1.copy() assert_frame_equal(result, expected) # Non-Empty df with columns append empty df df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo']) df2 = DataFrame() result = df1.append(df2) expected = df1.copy() assert_frame_equal(result, expected) def test_append_dtypes(self): # GH 5754 # row appends of different dtypes (so need to do by-item) # can sometimes infer the correct type df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(5)) df2 = DataFrame() result = df1.append(df2) expected = df1.copy() assert_frame_equal(result, expected) df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1)) df2 = DataFrame({'bar': 'foo'}, index=range(1, 2)) result = df1.append(df2) expected = DataFrame({'bar': [Timestamp('20130101'), 'foo']}) assert_frame_equal(result, expected) df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1)) df2 = DataFrame({'bar': np.nan}, index=range(1, 2)) result = df1.append(df2) expected = DataFrame( {'bar': Series([Timestamp('20130101'), np.nan], dtype='M8[ns]')}) assert_frame_equal(result, expected) df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1)) df2 = DataFrame({'bar': np.nan}, index=range(1, 2), 
dtype=object) result = df1.append(df2) expected = DataFrame( {'bar': Series([Timestamp('20130101'), np.nan], dtype='M8[ns]')}) assert_frame_equal(result, expected) df1 = DataFrame({'bar': np.nan}, index=range(1)) df2 = DataFrame({'bar': Timestamp('20130101')}, index=range(1, 2)) result = df1.append(df2) expected = DataFrame( {'bar': Series([np.nan, Timestamp('20130101')], dtype='M8[ns]')}) assert_frame_equal(result, expected) df1 = DataFrame({'bar': Timestamp('20130101')}, index=range(1)) df2 = DataFrame({'bar': 1}, index=range(1, 2), dtype=object) result = df1.append(df2) expected = DataFrame({'bar': Series([Timestamp('20130101'), 1])}) assert_frame_equal(result, expected) def test_update(self): df = DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3], [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other) expected = DataFrame([[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.]]) assert_frame_equal(df, expected) def test_update_dtypes(self): # gh 3016 df = DataFrame([[1., 2., False, True], [4., 5., True, False]], columns=['A', 'B', 'bool1', 'bool2']) other = DataFrame([[45, 45]], index=[0], columns=['A', 'B']) df.update(other) expected = DataFrame([[45., 45., False, True], [4., 5., True, False]], columns=['A', 'B', 'bool1', 'bool2']) assert_frame_equal(df, expected) def test_update_nooverwrite(self): df = DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3], [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other, overwrite=False) expected = DataFrame([[1.5, np.nan, 3], [1.5, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 3.]]) assert_frame_equal(df, expected) def test_update_filtered(self): df = DataFrame([[1.5, np.nan, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3], [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other, filter_func=lambda x: x > 2) expected = DataFrame([[1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.]]) assert_frame_equal(df, expected) @pytest.mark.parametrize('bad_kwarg, exception, msg', [ # errors must be 'ignore' or 'raise' ({'errors': 'something'}, ValueError, 'The parameter errors must.*'), ({'join': 'inner'}, NotImplementedError, 'Only left join is supported') ]) def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg): df = DataFrame([[1.5, 1, 3.]]) with pytest.raises(exception, match=msg): df.update(df, **bad_kwarg) def test_update_raise_on_overlap(self): df = DataFrame([[1.5, 1, 3.], [1.5, np.nan, 3.], [1.5, np.nan, 3], [1.5, np.nan, 3]]) other = DataFrame([[2., np.nan], [np.nan, 7]], index=[1, 3], columns=[1, 2]) with pytest.raises(ValueError, match="Data overlaps"): df.update(other, errors='raise') @pytest.mark.parametrize('raise_conflict', [True, False]) def test_update_deprecation(self, raise_conflict): df = DataFrame([[1.5, 1, 3.]]) other = DataFrame() with tm.assert_produces_warning(FutureWarning): df.update(other, raise_conflict=raise_conflict) def test_update_from_non_df(self): d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])} df = DataFrame(d) d['a'] = Series([5, 6, 7, 8]) df.update(d) expected = DataFrame(d) assert_frame_equal(df, expected) d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]} df = DataFrame(d) d['a'] = [5, 6, 7, 8] df.update(d) expected = DataFrame(d) assert_frame_equal(df, expected) def test_update_datetime_tz(self): # GH 25807 result = DataFrame([pd.Timestamp('2019', tz='UTC')]) result.update(result) expected = 
DataFrame([pd.Timestamp('2019', tz='UTC')]) assert_frame_equal(result, expected) def test_join_str_datetime(self): str_dates = ['20120209', '20120222'] dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] A = DataFrame(str_dates, index=range(2), columns=['aa']) C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates) tst = A.join(C, on='aa') assert len(tst.columns) == 3 def test_join_multiindex_leftright(self): # GH 10741 df1 = (pd.DataFrame([['a', 'x', 0.471780], ['a', 'y', 0.774908], ['a', 'z', 0.563634], ['b', 'x', -0.353756], ['b', 'y', 0.368062], ['b', 'z', -1.721840], ['c', 'x', 1], ['c', 'y', 2], ['c', 'z', 3]], columns=['first', 'second', 'value1']) .set_index(['first', 'second'])) df2 = (pd.DataFrame([['a', 10], ['b', 20]], columns=['first', 'value2']) .set_index(['first'])) exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10], [-0.353756, 20], [0.368062, 20], [-1.721840, 20], [1.000000, np.nan], [2.000000, np.nan], [3.000000, np.nan]], index=df1.index, columns=['value1', 'value2']) # these must be the same results (but columns are flipped) assert_frame_equal(df1.join(df2, how='left'), exp) assert_frame_equal(df2.join(df1, how='right'), exp[['value2', 'value1']]) exp_idx = pd.MultiIndex.from_product([['a', 'b'], ['x', 'y', 'z']], names=['first', 'second']) exp = pd.DataFrame([[0.471780, 10], [0.774908, 10], [0.563634, 10], [-0.353756, 20], [0.368062, 20], [-1.721840, 20]], index=exp_idx, columns=['value1', 'value2']) assert_frame_equal(df1.join(df2, how='right'), exp) assert_frame_equal(df2.join(df1, how='left'), exp[['value2', 'value1']]) def test_concat_named_keys(self): # GH 14252 df = pd.DataFrame({'foo': [1, 2], 'bar': [0.1, 0.2]}) index = Index(['a', 'b'], name='baz') concatted_named_from_keys = pd.concat([df, df], keys=index) expected_named = pd.DataFrame( {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]}, index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]), names=['baz', None])) assert_frame_equal(concatted_named_from_keys, expected_named) index_no_name = Index(['a', 'b'], name=None) concatted_named_from_names = pd.concat( [df, df], keys=index_no_name, names=['baz']) assert_frame_equal(concatted_named_from_names, expected_named) concatted_unnamed = pd.concat([df, df], keys=index_no_name) expected_unnamed = pd.DataFrame( {'foo': [1, 2, 1, 2], 'bar': [0.1, 0.2, 0.1, 0.2]}, index=pd.MultiIndex.from_product((['a', 'b'], [0, 1]), names=[None, None])) assert_frame_equal(concatted_unnamed, expected_unnamed) def test_concat_axis_parameter(self): # GH 14369 df1 = pd.DataFrame({'A': [0.1, 0.2]}, index=range(2)) df2 = pd.DataFrame({'A': [0.3, 0.4]}, index=range(2)) # Index/row/0 DataFrame expected_index = pd.DataFrame( {'A': [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1]) concatted_index = pd.concat([df1, df2], axis='index') assert_frame_equal(concatted_index, expected_index) concatted_row = pd.concat([df1, df2], axis='rows') assert_frame_equal(concatted_row, expected_index) concatted_0 = pd.concat([df1, df2], axis=0) assert_frame_equal(concatted_0, expected_index) # Columns/1 DataFrame expected_columns = pd.DataFrame( [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=['A', 'A']) concatted_columns = pd.concat([df1, df2], axis='columns') assert_frame_equal(concatted_columns, expected_columns) concatted_1 = pd.concat([df1, df2], axis=1) assert_frame_equal(concatted_1, expected_columns) series1 = pd.Series([0.1, 0.2]) series2 = pd.Series([0.3, 0.4]) # Index/row/0 Series expected_index_series = pd.Series( [0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1]) 
concatted_index_series = pd.concat([series1, series2], axis='index') assert_series_equal(concatted_index_series, expected_index_series) concatted_row_series = pd.concat([series1, series2], axis='rows') assert_series_equal(concatted_row_series, expected_index_series) concatted_0_series = pd.concat([series1, series2], axis=0) assert_series_equal(concatted_0_series, expected_index_series) # Columns/1 Series expected_columns_series = pd.DataFrame( [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]) concatted_columns_series = pd.concat( [series1, series2], axis='columns') assert_frame_equal(concatted_columns_series, expected_columns_series) concatted_1_series = pd.concat([series1, series2], axis=1) assert_frame_equal(concatted_1_series, expected_columns_series) # Testing ValueError with pytest.raises(ValueError, match='No axis named'): pd.concat([series1, series2], axis='something') def test_concat_numerical_names(self): # #15262 # #12223 df = pd.DataFrame({'col': range(9)}, dtype='int32', index=(pd.MultiIndex .from_product([['A0', 'A1', 'A2'], ['B0', 'B1', 'B2']], names=[1, 2]))) result = pd.concat((df.iloc[:2, :], df.iloc[-2:, :])) expected = pd.DataFrame({'col': [0, 1, 7, 8]}, dtype='int32', index=pd.MultiIndex.from_tuples([('A0', 'B0'), ('A0', 'B1'), ('A2', 'B1'), ('A2', 'B2')], names=[1, 2])) tm.assert_frame_equal(result, expected) def test_concat_astype_dup_col(self): # gh 23049 df = pd.DataFrame([{'a': 'b'}]) df = pd.concat([df, df], axis=1) result = df.astype('category') expected = pd.DataFrame(np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]).astype("category") tm.assert_frame_equal(result, expected) class TestDataFrameCombineFirst: def test_combine_first_mixed(self): a = Series(['a', 'b'], index=range(2)) b = Series(range(2), index=range(2)) f = DataFrame({'A': a, 'B': b}) a = Series(['a', 'b'], index=range(5, 7)) b = Series(range(2), index=range(5, 7)) g = DataFrame({'A': a, 'B': b}) exp = pd.DataFrame({'A': list('abab'), 'B': [0., 1., 0., 1.]}, index=[0, 1, 5, 6]) combined = f.combine_first(g) tm.assert_frame_equal(combined, exp) def test_combine_first(self, float_frame): # disjoint head, tail = float_frame[:5], float_frame[5:] combined = head.combine_first(tail) reordered_frame = float_frame.reindex(combined.index) assert_frame_equal(combined, reordered_frame) assert tm.equalContents(combined.columns, float_frame.columns) assert_series_equal(combined['A'], reordered_frame['A']) # same index fcopy = float_frame.copy() fcopy['A'] = 1 del fcopy['C'] fcopy2 = float_frame.copy() fcopy2['B'] = 0 del fcopy2['D'] combined = fcopy.combine_first(fcopy2) assert (combined['A'] == 1).all() assert_series_equal(combined['B'], fcopy['B']) assert_series_equal(combined['C'], fcopy2['C']) assert_series_equal(combined['D'], fcopy['D']) # overlap head, tail = reordered_frame[:10].copy(), reordered_frame head['A'] = 1 combined = head.combine_first(tail) assert (combined['A'][:10] == 1).all() # reverse overlap tail['A'][:10] = 0 combined = tail.combine_first(head) assert (combined['A'][:10] == 0).all() # no overlap f = float_frame[:10] g = float_frame[10:] combined = f.combine_first(g) assert_series_equal(combined['A'].reindex(f.index), f['A']) assert_series_equal(combined['A'].reindex(g.index), g['A']) # corner cases comb = float_frame.combine_first(DataFrame()) assert_frame_equal(comb, float_frame) comb = DataFrame().combine_first(float_frame) assert_frame_equal(comb, float_frame) comb = float_frame.combine_first(DataFrame(index=["faz", "boo"])) assert "faz" in comb.index # #2525 df = 
DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)]) df2 = DataFrame(columns=['b']) result = df.combine_first(df2) assert 'b' in result def test_combine_first_mixed_bug(self): idx = Index(['a', 'b', 'c', 'e']) ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx) ser2 = Series(['a', 'b', 'c', 'e'], index=idx) ser3 = Series([12, 4, 5, 97], index=idx) frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3}) idx = Index(['a', 'b', 'c', 'f']) ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx) ser2 = Series(['a', 'b', 'c', 'f'], index=idx) ser3 = Series([12, 4, 5, 97], index=idx) frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3}) combined = frame1.combine_first(frame2) assert len(combined.columns) == 5 # gh 3016 (same as in update) df = DataFrame([[1., 2., False, True], [4., 5., True, False]], columns=['A', 'B', 'bool1', 'bool2']) other = DataFrame([[45, 45]], index=[0], columns=['A', 'B']) result = df.combine_first(other) assert_frame_equal(result, df) df.loc[0, 'A'] = np.nan result = df.combine_first(other) df.loc[0, 'A'] = 45 assert_frame_equal(result, df) # doc example df1 = DataFrame({'A': [1., np.nan, 3., 5., np.nan], 'B': [np.nan, 2., 3., np.nan, 6.]}) df2 = DataFrame({'A': [5., 2., 4., np.nan, 3., 7.], 'B': [np.nan, np.nan, 3., 4., 6., 8.]}) result = df1.combine_first(df2) expected = DataFrame( {'A': [1, 2, 3, 5, 3, 7.], 'B': [np.nan, 2, 3, 4, 6, 8]}) assert_frame_equal(result, expected) # GH3552, return object dtype with bools df1 = DataFrame( [[np.nan, 3., True], [-4.6, np.nan, True], [np.nan, 7., False]]) df2 = DataFrame( [[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2]) result = df1.combine_first(df2)[2] expected = Series([True, True, False], name=2) assert_series_equal(result, expected) # GH 3593, converting datetime64[ns] incorrectly df0 = DataFrame({"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}) df1 = DataFrame({"a": [None, None, None]}) df2 = df1.combine_first(df0) assert_frame_equal(df2, df0) df2 = df0.combine_first(df1) assert_frame_equal(df2, df0) df0 = DataFrame({"a": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]}) df1 = DataFrame({"a": [datetime(2000, 1, 2), None, None]}) df2 = df1.combine_first(df0) result = df0.copy() result.iloc[0, :] = df1.iloc[0, :] assert_frame_equal(df2, result) df2 = df0.combine_first(df1) assert_frame_equal(df2, df0) def test_combine_first_align_nan(self): # GH 7509 (not fixed) dfa = pd.DataFrame([[pd.Timestamp('2011-01-01'), 2]], columns=['a', 'b']) dfb = pd.DataFrame([[4], [5]], columns=['b']) assert dfa['a'].dtype == 'datetime64[ns]' assert dfa['b'].dtype == 'int64' res = dfa.combine_first(dfb) exp = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'), pd.NaT], 'b': [2., 5.]}, columns=['a', 'b']) tm.assert_frame_equal(res, exp) assert res['a'].dtype == 'datetime64[ns]' # ToDo: this must be int64 assert res['b'].dtype == 'float64' res = dfa.iloc[:0].combine_first(dfb) exp = pd.DataFrame({'a': [np.nan, np.nan], 'b': [4, 5]}, columns=['a', 'b']) tm.assert_frame_equal(res, exp) # ToDo: this must be datetime64 assert res['a'].dtype == 'float64' # ToDo: this must be int64 assert res['b'].dtype == 'int64' def test_combine_first_timezone(self): # see gh-7630 data1 = pd.to_datetime('20100101 01:01').tz_localize('UTC') df1 = pd.DataFrame(columns=['UTCdatetime', 'abc'], data=data1, index=pd.date_range('20140627', periods=1)) data2 = pd.to_datetime('20121212 12:12').tz_localize('UTC') df2 = pd.DataFrame(columns=['UTCdatetime', 'xyz'], data=data2, index=pd.date_range('20140628', periods=1)) res = 
df2[['UTCdatetime']].combine_first(df1) exp = pd.DataFrame({'UTCdatetime': [pd.Timestamp('2010-01-01 01:01', tz='UTC'), pd.Timestamp('2012-12-12 12:12', tz='UTC')], 'abc': [pd.Timestamp('2010-01-01 01:01:00', tz='UTC'), pd.NaT]}, columns=['UTCdatetime', 'abc'], index=pd.date_range('20140627', periods=2, freq='D')) tm.assert_frame_equal(res, exp) assert res['UTCdatetime'].dtype == 'datetime64[ns, UTC]' assert res['abc'].dtype == 'datetime64[ns, UTC]' # see gh-10567 dts1 = pd.date_range('2015-01-01', '2015-01-05', tz='UTC') df1 = pd.DataFrame({'DATE': dts1}) dts2 = pd.date_range('2015-01-03', '2015-01-05', tz='UTC') df2 = pd.DataFrame({'DATE': dts2}) res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) assert res['DATE'].dtype == 'datetime64[ns, UTC]' dts1 = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03', '2011-01-04'], tz='US/Eastern') df1 = pd.DataFrame({'DATE': dts1}, index=[1, 3, 5, 7]) dts2 = pd.DatetimeIndex(['2012-01-01', '2012-01-02', '2012-01-03'], tz='US/Eastern') df2 = pd.DataFrame({'DATE': dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.DatetimeIndex(['2011-01-01', '2012-01-01', 'NaT', '2012-01-02', '2011-01-03', '2011-01-04'], tz='US/Eastern') exp = pd.DataFrame({'DATE': exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) # different tz dts1 = pd.date_range('2015-01-01', '2015-01-05', tz='US/Eastern') df1 = pd.DataFrame({'DATE': dts1}) dts2 = pd.date_range('2015-01-03', '2015-01-05') df2 = pd.DataFrame({'DATE': dts2}) # if df1 doesn't have NaN, keep its dtype res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) assert res['DATE'].dtype == 'datetime64[ns, US/Eastern]' dts1 = pd.date_range('2015-01-01', '2015-01-02', tz='US/Eastern') df1 = pd.DataFrame({'DATE': dts1}) dts2 = pd.date_range('2015-01-01', '2015-01-03') df2 = pd.DataFrame({'DATE': dts2}) res = df1.combine_first(df2) exp_dts = [pd.Timestamp('2015-01-01', tz='US/Eastern'), pd.Timestamp('2015-01-02', tz='US/Eastern'), pd.Timestamp('2015-01-03')] exp = pd.DataFrame({'DATE': exp_dts}) tm.assert_frame_equal(res, exp) assert res['DATE'].dtype == 'object' def test_combine_first_timedelta(self): data1 = pd.TimedeltaIndex(['1 day', 'NaT', '3 day', '4day']) df1 = pd.DataFrame({'TD': data1}, index=[1, 3, 5, 7]) data2 = pd.TimedeltaIndex(['10 day', '11 day', '12 day']) df2 = pd.DataFrame({'TD': data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.TimedeltaIndex(['1 day', '10 day', 'NaT', '11 day', '3 day', '4 day']) exp = pd.DataFrame({'TD': exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res['TD'].dtype == 'timedelta64[ns]' def test_combine_first_period(self): data1 = pd.PeriodIndex(['2011-01', 'NaT', '2011-03', '2011-04'], freq='M') df1 = pd.DataFrame({'P': data1}, index=[1, 3, 5, 7]) data2 = pd.PeriodIndex(['2012-01-01', '2012-02', '2012-03'], freq='M') df2 = pd.DataFrame({'P': data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.PeriodIndex(['2011-01', '2012-01', 'NaT', '2012-02', '2011-03', '2011-04'], freq='M') exp = pd.DataFrame({'P': exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res['P'].dtype == data1.dtype # different freq dts2 = pd.PeriodIndex(['2012-01-01', '2012-01-02', '2012-01-03'], freq='D') df2 = pd.DataFrame({'P': dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = [pd.Period('2011-01', freq='M'), pd.Period('2012-01-01', freq='D'), pd.NaT, pd.Period('2012-01-02', freq='D'), pd.Period('2011-03', freq='M'), pd.Period('2011-04', freq='M')] exp = pd.DataFrame({'P': 
exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res['P'].dtype == 'object' def test_combine_first_int(self): # GH14687 - integer series that do no align exactly df1 = pd.DataFrame({'a': [0, 1, 3, 5]}, dtype='int64') df2 = pd.DataFrame({'a': [1, 4]}, dtype='int64') res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) assert res['a'].dtype == 'int64' @pytest.mark.parametrize("val", [1, 1.0]) def test_combine_first_with_asymmetric_other(self, val): # see gh-20699 df1 = pd.DataFrame({'isNum': [val]}) df2 = pd.DataFrame({'isBool': [True]}) res = df1.combine_first(df2) exp = pd.DataFrame({'isBool': [True], 'isNum': [val]}) tm.assert_frame_equal(res, exp) def test_concat_datetime_datetime64_frame(self): # #2624 rows = [] rows.append([datetime(2010, 1, 1), 1]) rows.append([datetime(2010, 1, 2), 'hi']) df2_obj = DataFrame.from_records(rows, columns=['date', 'test']) ind = date_range(start="2000/1/1", freq="D", periods=10) df1 = DataFrame({'date': ind, 'test': range(10)}) # it works! pd.concat([df1, df2_obj]) class TestDataFrameUpdate: def test_update_nan(self): # #15593 #15617 # test 1 df1 = DataFrame({'A': [1.0, 2, 3], 'B': date_range('2000', periods=3)}) df2 = DataFrame({'A': [None, 2, 3]}) expected = df1.copy() df1.update(df2, overwrite=False) tm.assert_frame_equal(df1, expected) # test 2 df1 = DataFrame({'A': [1.0, None, 3], 'B': date_range('2000', periods=3)}) df2 = DataFrame({'A': [None, 2, 3]}) expected = DataFrame({'A': [1.0, 2, 3], 'B': date_range('2000', periods=3)}) df1.update(df2, overwrite=False) tm.assert_frame_equal(df1, expected)
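# --- Illustrative usage sketch (added for exposition; not one of the tests) ---
def _illustrate_combine_and_concat():
    """Minimal sketch of the behaviours exercised by the tests above, on a
    tiny frame.

    Written against the same pandas version as these tests; note that
    ``DataFrame.append`` was removed in later pandas releases.  Names and
    data are invented for the example.
    """
    import numpy as np
    import pandas as pd

    df1 = pd.DataFrame({'A': [1.0, np.nan], 'B': [3, 4]})
    df2 = pd.DataFrame({'A': [10.0, 20.0], 'B': [30, 40]})

    # concat stacks row-wise by default; axis=1 (or 'columns') aligns on the index.
    stacked = pd.concat([df1, df2], ignore_index=True)       # 4 rows
    side_by_side = pd.concat([df1, df2], axis='columns')     # duplicated column labels

    # An unnamed Series can only be appended as a row with ignore_index=True.
    appended = df1.append(pd.Series({'A': 99.0, 'B': 100}), ignore_index=True)

    # combine_first keeps df1's values and fills its NaN cells from df2.
    combined = df1.combine_first(df2)                        # column A becomes [1.0, 20.0]

    # update modifies df1 in place using the non-NaN values of the other frame.
    df1.update(pd.DataFrame({'B': [300, 400]}))

    return stacked, side_by_side, appended, combined, df1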
bsd-3-clause
massmutual/scikit-learn
sklearn/utils/estimator_checks.py
1
54609
from __future__ import print_function import types import warnings import sys import traceback import pickle from copy import deepcopy import numpy as np from scipy import sparse import struct from sklearn.externals.six.moves import zip from sklearn.externals.joblib import hash, Memory from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import META_ESTIMATORS from sklearn.utils.testing import set_random_state from sklearn.utils.testing import assert_greater from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.base import (clone, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, BaseEstimator) from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.random_projection import BaseRandomProjection from sklearn.feature_selection import SelectKBest from sklearn.svm.base import BaseLibSVM, BaseSVC from sklearn.pipeline import make_pipeline from sklearn.decomposition import NMF, ProjectedGradientNMF from sklearn.utils.validation import DataConversionWarning from sklearn.utils import ConvergenceWarning from sklearn.cross_validation import train_test_split from sklearn.utils import shuffle from sklearn.utils.fixes import signature from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris, load_boston, make_blobs BOSTON = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso', 'LassoLars', 'LinearRegression', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression', 'RANSACRegressor', 'RadiusNeighborsRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV'] # Estimators with deprecated transform methods. Should be removed in 0.19 when # _LearntSelectorMixin is removed. 
DEPRECATED_TRANSFORM = [ "RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier", "ExtraTreesRegressor", "RandomTreesEmbedding", "DecisionTreeClassifier", "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor", "LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron", "LogisticRegression", "LogisticRegressionCV", "GradientBoostingClassifier", "GradientBoostingRegressor"] def _yield_non_meta_checks(name, Estimator): yield check_estimators_dtypes yield check_fit_score_takes_y yield check_dtype_object yield check_estimators_fit_returns_self # Check that all estimator yield informative messages when # trained on empty datasets yield check_estimators_empty_data_messages if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']: # SpectralEmbedding is non-deterministic, # see issue #4236 # cross-decomposition's "transform" returns X and Y yield check_pipeline_consistency if name not in ['Imputer']: # Test that all estimators check their input for NaN's and infs yield check_estimators_nan_inf if name not in ['GaussianProcess']: # FIXME! # in particular GaussianProcess! yield check_estimators_overwrite_params if hasattr(Estimator, 'sparsify'): yield check_sparsify_coefficients yield check_estimator_sparse_data # Test that estimators can be pickled, and once pickled # give the same answer as before. yield check_estimators_pickle def _yield_classifier_checks(name, Classifier): # test classfiers can handle non-array data yield check_classifier_data_not_an_array # test classifiers trained on a single label always return this label yield check_classifiers_one_label yield check_classifiers_classes yield check_estimators_partial_fit_n_features # basic consistency testing yield check_classifiers_train if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"] # TODO some complication with -1 label and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]): # We don't raise a warning in these classifiers, as # the column y interface is used by the forests. yield check_supervised_y_2d # test if NotFittedError is raised yield check_estimators_unfitted if 'class_weight' in Classifier().get_params().keys(): yield check_class_weight_classifiers def _yield_regressor_checks(name, Regressor): # TODO: test with intercept # TODO: test with multiple responses # basic testing yield check_regressors_train yield check_regressor_data_not_an_array yield check_estimators_partial_fit_n_features yield check_regressors_no_decision_function yield check_supervised_y_2d if name != 'CCA': # check that the regressor handles int input yield check_regressors_int # Test if NotFittedError is raised yield check_estimators_unfitted def _yield_transformer_checks(name, Transformer): # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer', 'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']: yield check_transformer_data_not_an_array # these don't actually fit the data, so don't raise errors if name not in ['AdditiveChi2Sampler', 'Binarizer', 'FunctionTransformer', 'Normalizer']: # basic tests yield check_transformer_general yield check_transformers_unfitted def _yield_clustering_checks(name, Clusterer): yield check_clusterer_compute_labels_predict if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. 
yield check_clustering yield check_estimators_partial_fit_n_features def _yield_all_checks(name, Estimator): for check in _yield_non_meta_checks(name, Estimator): yield check if issubclass(Estimator, ClassifierMixin): for check in _yield_classifier_checks(name, Estimator): yield check if issubclass(Estimator, RegressorMixin): for check in _yield_regressor_checks(name, Estimator): yield check if issubclass(Estimator, TransformerMixin): if name not in DEPRECATED_TRANSFORM: for check in _yield_transformer_checks(name, Estimator): yield check if issubclass(Estimator, ClusterMixin): for check in _yield_clustering_checks(name, Estimator): yield check yield check_fit2d_predict1d yield check_fit2d_1sample yield check_fit2d_1feature yield check_fit1d_1feature yield check_fit1d_1sample def check_estimator(Estimator): """Check if estimator adheres to sklearn conventions. This estimator will run an extensive test-suite for input validation, shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin from sklearn.base. Parameters ---------- Estimator : class Class to check. """ name = Estimator.__class__.__name__ check_parameters_default_constructible(name, Estimator) for check in _yield_all_checks(name, Estimator): check(name, Estimator) def _boston_subset(n_samples=200): global BOSTON if BOSTON is None: boston = load_boston() X, y = boston.data, boston.target X, y = shuffle(X, y, random_state=0) X, y = X[:n_samples], y[:n_samples] X = StandardScaler().fit_transform(X) BOSTON = X, y return BOSTON def set_testing_parameters(estimator): # set parameters to speed up some estimators and # avoid deprecated behaviour params = estimator.get_params() if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"): estimator.set_params(n_iter=5) if "max_iter" in params: warnings.simplefilter("ignore", ConvergenceWarning) if estimator.max_iter is not None: estimator.set_params(max_iter=min(5, estimator.max_iter)) # LinearSVR if estimator.__class__.__name__ == 'LinearSVR': estimator.set_params(max_iter=20) # NMF if estimator.__class__.__name__ == 'NMF': estimator.set_params(max_iter=100) if "n_resampling" in params: # randomized lasso estimator.set_params(n_resampling=5) if "n_estimators" in params: # especially gradient boosting with default 100 estimator.set_params(n_estimators=min(5, estimator.n_estimators)) if "max_trials" in params: # RANSAC estimator.set_params(max_trials=10) if "n_init" in params: # K-Means estimator.set_params(n_init=2) if "decision_function_shape" in params: # SVC estimator.set_params(decision_function_shape='ovo') if estimator.__class__.__name__ == "SelectFdr": # be tolerant of noisy datasets (not actually speed) estimator.set_params(alpha=.5) if estimator.__class__.__name__ == "TheilSenRegressor": estimator.max_subpopulation = 100 if isinstance(estimator, BaseRandomProjection): # Due to the jl lemma and often very few samples, the number # of components of the random matrix projection will be probably # greater than the number of features. # So we impose a smaller number (avoid "auto" mode) estimator.set_params(n_components=1) if isinstance(estimator, SelectKBest): # SelectKBest has a default of k=10 # which is more feature than we have in most case. 
estimator.set_params(k=1) if isinstance(estimator, NMF): if not isinstance(estimator, ProjectedGradientNMF): estimator.set_params(solver='cd') class NotAnArray(object): " An object that is convertable to an array" def __init__(self, data): self.data = data def __array__(self, dtype=None): return self.data def _is_32bit(): """Detect if process is 32bit Python.""" return struct.calcsize('P') * 8 == 32 def check_estimator_sparse_data(name, Estimator): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 X_csr = sparse.csr_matrix(X) y = (4 * rng.rand(40)).astype(np.int) for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']: X = X_csr.asformat(sparse_format) # catch deprecation warnings with warnings.catch_warnings(): if name in ['Scaler', 'StandardScaler']: estimator = Estimator(with_mean=False) else: estimator = Estimator() set_testing_parameters(estimator) # fit and predict try: estimator.fit(X, y) if hasattr(estimator, "predict"): pred = estimator.predict(X) assert_equal(pred.shape, (X.shape[0],)) if hasattr(estimator, 'predict_proba'): probs = estimator.predict_proba(X) assert_equal(probs.shape, (X.shape[0], 4)) except TypeError as e: if 'sparse' not in repr(e): print("Estimator %s doesn't seem to fail gracefully on " "sparse data: error message state explicitly that " "sparse input is not supported if this is not the case." % name) raise except Exception: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: it should raise a TypeError if sparse input " "is explicitly not supported." % name) raise def check_dtype_object(name, Estimator): # check that estimators treat dtype object as numeric if possible rng = np.random.RandomState(0) X = rng.rand(40, 10).astype(object) y = (X[:, 0] * 4).astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) with warnings.catch_warnings(): estimator = Estimator() set_testing_parameters(estimator) estimator.fit(X, y) if hasattr(estimator, "predict"): estimator.predict(X) if (hasattr(estimator, "transform") and name not in DEPRECATED_TRANSFORM): estimator.transform(X) try: estimator.fit(X, y.astype(object)) except Exception as e: if "Unknown label type" not in str(e): raise X[0, 0] = {'foo': 'bar'} msg = "argument must be a string or a number" assert_raises_regex(TypeError, msg, estimator.fit, X, y) @ignore_warnings def check_fit2d_predict1d(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) estimator.fit(X, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): try: assert_warns(DeprecationWarning, getattr(estimator, method), X[0]) except ValueError: pass @ignore_warnings def check_fit2d_1sample(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(1, 10)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass 
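# ---------------------------------------------------------------------------
# A hypothetical, self-contained sketch (using Ridge purely for illustration)
# of the recipe shared by the check_fit2d_* / check_fit1d_* helpers above and
# below: build a tiny random dataset of the degenerate shape under test, then
# accept either a successful fit or a clean ValueError.  The real checks also
# shrink the estimator via set_testing_parameters() and fix its seed via
# set_random_state().
def _example_single_sample_fit():
    import numpy as np
    from sklearn.linear_model import Ridge

    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))   # a single training sample
    y = X[:, 0].astype(int)
    estimator = Ridge()
    try:
        estimator.fit(X, y)             # fitting one sample may succeed ...
    except ValueError:
        pass                            # ... or be rejected with a clear error
    return estimator
# ---------------------------------------------------------------------------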
@ignore_warnings def check_fit2d_1feature(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(10, 1)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass @ignore_warnings def check_fit1d_1feature(name, Estimator): # check fitting 1d array with 1 feature rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20)) y = X.astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass @ignore_warnings def check_fit1d_1sample(name, Estimator): # check fitting 1d array with 1 feature rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20)) y = np.array([1]) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError : pass def check_transformer_general(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) X -= X.min() _check_transformer(name, Transformer, X, y) _check_transformer(name, Transformer, X.tolist(), y.tolist()) def check_transformer_data_not_an_array(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 this_X = NotAnArray(X) this_y = NotAnArray(np.asarray(y)) _check_transformer(name, Transformer, this_X, this_y) def check_transformers_unfitted(name, Transformer): X, y = _boston_subset() with warnings.catch_warnings(record=True): transformer = Transformer() assert_raises((AttributeError, ValueError), transformer.transform, X) def _check_transformer(name, Transformer, X, y): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # on numpy & scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) n_samples, n_features = np.asarray(X).shape # catch deprecation warnings with warnings.catch_warnings(record=True): transformer = Transformer() set_random_state(transformer) set_testing_parameters(transformer) # fit if name in CROSS_DECOMPOSITION: y_ = np.c_[y, y] y_[::2, 1] *= 2 else: y_ = y transformer.fit(X, y_) X_pred = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple): for x_pred in X_pred: assert_equal(x_pred.shape[0], n_samples) else: # check for consistent n_samples assert_equal(X_pred.shape[0], n_samples) if hasattr(transformer, 'transform'): if name in CROSS_DECOMPOSITION: X_pred2 = transformer.transform(X, y_) X_pred3 = transformer.fit_transform(X, y=y_) else: X_pred2 = transformer.transform(X) X_pred3 = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): assert_array_almost_equal( x_pred, x_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( x_pred, x_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) else: assert_array_almost_equal( X_pred, X_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( X_pred, X_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) assert_equal(len(X_pred2), n_samples) assert_equal(len(X_pred3), n_samples) # raises error on malformed input for transform if hasattr(X, 'T'): # If it's not an array, it does not have a 'T' property assert_raises(ValueError, transformer.transform, X.T) @ignore_warnings def check_pipeline_consistency(name, Estimator): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) # check that make_pipeline(est) gives same score as est X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) pipeline = make_pipeline(estimator) estimator.fit(X, y) pipeline.fit(X, y) if name in DEPRECATED_TRANSFORM: funcs = ["score"] else: funcs = ["score", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func_pipeline = getattr(pipeline, func_name) result = func(X, y) result_pipe = func_pipeline(X, y) assert_array_almost_equal(result, result_pipe) @ignore_warnings def check_fit_score_takes_y(name, Estimator): # check that all estimators accept an optional y # in fit and score so they can be used in pipelines rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) if name in DEPRECATED_TRANSFORM: funcs = ["fit", "score", "partial_fit", "fit_predict"] else: funcs = [ "fit", "score", "partial_fit", "fit_predict", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = [p.name for p in signature(func).parameters.values()] assert_true(args[1] in ["y", "Y"], "Expected y or Y as second argument for method " "%s of %s. Got arguments: %r." % (func_name, Estimator.__name__, args)) @ignore_warnings def check_estimators_dtypes(name, Estimator): rnd = np.random.RandomState(0) X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) X_train_64 = X_train_32.astype(np.float64) X_train_int_64 = X_train_32.astype(np.int64) X_train_int_32 = X_train_32.astype(np.int32) y = X_train_int_64[:, 0] y = multioutput_estimator_convert_y_2d(name, y) if name in DEPRECATED_TRANSFORM: methods = ["predict", "decision_function", "predict_proba"] else: methods = [ "predict", "transform", "decision_function", "predict_proba"] for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator, 1) estimator.fit(X_train, y) for method in methods: if hasattr(estimator, method): getattr(estimator, method)(X_train) def check_estimators_empty_data_messages(name, Estimator): e = Estimator() set_testing_parameters(e) set_random_state(e, 1) X_zero_samples = np.empty(0).reshape(0, 3) # The precise message can change depending on whether X or y is # validated first. Let us test the type of exception only: assert_raises(ValueError, e.fit, X_zero_samples, []) X_zero_features = np.empty(0).reshape(3, 0) # the following y should be accepted by both classifiers and regressors # and ignored by unsupervised models y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1])) msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required." 
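    # Note: `msg` is matched as a regular expression by assert_raises_regex
    # below, hence the escaped parentheses and the \d* wildcard for the
    # required minimum number of features.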
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y) def check_estimators_nan_inf(name, Estimator): rnd = np.random.RandomState(0) X_train_finite = rnd.uniform(size=(10, 3)) X_train_nan = rnd.uniform(size=(10, 3)) X_train_nan[0, 0] = np.nan X_train_inf = rnd.uniform(size=(10, 3)) X_train_inf[0, 0] = np.inf y = np.ones(10) y[:5] = 0 y = multioutput_estimator_convert_y_2d(name, y) error_string_fit = "Estimator doesn't check for NaN and inf in fit." error_string_predict = ("Estimator doesn't check for NaN and inf in" " predict.") error_string_transform = ("Estimator doesn't check for NaN and inf in" " transform.") for X_train in [X_train_nan, X_train_inf]: # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator, 1) # try to fit try: estimator.fit(X_train, y) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_fit, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_fit, Estimator, exc) traceback.print_exc(file=sys.stdout) raise exc else: raise AssertionError(error_string_fit, Estimator) # actually fit estimator.fit(X_train_finite, y) # predict if hasattr(estimator, "predict"): try: estimator.predict(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_predict, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_predict, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_predict, Estimator) # transform if (hasattr(estimator, "transform") and name not in DEPRECATED_TRANSFORM): try: estimator.transform(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_transform, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_transform, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_transform, Estimator) @ignore_warnings def check_estimators_pickle(name, Estimator): """Test that we can pickle all estimators""" if name in DEPRECATED_TRANSFORM: check_methods = ["predict", "decision_function", "predict_proba"] else: check_methods = ["predict", "transform", "decision_function", "predict_proba"] X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) # some estimators can't do features less than 0 X -= X.min() # some estimators only take multioutputs y = multioutput_estimator_convert_y_2d(name, y) # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_random_state(estimator) set_testing_parameters(estimator) estimator.fit(X, y) result = dict() for method in check_methods: if hasattr(estimator, method): result[method] = getattr(estimator, method)(X) # pickle and unpickle! pickled_estimator = pickle.dumps(estimator) unpickled_estimator = pickle.loads(pickled_estimator) for method in result: unpickled_result = getattr(unpickled_estimator, method)(X) assert_array_almost_equal(result[method], unpickled_result) def check_estimators_partial_fit_n_features(name, Alg): # check if number of features changes between calls to partial_fit. 
if not hasattr(Alg, 'partial_fit'): return X, y = make_blobs(n_samples=50, random_state=1) X -= X.min() with warnings.catch_warnings(record=True): alg = Alg() set_testing_parameters(alg) if isinstance(alg, ClassifierMixin): classes = np.unique(y) alg.partial_fit(X, y, classes=classes) else: alg.partial_fit(X, y) assert_raises(ValueError, alg.partial_fit, X[:, :-1], y) def check_clustering(name, Alg): X, y = make_blobs(n_samples=50, random_state=1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) n_samples, n_features = X.shape # catch deprecation and neighbors warnings with warnings.catch_warnings(record=True): alg = Alg() set_testing_parameters(alg) if hasattr(alg, "n_clusters"): alg.set_params(n_clusters=3) set_random_state(alg) if name == 'AffinityPropagation': alg.set_params(preference=-100) alg.set_params(max_iter=100) # fit alg.fit(X) # with lists alg.fit(X.tolist()) assert_equal(alg.labels_.shape, (n_samples,)) pred = alg.labels_ assert_greater(adjusted_rand_score(pred, y), 0.4) # fit another time with ``fit_predict`` and compare results if name is 'SpectralClustering': # there is no way to make Spectral clustering deterministic :( return set_random_state(alg) with warnings.catch_warnings(record=True): pred2 = alg.fit_predict(X) assert_array_equal(pred, pred2) def check_clusterer_compute_labels_predict(name, Clusterer): """Check that predict is invariant of compute_labels""" X, y = make_blobs(n_samples=20, random_state=0) clusterer = Clusterer() if hasattr(clusterer, "compute_labels"): # MiniBatchKMeans if hasattr(clusterer, "random_state"): clusterer.set_params(random_state=0) X_pred1 = clusterer.fit(X).predict(X) clusterer.set_params(compute_labels=False) X_pred2 = clusterer.fit(X).predict(X) assert_array_equal(X_pred1, X_pred2) def check_classifiers_one_label(name, Classifier): error_string_fit = "Classifier can't train when only one class is present." 
error_string_predict = ("Classifier can't predict when only one class is " "present.") rnd = np.random.RandomState(0) X_train = rnd.uniform(size=(10, 3)) X_test = rnd.uniform(size=(10, 3)) y = np.ones(10) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() set_testing_parameters(classifier) # try to fit try: classifier.fit(X_train, y) except ValueError as e: if 'class' not in repr(e): print(error_string_fit, Classifier, e) traceback.print_exc(file=sys.stdout) raise e else: return except Exception as exc: print(error_string_fit, Classifier, exc) traceback.print_exc(file=sys.stdout) raise exc # predict try: assert_array_equal(classifier.predict(X_test), y) except Exception as exc: print(error_string_predict, Classifier, exc) raise exc @ignore_warnings # Warnings are raised by decision function def check_classifiers_train(name, Classifier): X_m, y_m = make_blobs(n_samples=300, random_state=0) X_m, y_m = shuffle(X_m, y_m, random_state=7) X_m = StandardScaler().fit_transform(X_m) # generate binary problem from multi-class one y_b = y_m[y_m != 2] X_b = X_m[y_m != 2] for (X, y) in [(X_m, y_m), (X_b, y_b)]: # catch deprecation warnings classes = np.unique(y) n_classes = len(classes) n_samples, n_features = X.shape with warnings.catch_warnings(record=True): classifier = Classifier() if name in ['BernoulliNB', 'MultinomialNB']: X -= X.min() set_testing_parameters(classifier) set_random_state(classifier) # raises error on malformed input for fit assert_raises(ValueError, classifier.fit, X, y[:-1]) # fit classifier.fit(X, y) # with lists classifier.fit(X.tolist(), y.tolist()) assert_true(hasattr(classifier, "classes_")) y_pred = classifier.predict(X) assert_equal(y_pred.shape, (n_samples,)) # training set performance if name not in ['BernoulliNB', 'MultinomialNB']: assert_greater(accuracy_score(y, y_pred), 0.83) # raises error on malformed input for predict assert_raises(ValueError, classifier.predict, X.T) if hasattr(classifier, "decision_function"): try: # decision_function agrees with predict decision = classifier.decision_function(X) if n_classes is 2: assert_equal(decision.shape, (n_samples,)) dec_pred = (decision.ravel() > 0).astype(np.int) assert_array_equal(dec_pred, y_pred) if (n_classes is 3 and not isinstance(classifier, BaseLibSVM)): # 1on1 of LibSVM works differently assert_equal(decision.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(decision, axis=1), y_pred) # raises error on malformed input assert_raises(ValueError, classifier.decision_function, X.T) # raises error on malformed input for decision_function assert_raises(ValueError, classifier.decision_function, X.T) except NotImplementedError: pass if hasattr(classifier, "predict_proba"): # predict_proba agrees with predict y_prob = classifier.predict_proba(X) assert_equal(y_prob.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(y_prob, axis=1), y_pred) # check that probas for all classes sum to one assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples)) # raises error on malformed input assert_raises(ValueError, classifier.predict_proba, X.T) # raises error on malformed input for predict_proba assert_raises(ValueError, classifier.predict_proba, X.T) def check_estimators_fit_returns_self(name, Estimator): """Check if self is returned when calling fit""" X, y = make_blobs(random_state=0, n_samples=9, n_features=4) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() estimator = Estimator() 
set_testing_parameters(estimator) set_random_state(estimator) assert_true(estimator.fit(X, y) is estimator) @ignore_warnings def check_estimators_unfitted(name, Estimator): """Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise either AttributeError or ValueError. The specific exception type NotFittedError inherits from both and can therefore be adequately raised for that purpose. """ # Common test for Regressors as well as Classifiers X, y = _boston_subset() with warnings.catch_warnings(record=True): est = Estimator() msg = "fit" if hasattr(est, 'predict'): assert_raise_message((AttributeError, ValueError), msg, est.predict, X) if hasattr(est, 'decision_function'): assert_raise_message((AttributeError, ValueError), msg, est.decision_function, X) if hasattr(est, 'predict_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_proba, X) if hasattr(est, 'predict_log_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_log_proba, X) def check_supervised_y_2d(name, Estimator): if "MultiTask" in name: # These only work on 2d, so this test makes no sense return rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) # fit estimator.fit(X, y) y_pred = estimator.predict(X) set_random_state(estimator) # Check that when a 2D y is given, a DataConversionWarning is # raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", DataConversionWarning) warnings.simplefilter("ignore", RuntimeWarning) estimator.fit(X, y[:, np.newaxis]) y_pred_2d = estimator.predict(X) msg = "expected 1 DataConversionWarning, got: %s" % ( ", ".join([str(w_x) for w_x in w])) if name not in MULTI_OUTPUT: # check that we warned if we don't support multi-output assert_greater(len(w), 0, msg) assert_true("DataConversionWarning('A column-vector y" " was passed when a 1d array was expected" in msg) assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel()) def check_classifiers_classes(name, Classifier): X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 y_names = np.array(["one", "two", "three"])[y] for y_names in [y_names, y_names.astype('O')]: if name in ["LabelPropagation", "LabelSpreading"]: # TODO some complication with -1 label y_ = y else: y_ = y_names classes = np.unique(y_) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() if name == 'BernoulliNB': classifier.set_params(binarize=X.mean()) set_testing_parameters(classifier) set_random_state(classifier) # fit classifier.fit(X, y_) y_pred = classifier.predict(X) # training set performance assert_array_equal(np.unique(y_), np.unique(y_pred)) if np.any(classifier.classes_ != classes): print("Unexpected classes_ attribute for %r: " "expected %s, got %s" % (classifier, classes, classifier.classes_)) def check_regressors_int(name, Regressor): X, _ = _boston_subset() X = X[:50] rnd = np.random.RandomState(0) y = rnd.randint(3, size=X.shape[0]) y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds regressor_1 = 
Regressor() regressor_2 = Regressor() set_testing_parameters(regressor_1) set_testing_parameters(regressor_2) set_random_state(regressor_1) set_random_state(regressor_2) if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y # fit regressor_1.fit(X, y_) pred1 = regressor_1.predict(X) regressor_2.fit(X, y_.astype(np.float)) pred2 = regressor_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_regressors_train(name, Regressor): X, y = _boston_subset() y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled y = y.ravel() y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): regressor = Regressor() set_testing_parameters(regressor) if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == 'PassiveAggressiveRegressor': regressor.C = 0.01 # raises error on malformed input for fit assert_raises(ValueError, regressor.fit, X, y[:-1]) # fit if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y set_random_state(regressor) regressor.fit(X, y_) regressor.fit(X.tolist(), y_.tolist()) y_pred = regressor.predict(X) assert_equal(y_pred.shape, y_.shape) # TODO: find out why PLS and CCA fail. RANSAC is random # and furthermore assumes the presence of outliers, hence # skipped if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'): print(regressor) assert_greater(regressor.score(X, y_), 0.5) @ignore_warnings def check_regressors_no_decision_function(name, Regressor): # checks whether regressors have decision_function or predict_proba rng = np.random.RandomState(0) X = rng.normal(size=(10, 4)) y = multioutput_estimator_convert_y_2d(name, X[:, 0]) regressor = Regressor() set_testing_parameters(regressor) if hasattr(regressor, "n_components"): # FIXME CCA, PLS is not robust to rank 1 effects regressor.n_components = 1 regressor.fit(X, y) funcs = ["decision_function", "predict_proba", "predict_log_proba"] for func_name in funcs: func = getattr(regressor, func_name, None) if func is None: # doesn't have function continue # has function. Should raise deprecation warning msg = func_name assert_warns_message(DeprecationWarning, msg, func, X) def check_class_weight_classifiers(name, Classifier): if name == "NuSVC": # the sparse version has a parameter that doesn't do anything raise SkipTest if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! 
raise SkipTest for n_centers in [2, 3]: # create a very noisy dataset X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) n_centers = len(np.unique(y_train)) if n_centers == 2: class_weight = {0: 1000, 1: 0.0001} else: class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} with warnings.catch_warnings(record=True): classifier = Classifier(class_weight=class_weight) if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) if hasattr(classifier, "min_weight_fraction_leaf"): classifier.set_params(min_weight_fraction_leaf=0.01) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) assert_greater(np.mean(y_pred == 0), 0.89) def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train, X_test, y_test, weights): with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) classifier.set_params(class_weight='balanced') classifier.fit(X_train, y_train) y_pred_balanced = classifier.predict(X_test) assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'), f1_score(y_test, y_pred, average='weighted')) def check_class_weight_balanced_linear_classifier(name, Classifier): """Test class weights with non-contiguous class labels.""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = np.array([1, 1, 1, -1, -1]) with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): # This is a very small dataset, default n_iter are likely to prevent # convergence classifier.set_params(n_iter=1000) set_random_state(classifier) # Let the model compute the class frequencies classifier.set_params(class_weight='balanced') coef_balanced = classifier.fit(X, y).coef_.copy() # Count each label occurrence to reweight manually n_samples = len(y) n_classes = float(len(np.unique(y))) class_weight = {1: n_samples / (np.sum(y == 1) * n_classes), -1: n_samples / (np.sum(y == -1) * n_classes)} classifier.set_params(class_weight=class_weight) coef_manual = classifier.fit(X, y).coef_.copy() assert_array_almost_equal(coef_balanced, coef_manual) def check_estimators_overwrite_params(name, Estimator): X, y = make_blobs(random_state=0, n_samples=9) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() with warnings.catch_warnings(record=True): # catch deprecation warnings estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) # Make a physical copy of the orginal estimator parameters before fitting. params = estimator.get_params() original_params = deepcopy(params) # Fit the model estimator.fit(X, y) # Compare the state of the model parameters with the original parameters new_params = estimator.get_params() for param_name, original_value in original_params.items(): new_value = new_params[param_name] # We should never change or mutate the internal state of input # parameters by default. To check this we use the joblib.hash function # that introspects recursively any subobjects to compute a checksum. # The only exception to this rule of immutable constructor parameters # is possible RandomState instance but in this check we explicitly # fixed the random_state params recursively to be integer seeds. 
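        # As the comment above notes, `hash` here refers to joblib's hash
        # helper (imported earlier in this module, outside this excerpt),
        # which can checksum nested containers and ndarrays that the builtin
        # hash() cannot handle.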
assert_equal(hash(new_value), hash(original_value), "Estimator %s should not change or mutate " " the parameter %s from %s to %s during fit." % (name, param_name, original_value, new_value)) def check_sparsify_coefficients(name, Estimator): X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]]) y = [1, 1, 1, 2, 2, 2, 3, 3, 3] est = Estimator() est.fit(X, y) pred_orig = est.predict(X) # test sparsify with dense inputs est.sparsify() assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) # pickle and unpickle with sparse coef_ est = pickle.loads(pickle.dumps(est)) assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) def check_classifier_data_not_an_array(name, Estimator): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]]) y = [1, 1, 1, 2, 2, 2] y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_regressor_data_not_an_array(name, Estimator): X, y = _boston_subset(n_samples=50) y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_estimators_data_not_an_array(name, Estimator, X, y): if name in CROSS_DECOMPOSITION: raise SkipTest # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds estimator_1 = Estimator() estimator_2 = Estimator() set_testing_parameters(estimator_1) set_testing_parameters(estimator_2) set_random_state(estimator_1) set_random_state(estimator_2) y_ = NotAnArray(np.asarray(y)) X_ = NotAnArray(np.asarray(X)) # fit estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_parameters_default_constructible(name, Estimator): classifier = LinearDiscriminantAnalysis() # test default-constructibility # get rid of deprecation warnings with warnings.catch_warnings(record=True): if name in META_ESTIMATORS: estimator = Estimator(classifier) else: estimator = Estimator() # test cloning clone(estimator) # test __repr__ repr(estimator) # test that set_params returns self assert_true(estimator.set_params() is estimator) # test if init does nothing but set parameters # this is important for grid_search etc. # We get the default parameters from init and then # compare these against the actual values of the attributes. # this comes from getattr. Gets rid of deprecation decorator. init = getattr(estimator.__init__, 'deprecated_original', estimator.__init__) try: def param_filter(p): """Identify hyper parameters of an estimator""" return (p.name != 'self' and p.kind != p.VAR_KEYWORD and p.kind != p.VAR_POSITIONAL) init_params = [p for p in signature(init).parameters.values() if param_filter(p)] except (TypeError, ValueError): # init is not a python function. 
# true for mixins return params = estimator.get_params() if name in META_ESTIMATORS: # they can need a non-default argument init_params = init_params[1:] for init_param in init_params: assert_not_equal(init_param.default, init_param.empty, "parameter %s for %s has no default value" % (init_param.name, type(estimator).__name__)) assert_in(type(init_param.default), [str, int, float, bool, tuple, type(None), np.float64, types.FunctionType, Memory]) if init_param.name not in params.keys(): # deprecated parameter, not in get_params assert_true(init_param.default is None) continue param_value = params[init_param.name] if isinstance(param_value, np.ndarray): assert_array_equal(param_value, init_param.default) else: assert_equal(param_value, init_param.default) def multioutput_estimator_convert_y_2d(name, y): # Estimators in mono_output_task_error raise ValueError if y is of 1-D # Convert into a 2-D y for those estimators. if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV', 'MultiTaskLasso', 'MultiTaskElasticNet']): return y[:, np.newaxis] return y def check_non_transformer_estimators_n_iter(name, estimator, multi_output=False): # Check if all iterative solvers, run for more than one iteratiom iris = load_iris() X, y_ = iris.data, iris.target if multi_output: y_ = y_[:, np.newaxis] set_random_state(estimator, 0) if name == 'AffinityPropagation': estimator.fit(X) else: estimator.fit(X, y_) assert_greater(estimator.n_iter_, 0) def check_transformer_n_iter(name, estimator): if name in CROSS_DECOMPOSITION: # Check using default data X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]] y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]] else: X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() - 0.1 set_random_state(estimator, 0) estimator.fit(X, y_) # These return a n_iter per component. if name in CROSS_DECOMPOSITION: for iter_ in estimator.n_iter_: assert_greater(iter_, 1) else: assert_greater(estimator.n_iter_, 1) def check_get_params_invariance(name, estimator): class T(BaseEstimator): """Mock classifier """ def __init__(self): pass def fit(self, X, y): return self if name in ('FeatureUnion', 'Pipeline'): e = estimator([('clf', T())]) elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'): return else: e = estimator() shallow_params = e.get_params(deep=False) deep_params = e.get_params(deep=True) assert_true(all(item in deep_params.items() for item in shallow_params.items()))
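# ---------------------------------------------------------------------------
# Hypothetical usage sketch: check_estimator() defined above is the public
# entry point that drives the full battery of checks for an estimator class.
# A minimal example of invoking it on a built-in estimator; a failing check
# typically surfaces as an AssertionError naming the offending behaviour.
def _example_run_estimator_checks():
    from sklearn.linear_model import Ridge

    # Runs check_parameters_default_constructible() and then every check
    # yielded by _yield_all_checks() for the given class.
    check_estimator(Ridge)
# ---------------------------------------------------------------------------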
bsd-3-clause
INM-6/python-neo
neo/io/neuralynxio_v1.py
2
105289
""" Class for reading data from Neuralynx files. This IO supports NCS, NEV and NSE file formats. This module is an older implementation with old neo.io API. A new class NeuralynxIO compunded by NeuralynxRawIO and BaseFromIO superseed this one. Depends on: numpy Supported: Read Author: Julia Sprenger, Carlos Canova Adapted from the exampleIO of python-neo """ import sys import os import warnings import codecs import copy import re import datetime import pkg_resources import numpy as np import quantities as pq from neo.io.baseio import BaseIO import neo.io.neuralynxio from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain, Event, Unit) from os import listdir, sep from os.path import isfile, getsize import hashlib import pickle if hasattr(pkg_resources, 'pkg_resources'): parse_version = pkg_resources.pkg_resources.parse_version else: parse_version = pkg_resources.parse_version class NeuralynxIO(BaseIO): """ Class for reading Neuralynx files. It enables reading: - :class:'Block' - :class:'Segment' - :class:'AnalogSignal' - :class:'SpikeTrain' Usage: from neo import io import quantities as pq import matplotlib.pyplot as plt session_folder = '../Data/2014-07-24_10-31-02' NIO = io.NeuralynxIO(session_folder,print_diagnostic = True) block = NIO.read_block(t_starts = 0.1*pq.s, t_stops = 0.2*pq.s, events=True) seg = block.segments[0] analogsignal = seg.analogsignals[0] plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude) plt.show() """ is_readable = True # This class can only read data is_writable = False # write is not supported # This class is able to directly or indirectly handle the following objects # You can notice that this greatly simplifies the full Neo object hierarchy supported_objects = [Segment, AnalogSignal, SpikeTrain, Event] # This class can return either a Block or a Segment # The first one is the default ( self.read ) # These lists should go from highest object to lowest object because # common_io_test assumes it. readable_objects = [Segment, AnalogSignal, SpikeTrain] # This class is not able to write objects writeable_objects = [] has_header = False is_streameable = False # This is for GUI stuff : a definition for parameters when reading. # This dict should be keyed by object (`Block`). Each entry is a list # of tuple. The first entry in each tuple is the parameter name. The # second entry is a dict with keys 'value' (for default value), # and 'label' (for a descriptive name). # Note that if the highest-level object requires parameters, # common_io_test will be skipped. read_params = { Segment: [('waveforms', {'value': True})], Block: [('waveforms', {'value': False})] } # do not supported write so no GUI stuff write_params = None name = 'Neuralynx' description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \ 'Cheetah) recordings system (tetrodes).' 
extensions = ['nse', 'ncs', 'nev', 'ntt'] # mode can be 'file' or 'dir' or 'fake' or 'database' # the main case is 'file' but some reader are base on a directory or # a database this info is for GUI stuff also mode = 'dir' # hardcoded parameters from manual, which are not present in Neuralynx # data files # unit of timestamps in different files nev_time_unit = pq.microsecond ncs_time_unit = pq.microsecond nse_time_unit = pq.microsecond ntt_time_unit = pq.microsecond # unit of sampling rate in different files ncs_sr_unit = pq.Hz nse_sr_unit = pq.Hz ntt_sr_unit = pq.Hz def __init__(self, sessiondir=None, cachedir=None, use_cache='hash', print_diagnostic=False, filename=None): """ Arguments: sessiondir: the directory the files of the recording session are collected. Default 'None'. print_diagnostic: indicates, whether information about the loading of data is printed in terminal or not. Default 'False'. cachedir: the directory where metadata about the recording session is read from and written to. use_cache: method used for cache identification. Possible values: 'hash'/ 'always'/'datesize'/'never'. Default 'hash' filename: this argument is handles the same as sessiondir and is only added for external IO interfaces. The value of sessiondir has priority over filename. """ warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.' ''.format(self.__class__, neo.io.neuralynxio.NeuralynxIO), FutureWarning) BaseIO.__init__(self) # possiblity to provide filename instead of sessiondir for IO # compatibility if filename is not None and sessiondir is None: sessiondir = filename if sessiondir is None: raise ValueError('Must provide a directory containing data files of' ' of one recording session.') # remove filename if specific file was passed if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]): sessiondir = sessiondir[:sessiondir.rfind(sep)] # remove / for consistent directory handling if sessiondir.endswith(sep): sessiondir = sessiondir.rstrip(sep) # set general parameters of this IO self.sessiondir = sessiondir self.filename = sessiondir.split(sep)[-1] self._print_diagnostic = print_diagnostic self.associated = False self._associate(cachedir=cachedir, usecache=use_cache) self._diagnostic_print( 'Initialized IO for session %s' % self.sessiondir) def read_block(self, lazy=False, cascade=True, t_starts=None, t_stops=None, electrode_list=None, unit_list=None, analogsignals=True, events=False, waveforms=False): """ Reads data in a requested time window and returns block with as many segments es necessary containing these data. Arguments: lazy : Postpone actual reading of the data files. Default 'False'. cascade : Do not postpone reading subsequent neo types (segments). Default 'True'. t_starts : list of quantities or quantity describing the start of the requested time window to load. If None or [None] the complete session is loaded. Default 'None'. t_stops : list of quantities or quantity describing the end of the requested time window to load. Has to contain the same number of values as t_starts. If None or [None] the complete session is loaded. Default 'None'. electrode_list : list of integers containing the IDs of the requested to load. If [] or None all available channels will be loaded. Default: None. unit_list : list of integers containing the IDs of the requested units to load. If [] or None all available units will be loaded. Default: None. analogsignals : boolean, indication whether analogsignals should be read. Default: True. 
events : Loading events. If True all available events in the given time window will be read. Default: False. waveforms : Load waveform for spikes in the requested time window. Default: False. Returns: Block object containing the requested data in neo structures. Usage: from neo import io import quantities as pq import matplotlib.pyplot as plt session_folder = '../Data/2014-07-24_10-31-02' NIO = io.NeuralynxIO(session_folder,print_diagnostic = True) block = NIO.read_block(lazy = False, cascade = True, t_starts = 0.1*pq.s, t_stops = 0.2*pq.s, electrode_list = [1,5,10], unit_list = [1,2,3], events = True, waveforms = True) plt.plot(block.segments[0].analogsignals[0]) plt.show() """ # Create block bl = Block(file_origin=self.sessiondir) bl.name = self.filename if not cascade: return bl # Checking input of t_start and t_stop # For lazy users that specify x,x instead of [x],[x] for t_starts, # t_stops if t_starts is None: t_starts = [None] elif type(t_starts) == pq.Quantity: t_starts = [t_starts] elif type(t_starts) != list or any( [(type(i) != pq.Quantity and i is not None) for i in t_starts]): raise ValueError('Invalid specification of t_starts.') if t_stops is None: t_stops = [None] elif type(t_stops) == pq.Quantity: t_stops = [t_stops] elif type(t_stops) != list or any( [(type(i) != pq.Quantity and i is not None) for i in t_stops]): raise ValueError('Invalid specification of t_stops.') # adapting t_starts and t_stops to known gap times (extracted in # association process / initialization) for gap in self.parameters_global['gaps']: # gap=gap_list[0] for e in range(len(t_starts)): t1, t2 = t_starts[e], t_stops[e] gap_start = gap[1] * self.ncs_time_unit - \ self.parameters_global['t_start'] gap_stop = gap[2] * self.ncs_time_unit - self.parameters_global[ 't_start'] if ((t1 is None and t2 is None) or (t1 is None and t2 is not None and t2.rescale( self.ncs_time_unit) > gap_stop) or (t2 is None and t1 is not None and t1.rescale( self.ncs_time_unit) < gap_stop) or (t1 is not None and t2 is not None and t1.rescale( self.ncs_time_unit) < gap_start and t2.rescale(self.ncs_time_unit) > gap_stop)): # adapting first time segment t_stops[e] = gap_start # inserting second time segment t_starts.insert(e + 1, gap_stop) t_stops.insert(e + 1, t2) warnings.warn( 'Substituted t_starts and t_stops in order to skip ' 'gap in recording session.') # loading all channels if empty electrode_list if electrode_list == [] or electrode_list is None: electrode_list = self.parameters_ncs.keys() # adding a segment for each t_start, t_stop pair for t_start, t_stop in zip(t_starts, t_stops): seg = self.read_segment(lazy=lazy, cascade=cascade, t_start=t_start, t_stop=t_stop, electrode_list=electrode_list, unit_list=unit_list, analogsignals=analogsignals, events=events, waveforms=waveforms) bl.segments.append(seg) # generate units units = [] channel_unit_collection = {} for st in [s for seg in bl.segments for s in seg.spiketrains]: # collecting spiketrains of same channel and unit id to generate # common unit chuid = (st.annotations['channel_index'], st.annotations['unit_id']) if chuid in channel_unit_collection: channel_unit_collection[chuid].append(st) else: channel_unit_collection[chuid] = [st] for chuid in channel_unit_collection: sts = channel_unit_collection[chuid] unit = Unit(name='Channel %i, Unit %i' % chuid) unit.spiketrains.extend(sts) units.append(unit) # generate one channel indexes for each analogsignal for anasig in [a for seg in bl.segments for a in seg.analogsignals]: channelids = 
anasig.annotations['channel_index'] channel_names = ['channel %i' % i for i in channelids] channelidx = ChannelIndex(index=range(len(channelids)), channel_names=channel_names, name='channel ids for all analogsignal ' '"%s"' % anasig.name, channel_ids=channelids) channelidx.analogsignals.append(anasig) bl.channel_indexes.append(channelidx) # generate channel indexes for units channelids = [unit.spiketrains[0].annotations['channel_index'] for unit in units] channel_names = ['channel %i' % i for i in channelids] channelidx = ChannelIndex(index=range(len(channelids)), channel_names=channel_names, name='channel ids for all spiketrains', channel_ids=channelids) channelidx.units.extend(units) bl.channel_indexes.append(channelidx) bl.create_many_to_one_relationship() # Adding global parameters to block annotation bl.annotations.update(self.parameters_global) return bl def read_segment(self, lazy=False, cascade=True, t_start=None, t_stop=None, electrode_list=None, unit_list=None, analogsignals=True, events=False, waveforms=False): """Reads one Segment. The Segment will contain one AnalogSignal for each channel and will go from t_start to t_stop. Arguments: lazy : Postpone actual reading of the data files. Default 'False'. cascade : Do not postpone reading subsequent neo types (SpikeTrains, AnalogSignals, Events). Default 'True'. t_start : time (quantity) that the Segment begins. Default None. t_stop : time (quantity) that the Segment ends. Default None. electrode_list : list of integers containing the IDs of the requested to load. If [] or None all available channels will be loaded. Default: None. unit_list : list of integers containing the IDs of the requested units to load. If [] or None all available units will be loaded. If False, no unit will be loaded. Default: None. analogsignals : boolean, indication whether analogsignals should be read. Default: True. events : Loading events. If True all available events in the given time window will be read. Default: False. waveforms : Load waveform for spikes in the requested time window. Default: False. Returns: Segment object containing neo objects, which contain the data. """ # input check # loading all channels if empty electrode_list if electrode_list == [] or electrode_list is None: electrode_list = self.parameters_ncs.keys() elif electrode_list is None: raise ValueError('Electrode_list can not be None.') elif [v for v in electrode_list if v in self.parameters_ncs.keys()] == []: # warn if non of the requested channels are present in this session warnings.warn('Requested channels %s are not present in session ' '(contains only %s)' % ( electrode_list, self.parameters_ncs.keys())) electrode_list = [] seg = Segment(file_origin=self.filename) if not cascade: return seg # generate empty segment for analogsignal collection empty_seg = Segment(file_origin=self.filename) # Reading NCS Files # # selecting ncs files to load based on electrode_list requested if analogsignals: for chid in electrode_list: if chid in self.parameters_ncs: file_ncs = self.parameters_ncs[chid]['filename'] self.read_ncs(file_ncs, empty_seg, lazy, cascade, t_start=t_start, t_stop=t_stop) else: self._diagnostic_print('Can not load ncs of channel %i. ' 'No corresponding ncs file ' 'present.' 
% (chid)) # supplementory merge function, should be replaced by neo utility # function def merge_analogsignals(anasig_list): for aid, anasig in enumerate(anasig_list): anasig.channel_index = None if aid == 0: full_analogsignal = anasig else: full_analogsignal = full_analogsignal.merge(anasig) for key in anasig_list[0].annotations.keys(): listified_values = [a.annotations[key] for a in anasig_list] full_analogsignal.annotations[key] = listified_values return full_analogsignal analogsignal = merge_analogsignals(empty_seg.analogsignals) seg.analogsignals.append(analogsignal) analogsignal.segment = seg # Reading NEV Files (Events)# # reading all files available if events: for filename_nev in self.nev_asso: self.read_nev(filename_nev, seg, lazy, cascade, t_start=t_start, t_stop=t_stop) # Reading Spike Data only if requested if unit_list is not False: # Reading NSE Files (Spikes)# # selecting nse files to load based on electrode_list requested for chid in electrode_list: if chid in self.parameters_nse: filename_nse = self.parameters_nse[chid]['filename'] self.read_nse(filename_nse, seg, lazy, cascade, t_start=t_start, t_stop=t_stop, waveforms=waveforms) else: self._diagnostic_print('Can not load nse of channel %i. ' 'No corresponding nse file ' 'present.' % (chid)) # Reading ntt Files (Spikes)# # selecting ntt files to load based on electrode_list requested for chid in electrode_list: if chid in self.parameters_ntt: filename_ntt = self.parameters_ntt[chid]['filename'] self.read_ntt(filename_ntt, seg, lazy, cascade, t_start=t_start, t_stop=t_stop, waveforms=waveforms) else: self._diagnostic_print('Can not load ntt of channel %i. ' 'No corresponding ntt file ' 'present.' % (chid)) return seg def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True, t_start=None, t_stop=None): ''' Reading a single .ncs file from the associated Neuralynx recording session. In case of a recording gap between t_start and t_stop, data are only loaded until gap start. For loading data across recording gaps use read_block(...). Arguments: filename_ncs : Name of the .ncs file to be loaded. seg : Neo Segment, to which the AnalogSignal containing the data will be attached. lazy : Postpone actual reading of the data. Instead provide a dummy AnalogSignal. Default 'False'. cascade : Not used in this context. Default: 'True'. t_start : time or sample (quantity or integer) that the AnalogSignal begins. Default None. t_stop : time or sample (quantity or integer) that the AnalogSignal ends. Default None. Returns: None ''' # checking format of filename and correcting if necessary if filename_ncs[-4:] != '.ncs': filename_ncs = filename_ncs + '.ncs' if sep in filename_ncs: filename_ncs = filename_ncs.split(sep)[-1] # Extracting the channel id from prescan (association) of ncs files with # this recording session chid = self.get_channel_id_by_file_name(filename_ncs) if chid is None: raise ValueError('NeuralynxIO is attempting to read a file ' 'not associated to this session (%s).' 
% ( filename_ncs)) if not cascade: return # read data header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs) data = self.__mmap_ncs_data(filename_ncs) # ensure meaningful values for requested start and stop times # in case time is provided in samples: transform to absolute time units if isinstance(t_start, int): t_start = t_start / self.parameters_ncs[chid]['sampling_rate'] if isinstance(t_stop, int): t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate'] # rescaling to global start time of recording (time of first sample # in any file type) if t_start is None or t_start < ( self.parameters_ncs[chid]['t_start'] - self.parameters_global[ 't_start']): t_start = ( self.parameters_ncs[chid]['t_start'] - self.parameters_global[ 't_start']) if t_start > ( self.parameters_ncs[chid]['t_stop'] - self.parameters_global[ 't_start']): raise ValueError( 'Requested times window (%s to %s) is later than data are ' 'recorded (t_stop = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_ncs[chid]['t_stop'] - self.parameters_global['t_start']), filename_ncs)) if t_stop is None or t_stop > ( self.parameters_ncs[chid]['t_stop'] - self.parameters_global[ 't_start']): t_stop = ( self.parameters_ncs[chid]['t_stop'] - self.parameters_global[ 't_start']) if t_stop < ( self.parameters_ncs[chid]['t_start'] - self.parameters_global['t_start']): raise ValueError( 'Requested times window (%s to %s) is earlier than data ' 'are ' 'recorded (t_start = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_ncs[chid]['t_start'] - self.parameters_global['t_start']), filename_ncs)) if t_start >= t_stop: raise ValueError( 'Requested start time (%s) is later than / equal to stop ' 'time ' '(%s) ' 'for file %s.' % (t_start, t_stop, filename_ncs)) # Extracting data signal in requested time window unit = pq.dimensionless # default value if lazy: sig = [] p_id_start = 0 else: tstamps = header_time_data * self.ncs_time_unit - \ self.parameters_global['t_start'] # find data packet to start with signal construction starts = np.where(tstamps <= t_start)[0] if len(starts) == 0: self._diagnostic_print( 'Requested AnalogSignal not present in this time ' 'interval.') return else: # first packet to be included into signal p_id_start = starts[-1] # find data packet where signal ends (due to gap or t_stop) stops = np.where(tstamps >= t_stop)[0] if len(stops) != 0: first_stop = [stops[0]] else: first_stop = [] # last packet to be included in signal p_id_stop = min(first_stop + [len(data)]) # search gaps in recording in time range to load gap_packets = [gap_id[0] for gap_id in self.parameters_ncs[chid]['gaps'] if gap_id[0] > p_id_start] if len(gap_packets) > 0 and min(gap_packets) < p_id_stop: p_id_stop = min(gap_packets) warnings.warn( 'Analogsignalarray was shortened due to gap in ' 'recorded ' 'data ' ' of file %s at packet id %i' % ( filename_ncs, min(gap_packets))) # search broken packets in time range to load broken_packets = [] if 'broken_packet' in self.parameters_ncs[chid]: broken_packets = [packet[0] for packet in self.parameters_ncs[chid]['broken_packet'] if packet[0] > p_id_start] if len(broken_packets) > 0 and min(broken_packets) < p_id_stop: p_id_stop = min(broken_packets) warnings.warn( 'Analogsignalarray was shortened due to broken data ' 'packet in recorded data ' ' of file %s at packet id %i' % ( filename_ncs, min(broken_packets))) # construct signal in valid packet range sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float) sig = sig.reshape(len(sig) * len(sig[0])) # ADBitVolts is not 
guaranteed to be present in the header! if 'ADBitVolts' in self.parameters_ncs[chid]: sig *= self.parameters_ncs[chid]['ADBitVolts'] unit = pq.V else: warnings.warn( 'Could not transform data from file %s into physical ' 'signal. ' 'Missing "ADBitVolts" value in text header.') # defining sampling rate for rescaling purposes sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0] # creating neo AnalogSignal containing data anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False), sampling_rate=1 * sampling_rate, # rescaling t_start to sampling time units t_start=(header_time_data[p_id_start] * self.ncs_time_unit - self.parameters_global['t_start']).rescale( 1 / sampling_rate), name='channel_%i' % (chid), channel_index=chid) # removing protruding parts of first and last data packet if anasig.t_start < t_start.rescale(anasig.t_start.units): anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units), None) if anasig.t_stop > t_stop.rescale(anasig.t_start.units): anasig = anasig.time_slice(None, t_stop.rescale(anasig.t_start.units)) annotations = copy.deepcopy(self.parameters_ncs[chid]) for pop_key in ['sampling_rate', 't_start']: if pop_key in annotations: annotations.pop(pop_key) anasig.annotations.update(annotations) anasig.annotations['electrode_id'] = chid # this annotation is necesary for automatic genereation of # recordingchannels anasig.annotations['channel_index'] = chid anasig.segment = seg # needed for merge function of analogsignals seg.analogsignals.append(anasig) def read_nev(self, filename_nev, seg, lazy=False, cascade=True, t_start=None, t_stop=None): ''' Reads associated nev file and attaches its content as eventarray to provided neo segment. In constrast to read_ncs times can not be provided in number of samples as a nev file has no inherent sampling rate. Arguments: filename_nev : Name of the .nev file to be loaded. seg : Neo Segment, to which the Event containing the data will be attached. lazy : Postpone actual reading of the data. Instead provide a dummy Event. Default 'False'. cascade : Not used in this context. Default: 'True'. t_start : time (quantity) that the Events begin. Default None. t_stop : time (quantity) that the Event end. Default None. Returns: None ''' if filename_nev[-4:] != '.nev': filename_nev += '.nev' if sep in filename_nev: filename_nev = filename_nev.split(sep)[-1] if filename_nev not in self.nev_asso: raise ValueError('NeuralynxIO is attempting to read a file ' 'not associated to this session (%s).' % ( filename_nev)) # # ensure meaningful values for requested start and stop times # # providing time is samples for nev file does not make sense as we # don't know the underlying sampling rate if isinstance(t_start, int): raise ValueError( 'Requesting event information from nev file in samples ' 'does ' 'not make sense. ' 'Requested t_start %s' % t_start) if isinstance(t_stop, int): raise ValueError( 'Requesting event information from nev file in samples ' 'does ' 'not make sense. ' 'Requested t_stop %s' % t_stop) # ensure meaningful values for requested start and stop times if t_start is None or t_start < ( self.parameters_nev[filename_nev]['t_start'] - self.parameters_global['t_start']): t_start = (self.parameters_nev[filename_nev]['t_start'] - self.parameters_global['t_start']) if t_start > (self.parameters_nev[filename_nev]['t_stop'] - self.parameters_global['t_start']): raise ValueError( 'Requested times window (%s to %s) is later than data are ' 'recorded (t_stop = %s) ' 'for file %s.' 
% (t_start, t_stop, (self.parameters_nev[filename_nev]['t_stop'] - self.parameters_global['t_start']), filename_nev)) if t_stop is None or t_stop > ( self.parameters_nev[filename_nev]['t_stop'] - self.parameters_global['t_start']): t_stop = (self.parameters_nev[filename_nev]['t_stop'] - self.parameters_global['t_start']) if t_stop < (self.parameters_nev[filename_nev]['t_start'] - self.parameters_global['t_start']): raise ValueError( 'Requested times window (%s to %s) is earlier than data ' 'are ' 'recorded (t_start = %s) ' 'for file %s.' % (t_start, t_stop, ( self.parameters_nev[filename_nev][ 't_start'] - self.parameters_global['t_start']), filename_nev)) if t_start >= t_stop: raise ValueError( 'Requested start time (%s) is later than / equal to stop ' 'time ' '(%s) ' 'for file %s.' % (t_start, t_stop, filename_nev)) data = self.__mmap_nev_file(filename_nev) # Extracting all events for one event type and put it into an event # array # TODO: Check if this is the correct way of event creation. for event_type in self.parameters_nev[filename_nev]['event_types']: # Extract all time stamps of digital markers and rescaling time type_mask = [i for i in range(len(data)) if (data[i][4] == event_type['event_id'] and data[i][5] == event_type['nttl'] and data[i][10].decode('latin-1') == event_type[ 'name'])] marker_times = [t[3] for t in data[type_mask]] * self.nev_time_unit - \ self.parameters_global['t_start'] # only consider Events in the requested time window [t_start, # t_stop] time_mask = [i for i in range(len(marker_times)) if ( marker_times[i] >= t_start and marker_times[i] <= t_stop)] marker_times = marker_times[time_mask] # Do not create an eventarray if there are no events of this type # in the requested time range if len(marker_times) == 0: continue ev = Event(times=pq.Quantity(marker_times, units=self.nev_time_unit, dtype="int"), labels=event_type['name'], name="Digital Marker " + str(event_type), file_origin=filename_nev, marker_id=event_type['event_id'], digital_marker=True, analog_marker=False, nttl=event_type['nttl']) seg.events.append(ev) def read_nse(self, filename_nse, seg, lazy=False, cascade=True, t_start=None, t_stop=None, unit_list=None, waveforms=False): ''' Reads nse file and attaches content as spike train to provided neo segment. Times can be provided in samples (integer values). If the nse file does not contain a sampling rate value, the ncs sampling rate on the same electrode is used. Arguments: filename_nse : Name of the .nse file to be loaded. seg : Neo Segment, to which the Spiketrain containing the data will be attached. lazy : Postpone actual reading of the data. Instead provide a dummy SpikeTrain. Default 'False'. cascade : Not used in this context. Default: 'True'. t_start : time or sample (quantity or integer) that the SpikeTrain begins. Default None. t_stop : time or sample (quantity or integer) that the SpikeTrain ends. Default None. unit_list : unit ids to be loaded. If [], all units are loaded. Default None. waveforms : Load the waveform (up to 32 data points) for each spike time. 
Default: False Returns: None ''' if filename_nse[-4:] != '.nse': filename_nse += '.nse' if sep in filename_nse: filename_nse = filename_nse.split(sep)[-1] # extracting channel id of requested file channel_id = self.get_channel_id_by_file_name(filename_nse) if channel_id is not None: chid = channel_id else: # if nse file is empty it is not listed in self.parameters_nse, but # in self.nse_avail if filename_nse in self.nse_avail: warnings.warn('NeuralynxIO is attempting to read an empty ' '(not associated) nse file (%s). ' 'Not loading nse file.' % (filename_nse)) return else: raise ValueError('NeuralynxIO is attempting to read a file ' 'not associated to this session (%s).' % ( filename_nse)) # ensure meaningful values for requested start and stop times # in case time is provided in samples: transform to absolute time units # ncs sampling rate is best guess if there is no explicit sampling # rate given for nse values. if 'sampling_rate' in self.parameters_nse[chid]: sr = self.parameters_nse[chid]['sampling_rate'] elif chid in self.parameters_ncs and 'sampling_rate' in \ self.parameters_ncs[chid]: sr = self.parameters_ncs[chid]['sampling_rate'] else: raise ValueError( 'No sampling rate present for channel id %i in nse file ' '%s. ' 'Could also not find the sampling rate of the respective ' 'ncs ' 'file.' % ( chid, filename_nse)) if isinstance(t_start, int): t_start = t_start / sr if isinstance(t_stop, int): t_stop = t_stop / sr # + rescaling global recording start (first sample in any file type) # This is not optimal, as there is no way to know how long the # recording lasted after last spike if t_start is None or t_start < ( self.parameters_nse[chid]['t_first'] - self.parameters_global[ 't_start']): t_start = ( self.parameters_nse[chid]['t_first'] - self.parameters_global[ 't_start']) if t_start > ( self.parameters_nse[chid]['t_last'] - self.parameters_global['t_start']): raise ValueError( 'Requested times window (%s to %s) is later than data are ' 'recorded (t_stop = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_nse[chid]['t_last'] - self.parameters_global['t_start']), filename_nse)) if t_stop is None: t_stop = (sys.maxsize) * self.nse_time_unit if t_stop is None or t_stop > ( self.parameters_nse[chid]['t_last'] - self.parameters_global[ 't_start']): t_stop = ( self.parameters_nse[chid]['t_last'] - self.parameters_global[ 't_start']) if t_stop < ( self.parameters_nse[chid]['t_first'] - self.parameters_global[ 't_start']): raise ValueError( 'Requested times window (%s to %s) is earlier than data ' 'are recorded (t_start = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_nse[chid]['t_first'] - self.parameters_global['t_start']), filename_nse)) if t_start >= t_stop: raise ValueError( 'Requested start time (%s) is later than / equal to stop ' 'time ' '(%s) for file %s.' 
% (t_start, t_stop, filename_nse)) # reading data [timestamps, channel_ids, cell_numbers, features, data_points] = self.__mmap_nse_packets(filename_nse) # load all units available if unit_list==[] or None if unit_list == [] or unit_list is None: unit_list = np.unique(cell_numbers) elif not any([u in cell_numbers for u in unit_list]): self._diagnostic_print( 'None of the requested unit ids (%s) present ' 'in nse file %s (contains unit_list %s)' % ( unit_list, filename_nse, np.unique(cell_numbers))) # extracting spikes unit-wise and generate spiketrains for unit_i in unit_list: if not lazy: # Extract all time stamps of that neuron on that electrode unit_mask = np.where(cell_numbers == unit_i)[0] spike_times = timestamps[unit_mask] * self.nse_time_unit spike_times = spike_times - self.parameters_global['t_start'] time_mask = np.where(np.logical_and(spike_times >= t_start, spike_times < t_stop)) spike_times = spike_times[time_mask] else: spike_times = pq.Quantity([], units=self.nse_time_unit) # Create SpikeTrain object st = SpikeTrain(times=spike_times, t_start=t_start, t_stop=t_stop, sampling_rate=self.parameters_ncs[chid][ 'sampling_rate'], name="Channel %i, Unit %i" % (chid, unit_i), file_origin=filename_nse, unit_id=unit_i, channel_id=chid) if waveforms and not lazy: # Collect all waveforms of the specific unit # For computational reasons: no units, no time axis st.waveforms = data_points[unit_mask][time_mask] # TODO: Add units to waveforms (pq.uV?) and add annotation # left_sweep = x * pq.ms indicating when threshold crossing # occurred in waveform st.annotations.update(self.parameters_nse[chid]) st.annotations['electrode_id'] = chid # This annotations is necessary for automatic generation of # recordingchannels st.annotations['channel_index'] = chid seg.spiketrains.append(st) def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True, t_start=None, t_stop=None, unit_list=None, waveforms=False): ''' Reads ntt file and attaches content as spike train to provided neo segment. Arguments: filename_ntt : Name of the .ntt file to be loaded. seg : Neo Segment, to which the Spiketrain containing the data will be attached. lazy : Postpone actual reading of the data. Instead provide a dummy SpikeTrain. Default 'False'. cascade : Not used in this context. Default: 'True'. t_start : time (quantity) that the SpikeTrain begins. Default None. t_stop : time (quantity) that the SpikeTrain ends. Default None. unit_list : unit ids to be loaded. If [] or None all units are loaded. Default None. waveforms : Load the waveform (up to 32 data points) for each spike time. Default: False Returns: None ''' if filename_ntt[-4:] != '.ntt': filename_ntt += '.ntt' if sep in filename_ntt: filename_ntt = filename_ntt.split(sep)[-1] # extracting channel id of requested file channel_id = self.get_channel_id_by_file_name(filename_ntt) if channel_id is not None: chid = channel_id else: # if ntt file is empty it is not listed in self.parameters_ntt, but # in self.ntt_avail if filename_ntt in self.ntt_avail: warnings.warn('NeuralynxIO is attempting to read an empty ' '(not associated) ntt file (%s). ' 'Not loading ntt file.' % (filename_ntt)) return else: raise ValueError('NeuralynxIO is attempting to read a file ' 'not associated to this session (%s).' % ( filename_ntt)) # ensure meaningful values for requested start and stop times # in case time is provided in samples: transform to absolute time units # ncs sampling rate is best guess if there is no explicit sampling # rate given for ntt values. 
if 'sampling_rate' in self.parameters_ntt[chid]: sr = self.parameters_ntt[chid]['sampling_rate'] elif chid in self.parameters_ncs and 'sampling_rate' in \ self.parameters_ncs[chid]: sr = self.parameters_ncs[chid]['sampling_rate'] else: raise ValueError( 'No sampling rate present for channel id %i in ntt file ' '%s. ' 'Could also not find the sampling rate of the respective ' 'ncs ' 'file.' % ( chid, filename_ntt)) if isinstance(t_start, int): t_start = t_start / sr if isinstance(t_stop, int): t_stop = t_stop / sr # + rescaling to global recording start (first sample in any # recording file) if t_start is None or t_start < ( self.parameters_ntt[chid]['t_first'] - self.parameters_global[ 't_start']): t_start = ( self.parameters_ntt[chid]['t_first'] - self.parameters_global[ 't_start']) if t_start > ( self.parameters_ntt[chid]['t_last'] - self.parameters_global[ 't_start']): raise ValueError( 'Requested times window (%s to %s) is later than data are ' 'recorded (t_stop = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_ntt[chid]['t_last'] - self.parameters_global['t_start']), filename_ntt)) if t_stop is None: t_stop = (sys.maxsize) * self.ntt_time_unit if t_stop is None or t_stop > ( self.parameters_ntt[chid]['t_last'] - self.parameters_global[ 't_start']): t_stop = ( self.parameters_ntt[chid]['t_last'] - self.parameters_global[ 't_start']) if t_stop < ( self.parameters_ntt[chid]['t_first'] - self.parameters_global[ 't_start']): raise ValueError( 'Requested times window (%s to %s) is earlier than data ' 'are ' 'recorded (t_start = %s) ' 'for file %s.' % (t_start, t_stop, (self.parameters_ntt[chid]['t_first'] - self.parameters_global['t_start']), filename_ntt)) if t_start >= t_stop: raise ValueError( 'Requested start time (%s) is later than / equal to stop ' 'time ' '(%s) ' 'for file %s.' % (t_start, t_stop, filename_ntt)) # reading data [timestamps, channel_ids, cell_numbers, features, data_points] = self.__mmap_ntt_packets(filename_ntt) # TODO: When ntt available: Implement 1 RecordingChannelGroup per # Tetrode, such that each electrode gets its own recording channel # load all units available if units==[] if unit_list == [] or unit_list is None: unit_list = np.unique(cell_numbers) elif not any([u in cell_numbers for u in unit_list]): self._diagnostic_print( 'None of the requested unit ids (%s) present ' 'in ntt file %s (contains units %s)' % ( unit_list, filename_ntt, np.unique(cell_numbers))) # loading data for each unit and generating spiketrain for unit_i in unit_list: if not lazy: # Extract all time stamps of that neuron on that electrode mask = np.where(cell_numbers == unit_i)[0] spike_times = timestamps[mask] * self.ntt_time_unit spike_times = spike_times - self.parameters_global['t_start'] spike_times = spike_times[np.where( np.logical_and(spike_times >= t_start, spike_times < t_stop))] else: spike_times = pq.Quantity([], units=self.ntt_time_unit) # Create SpikeTrain object st = SpikeTrain(times=spike_times, t_start=t_start, t_stop=t_stop, sampling_rate=self.parameters_ncs[chid][ 'sampling_rate'], name="Channel %i, Unit %i" % (chid, unit_i), file_origin=filename_ntt, unit_id=unit_i, channel_id=chid) # Collect all waveforms of the specific unit if waveforms and not lazy: # For computational reasons: no units, no time axis # transposing to adhere to neo guidline, which states that # time should be in the first axis. # This is stupid and not intuitive. 
st.waveforms = np.array( [data_points[t, :, :] for t in range(len(timestamps)) if cell_numbers[t] == unit_i]).transpose() # TODO: Add units to waveforms (pq.uV?) and add annotation # left_sweep = x * pq.ms indicating when threshold crossing # occurred in waveform st.annotations = self.parameters_ntt[chid] st.annotations['electrode_id'] = chid # This annotations is necessary for automatic generation of # recordingchannels st.annotations['channel_index'] = chid seg.spiketrains.append(st) # private routines # ################################################# def _associate(self, cachedir=None, usecache='hash'): """ Associates the object with a specified Neuralynx session, i.e., a combination of a .nse, .nev and .ncs files. The meta data is read into the object for future reference. Arguments: cachedir : Directory for loading and saving hashes of recording sessions and pickled meta information about files extracted during association process use_cache: method used for cache identification. Possible values: 'hash'/ 'always'/'datesize'/'never'. Default 'hash' Returns: - """ # If already associated, disassociate first if self.associated: raise OSError( "Trying to associate an already associated NeuralynxIO " "object.") # Create parameter containers # Dictionary that holds different parameters read from the .nev file self.parameters_nse = {} # List of parameter dictionaries for all potential file types self.parameters_ncs = {} self.parameters_nev = {} self.parameters_ntt = {} # combined global parameters self.parameters_global = {} # Scanning session directory for recorded files self.sessionfiles = [f for f in listdir(self.sessiondir) if isfile(os.path.join(self.sessiondir, f))] # Listing available files self.ncs_avail = [] self.nse_avail = [] self.nev_avail = [] self.ntt_avail = [] # Listing associated (=non corrupted, non empty files) self.ncs_asso = [] self.nse_asso = [] self.nev_asso = [] self.ntt_asso = [] if usecache not in ['hash', 'always', 'datesize', 'never']: raise ValueError( "Argument value of usecache '%s' is not valid. Accepted " "values are 'hash','always','datesize','never'" % usecache) if cachedir is None and usecache != 'never': raise ValueError('No cache directory provided.') # check if there are any changes of the data files -> new data check run check_files = True if usecache != 'always' else False # never # checking files if usecache=='always' if cachedir is not None and usecache != 'never': self._diagnostic_print( 'Calculating %s of session files to check for cached ' 'parameter files.' 
% usecache) cachefile = cachedir + sep + self.sessiondir.split(sep)[ -1] + '/hashkeys' if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[-1]): os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1]) if usecache == 'hash': hashes_calc = {} # calculates hash of all available files for f in self.sessionfiles: file_hash = self.hashfile(open(self.sessiondir + sep + f, 'rb'), hashlib.sha256()) hashes_calc[f] = file_hash elif usecache == 'datesize': hashes_calc = {} for f in self.sessionfiles: hashes_calc[f] = self.datesizefile( self.sessiondir + sep + f) # load hashes saved for this session in an earlier loading run if os.path.exists(cachefile): hashes_read = pickle.load(open(cachefile, 'rb')) else: hashes_read = {} # compare hashes to previously saved meta data und load meta data # if no changes occured if usecache == 'always' or all([f in hashes_calc and f in hashes_read and hashes_calc[f] == hashes_read[f] for f in self.sessionfiles]): check_files = False self._diagnostic_print( 'Using cached metadata from earlier analysis run in ' 'file ' '%s. Skipping file checks.' % cachefile) # loading saved parameters parameterfile = cachedir + sep + self.sessiondir.split(sep)[ -1] + '/parameters.cache' if os.path.exists(parameterfile): parameters_read = pickle.load(open(parameterfile, 'rb')) else: raise OSError('Inconsistent cache files.') for IOdict, dictname in [(self.parameters_global, 'global'), (self.parameters_ncs, 'ncs'), (self.parameters_nse, 'nse'), (self.parameters_nev, 'nev'), (self.parameters_ntt, 'ntt')]: IOdict.update(parameters_read[dictname]) self.nev_asso = self.parameters_nev.keys() self.ncs_asso = [val['filename'] for val in self.parameters_ncs.values()] self.nse_asso = [val['filename'] for val in self.parameters_nse.values()] self.ntt_asso = [val['filename'] for val in self.parameters_ntt.values()] for filename in self.sessionfiles: # Extracting only continuous signal files (.ncs) if filename[-4:] == '.ncs': self.ncs_avail.append(filename) elif filename[-4:] == '.nse': self.nse_avail.append(filename) elif filename[-4:] == '.nev': self.nev_avail.append(filename) elif filename[-4:] == '.ntt': self.ntt_avail.append(filename) else: self._diagnostic_print( 'Ignoring file of unknown data type %s' % filename) if check_files: self._diagnostic_print('Starting individual file checks.') # ======================================================================= # # Scan NCS files # ======================================================================= self._diagnostic_print( '\nDetected %i .ncs file(s).' % (len(self.ncs_avail))) for ncs_file in self.ncs_avail: # Loading individual NCS file and extracting parameters self._diagnostic_print("Scanning " + ncs_file + ".") # Reading file packet headers filehandle = self.__mmap_ncs_packet_headers(ncs_file) if filehandle is None: continue try: # Checking consistency of ncs file self.__ncs_packet_check(filehandle) except AssertionError: warnings.warn( 'Session file %s did not pass data packet check. ' 'This file can not be loaded.' 
% ncs_file) continue # Reading data packet header information and store them in # parameters_ncs self.__read_ncs_data_headers(filehandle, ncs_file) # Reading txt file header channel_id = self.get_channel_id_by_file_name(ncs_file) self.__read_text_header(ncs_file, self.parameters_ncs[channel_id]) # Check for invalid starting times of data packets in ncs file self.__ncs_invalid_first_sample_check(filehandle) # Check ncs file for gaps self.__ncs_gap_check(filehandle) self.ncs_asso.append(ncs_file) # ======================================================================= # # Scan NSE files # ======================================================================= # Loading individual NSE file and extracting parameters self._diagnostic_print( '\nDetected %i .nse file(s).' % (len(self.nse_avail))) for nse_file in self.nse_avail: # Loading individual NSE file and extracting parameters self._diagnostic_print('Scanning ' + nse_file + '.') # Reading file filehandle = self.__mmap_nse_packets(nse_file) if filehandle is None: continue try: # Checking consistency of nse file self.__nse_check(filehandle) except AssertionError: warnings.warn( 'Session file %s did not pass data packet check. ' 'This file can not be loaded.' % nse_file) continue # Reading header information and store them in parameters_nse self.__read_nse_data_header(filehandle, nse_file) # Reading txt file header channel_id = self.get_channel_id_by_file_name(nse_file) self.__read_text_header(nse_file, self.parameters_nse[channel_id]) # using sampling rate from txt header, as this is not saved # in data packets if 'SamplingFrequency' in self.parameters_nse[channel_id]: self.parameters_nse[channel_id]['sampling_rate'] = \ (self.parameters_nse[channel_id][ 'SamplingFrequency'] * self.nse_sr_unit) self.nse_asso.append(nse_file) # ======================================================================= # # Scan NEV files # ======================================================================= self._diagnostic_print( '\nDetected %i .nev file(s).' % (len(self.nev_avail))) for nev_file in self.nev_avail: # Loading individual NEV file and extracting parameters self._diagnostic_print('Scanning ' + nev_file + '.') # Reading file filehandle = self.__mmap_nev_file(nev_file) if filehandle is None: continue try: # Checking consistency of nev file self.__nev_check(filehandle) except AssertionError: warnings.warn( 'Session file %s did not pass data packet check. ' 'This file can not be loaded.' % nev_file) continue # Reading header information and store them in parameters_nev self.__read_nev_data_header(filehandle, nev_file) # Reading txt file header self.__read_text_header(nev_file, self.parameters_nev[nev_file]) self.nev_asso.append(nev_file) # ======================================================================= # # Scan NTT files # ======================================================================= self._diagnostic_print( '\nDetected %i .ntt file(s).' % (len(self.ntt_avail))) for ntt_file in self.ntt_avail: # Loading individual NTT file and extracting parameters self._diagnostic_print('Scanning ' + ntt_file + '.') # Reading file filehandle = self.__mmap_ntt_file(ntt_file) if filehandle is None: continue try: # Checking consistency of nev file self.__ntt_check(filehandle) except AssertionError: warnings.warn( 'Session file %s did not pass data packet check. ' 'This file can not be loaded.' 
% ntt_file) continue # Reading header information and store them in parameters_nev self.__read_ntt_data_header(filehandle, ntt_file) # Reading txt file header self.__read_ntt_text_header(ntt_file) # using sampling rate from txt header, as this is not saved # in data packets if 'SamplingFrequency' in self.parameters_ntt[channel_id]: self.parameters_ntt[channel_id]['sampling_rate'] = \ (self.parameters_ntt[channel_id][ 'SamplingFrequency'] * self.ntt_sr_unit) self.ntt_asso.append(ntt_file) # ======================================================================= # # Check consistency across files # ======================================================================= # check RECORDING_OPENED / CLOSED times (from txt header) for # different files for parameter_collection in [self.parameters_ncs, self.parameters_nse, self.parameters_nev, self.parameters_ntt]: # check recoding_closed times for specific file types if any(np.abs(np.diff([i['recording_opened'] for i in parameter_collection.values()])) > datetime.timedelta(seconds=1)): raise ValueError( 'NCS files were opened for recording with a delay ' 'greater than 0.1 second.') # check recoding_closed times for specific file types if any(np.diff([i['recording_closed'] for i in parameter_collection.values() if i['recording_closed'] is not None]) > datetime.timedelta(seconds=0.1)): raise ValueError( 'NCS files were closed after recording with a ' 'delay ' 'greater than 0.1 second.') # get maximal duration of any file in the recording parameter_collection = list(self.parameters_ncs.values()) + \ list(self.parameters_nse.values()) + \ list(self.parameters_ntt.values()) + \ list(self.parameters_nev.values()) self.parameters_global['recording_opened'] = min( [i['recording_opened'] for i in parameter_collection]) self.parameters_global['recording_closed'] = max( [i['recording_closed'] for i in parameter_collection]) # Set up GLOBAL TIMING SCHEME # ############################# for file_type, parameter_collection in [ ('ncs', self.parameters_ncs), ('nse', self.parameters_nse), ('nev', self.parameters_nev), ('ntt', self.parameters_ntt)]: # check starting times name_t1, name_t2 = ['t_start', 't_stop'] if ( file_type != 'nse' and file_type != 'ntt') \ else ['t_first', 't_last'] # checking if files of same type start at same time point if file_type != 'nse' and file_type != 'ntt' \ and len(np.unique(np.array( [i[name_t1].magnitude for i in parameter_collection.values()]))) > 1: raise ValueError( '%s files do not start at same time point.' 
% file_type) # saving t_start and t_stop for each file type available if len([i[name_t1] for i in parameter_collection.values()]): self.parameters_global['%s_t_start' % file_type] = min( [i[name_t1] for i in parameter_collection.values()]) self.parameters_global['%s_t_stop' % file_type] = min( [i[name_t2] for i in parameter_collection.values()]) # extracting minimial t_start and maximal t_stop value for this # recording session self.parameters_global['t_start'] = min( [self.parameters_global['%s_t_start' % t] for t in ['ncs', 'nev', 'nse', 'ntt'] if '%s_t_start' % t in self.parameters_global]) self.parameters_global['t_stop'] = max( [self.parameters_global['%s_t_stop' % t] for t in ['ncs', 'nev', 'nse', 'ntt'] if '%s_t_start' % t in self.parameters_global]) # checking gap consistency across ncs files # check number of gaps detected if len(np.unique([len(i['gaps']) for i in self.parameters_ncs.values()])) != 1: raise ValueError('NCS files contain different numbers of gaps!') # check consistency of gaps across files and create global gap # collection self.parameters_global['gaps'] = [] for g in range(len(list(self.parameters_ncs.values())[0]['gaps'])): integrated = False gap_stats = np.unique( [i['gaps'][g] for i in self.parameters_ncs.values()], return_counts=True) if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1: raise ValueError( 'Gap number %i is not consistent across NCS ' 'files.' % ( g)) else: # check if this is second part of already existing gap for gg in range(len(self.parameters_global['gaps'])): globalgap = self.parameters_global['gaps'][gg] # check if stop time of first is start time of second # -> continuous gap if globalgap[2] == \ list(self.parameters_ncs.values())[0]['gaps'][ g][1]: self.parameters_global['gaps'][gg] = \ self.parameters_global['gaps'][gg][:2] + ( list(self.parameters_ncs.values())[0][ 'gaps'][g][ 2],) integrated = True break if not integrated: # add as new gap if this is not a continuation of # existing global gap self.parameters_global['gaps'].append( list(self.parameters_ncs.values())[0][ 'gaps'][g]) # save results of association for future analysis together with hash # values for change tracking if cachedir is not None and usecache != 'never': pickle.dump({'global': self.parameters_global, 'ncs': self.parameters_ncs, 'nev': self.parameters_nev, 'nse': self.parameters_nse, 'ntt': self.parameters_ntt}, open(cachedir + sep + self.sessiondir.split(sep)[ -1] + '/parameters.cache', 'wb')) if usecache != 'always': pickle.dump(hashes_calc, open( cachedir + sep + self.sessiondir.split(sep)[ -1] + '/hashkeys', 'wb')) self.associated = True # private routines # #########################################################� # Memory Mapping Methods def __mmap_nse_packets(self, filename): """ Memory map of the Neuralynx .ncs file optimized for extraction of data packet headers Reading standard dtype improves speed, but timestamps need to be reconstructed """ filesize = getsize(self.sessiondir + sep + filename) # in byte if filesize > 16384: data = np.memmap(self.sessiondir + sep + filename, dtype='<u2', shape=((filesize - 16384) // 2 // 56, 56), mode='r', offset=16384) # reconstructing original data # first 4 ints -> timestamp in microsec timestamps = data[:, 0] \ + data[:, 1] * 2 ** 16 \ + data[:, 2] * 2 ** 32 \ + data[:, 3] * 2 ** 48 channel_id = data[:, 4] + data[:, 5] * 2 ** 16 cell_number = data[:, 6] + data[:, 7] * 2 ** 16 features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in range(8, 23, 2)] features = np.array(features, dtype='i4') data_points = 
data[:, 24:56].astype('i2') del data return timestamps, channel_id, cell_number, features, data_points else: return None def __mmap_ncs_data(self, filename): """ Memory map of the Neuralynx .ncs file optimized for data extraction""" if getsize(self.sessiondir + sep + filename) > 16384: data = np.memmap(self.sessiondir + sep + filename, dtype=np.dtype(('i2', (522))), mode='r', offset=16384) # removing data packet headers and flattening data return data[:, 10:] else: return None def __mmap_ncs_packet_headers(self, filename): """ Memory map of the Neuralynx .ncs file optimized for extraction of data packet headers Reading standard dtype improves speed, but timestamps need to be reconstructed """ filesize = getsize(self.sessiondir + sep + filename) # in byte if filesize > 16384: data = np.memmap(self.sessiondir + sep + filename, dtype='<u4', shape=((filesize - 16384) // 4 // 261, 261), mode='r', offset=16384) ts = data[:, 0:2] multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data), axis=0) timestamps = np.sum(ts * multi, axis=1) # timestamps = data[:,0] + (data[:,1] *2**32) header_u4 = data[:, 2:5] return timestamps, header_u4 else: return None def __mmap_ncs_packet_timestamps(self, filename): """ Memory map of the Neuralynx .ncs file optimized for extraction of data packet headers Reading standard dtype improves speed, but timestamps need to be reconstructed """ filesize = getsize(self.sessiondir + sep + filename) # in byte if filesize > 16384: data = np.memmap(self.sessiondir + sep + filename, dtype='<u4', shape=(int((filesize - 16384) / 4 / 261), 261), mode='r', offset=16384) ts = data[:, 0:2] multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data), axis=0) timestamps = np.sum(ts * multi, axis=1) # timestamps = data[:,0] + data[:,1]*2**32 return timestamps else: return None def __mmap_nev_file(self, filename): """ Memory map the Neuralynx .nev file """ nev_dtype = np.dtype([ ('reserved', '<i2'), ('system_id', '<i2'), ('data_size', '<i2'), ('timestamp', '<u8'), ('event_id', '<i2'), ('ttl_input', '<i2'), ('crc_check', '<i2'), ('dummy1', '<i2'), ('dummy2', '<i2'), ('extra', '<i4', (8,)), ('event_string', 'a128'), ]) if getsize(self.sessiondir + sep + filename) > 16384: return np.memmap(self.sessiondir + sep + filename, dtype=nev_dtype, mode='r', offset=16384) else: return None def __mmap_ntt_file(self, filename): """ Memory map the Neuralynx .nse file """ nse_dtype = np.dtype([ ('timestamp', '<u8'), ('sc_number', '<u4'), ('cell_number', '<u4'), ('params', '<u4', (8,)), ('data', '<i2', (32, 4)), ]) if getsize(self.sessiondir + sep + filename) > 16384: return np.memmap(self.sessiondir + sep + filename, dtype=nse_dtype, mode='r', offset=16384) else: return None def __mmap_ntt_packets(self, filename): """ Memory map of the Neuralynx .ncs file optimized for extraction of data packet headers Reading standard dtype improves speed, but timestamps need to be reconstructed """ filesize = getsize(self.sessiondir + sep + filename) # in byte if filesize > 16384: data = np.memmap(self.sessiondir + sep + filename, dtype='<u2', shape=((filesize - 16384) / 2 / 152, 152), mode='r', offset=16384) # reconstructing original data # first 4 ints -> timestamp in microsec timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \ data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48 channel_id = data[:, 4] + data[:, 5] * 2 ** 16 cell_number = data[:, 6] + data[:, 7] * 2 ** 16 features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in range(8, 23, 2)] features = np.array(features, dtype='i4') data_points = data[:, 
24:152].astype('i2').reshape((4, 32)) del data return timestamps, channel_id, cell_number, features, data_points else: return None # ___________________________ header extraction __________________________ def __read_text_header(self, filename, parameter_dict): # Reading main file header (plain text, 16kB) text_header = codecs.open(self.sessiondir + sep + filename, 'r', 'latin-1').read(16384) parameter_dict['cheetah_version'] = \ self.__get_cheetah_version_from_txt_header(text_header, filename) parameter_dict.update(self.__get_filename_and_times_from_txt_header( text_header, parameter_dict['cheetah_version'])) # separating lines of header and ignoring last line (fill), check if # Linux or Windows OS if sep == '/': text_header = text_header.split('\r\n')[:-1] if sep == '\\': text_header = text_header.split('\n')[:-1] # minor parameters possibly saved in header (for any file type) minor_keys = ['AcqEntName', 'FileType', 'FileVersion', 'RecordSize', 'HardwareSubSystemName', 'HardwareSubSystemType', 'SamplingFrequency', 'ADMaxValue', 'ADBitVolts', 'NumADChannels', 'ADChannel', 'InputRange', 'InputInverted', 'DSPLowCutFilterEnabled', 'DspLowCutFrequency', 'DspLowCutNumTaps', 'DspLowCutFilterType', 'DSPHighCutFilterEnabled', 'DspHighCutFrequency', 'DspHighCutNumTaps', 'DspHighCutFilterType', 'DspDelayCompensation', 'DspFilterDelay_\xb5s', 'DisabledSubChannels', 'WaveformLength', 'AlignmentPt', 'ThreshVal', 'MinRetriggerSamples', 'SpikeRetriggerTime', 'DualThresholding', 'Feature Peak 0', 'Feature Valley 1', 'Feature Energy 2', 'Feature Height 3', 'Feature NthSample 4', 'Feature NthSample 5', 'Feature NthSample 6', 'Feature NthSample 7', 'SessionUUID', 'FileUUID', 'CheetahRev', 'ProbeName', 'OriginalFileName', 'TimeCreated', 'TimeClosed', 'ApplicationName', 'AcquisitionSystem', 'ReferenceChannel'] # extracting minor key values of header (only taking into account # non-empty lines) for i, minor_entry in enumerate(text_header): if minor_entry == '' or minor_entry[0] == '#': continue matching_key = [key for key in minor_keys if minor_entry.strip('-').startswith(key)] if len(matching_key) == 1: matching_key = matching_key[0] minor_value = minor_entry.split(matching_key)[1].strip( ' ').rstrip(' ') # determine data type of entry if minor_value.isdigit(): # converting to int if possible minor_value = int(minor_value) else: # converting to float if possible try: minor_value = float(minor_value) except: pass if matching_key in parameter_dict: warnings.warn( 'Multiple entries for {} in text header of {}'.format( matching_key, filename)) else: parameter_dict[matching_key] = minor_value elif len(matching_key) > 1: raise ValueError( 'Inconsistent minor key list for text header ' 'interpretation.') else: warnings.warn( 'Skipping text header entry %s, because it is not in ' 'minor key list' % minor_entry) self._diagnostic_print( 'Successfully decoded text header of file (%s).' 
% filename) def __get_cheetah_version_from_txt_header(self, text_header, filename): version_regex = re.compile(r'((-CheetahRev )|' r'(ApplicationName Cheetah "))' r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})') match = version_regex.search(text_header) if match: return match.groupdict()['version'] else: raise ValueError('Can not extract Cheetah version from file ' 'header of file %s' % filename) def __get_filename_and_times_from_txt_header(self, text_header, version): if parse_version(version) <= parse_version('5.6.4'): datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): ' r'(?P<date>\S+)' r' \(h:m:s\.ms\) ' r'(?P<time>\S+)') datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): ' r'(?P<date>\S+)' r' \(h:m:s\.ms\) ' r'(?P<time>\S+)') filename_regex = re.compile(r'## File Name (?P<filename>\S+)') datetimeformat = '%m/%d/%Y %H:%M:%S.%f' else: datetime1_regex = re.compile(r'-TimeCreated ' r'(?P<date>\S+) ' r'(?P<time>\S+)') datetime2_regex = re.compile(r'-TimeClosed ' r'(?P<date>\S+) ' r'(?P<time>\S+)') filename_regex = re.compile(r'-OriginalFileName ' r'"?(?P<filename>\S+)"?') datetimeformat = '%Y/%m/%d %H:%M:%S' matchtime1 = datetime1_regex.search(text_header).groupdict() matchtime2 = datetime2_regex.search(text_header).groupdict() matchfilename = filename_regex.search(text_header) filename = matchfilename.groupdict()['filename'] if '## Time Closed File was not closed properly' in text_header: warnings.warn('Text header of file %s does not contain recording ' 'closed time. File was not closed properly.' '' % filename) datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' ' + matchtime1['time'], datetimeformat) datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' ' + matchtime2['time'], datetimeformat) output = {'recording_opened': datetime1, 'recording_closed': datetime2, 'file_created': datetime1, 'file_closed': datetime2, 'recording_file_name': filename} return output def __read_ncs_data_headers(self, filehandle, filename): ''' Reads the .ncs data block headers and stores the information in the object's parameters_ncs dictionary. Args: filehandle (file object): Handle to the already opened .ncs file. filename (string): Name of the ncs file. Returns: dict of extracted data ''' timestamps = filehandle[0] header_u4 = filehandle[1] channel_id = header_u4[0][0] sr = header_u4[0][1] # in Hz t_start = timestamps[0] # in microseconds # calculating corresponding time stamp of first sample, that was not # recorded any more # t_stop= time of first sample in last packet +(#samples per packet * # conversion factor / sampling rate) # conversion factor is needed as times are recorded in ms t_stop = timestamps[-1] + ( (header_u4[-1][2]) * ( 1 / self.ncs_time_unit.rescale(pq.s)).magnitude / header_u4[-1][1]) if channel_id in self.parameters_ncs: raise ValueError( 'Detected multiple ncs files for channel_id %i.' % channel_id) else: sampling_unit = [pq.CompoundUnit('%f*%s' '' % (sr, self.ncs_sr_unit.symbol))] sampling_rate = sr * self.ncs_sr_unit self.parameters_ncs[channel_id] = {'filename': filename, 't_start': t_start * self.ncs_time_unit, 't_stop': t_stop * self.ncs_time_unit, 'sampling_rate': sampling_rate, 'sampling_unit': sampling_unit, 'gaps': []} return {channel_id: self.parameters_ncs[channel_id]} def __read_nse_data_header(self, filehandle, filename): ''' Reads the .nse data block headers and stores the information in the object's parameters_ncs dictionary. Args: filehandle (file object): Handle to the already opened .nse file. filename (string): Name of the nse file. 
Returns: - ''' [timestamps, channel_ids, cell_numbers, features, data_points] = filehandle if filehandle is not None: t_first = timestamps[0] # in microseconds t_last = timestamps[-1] # in microseconds channel_id = channel_ids[0] cell_count = cell_numbers[0] # number of cells identified self.parameters_nse[channel_id] = {'filename': filename, 't_first': t_first * self.nse_time_unit, 't_last': t_last * self.nse_time_unit, 'cell_count': cell_count} def __read_ntt_data_header(self, filehandle, filename): ''' Reads the .nse data block headers and stores the information in the object's parameters_ncs dictionary. Args: filehandle (file object): Handle to the already opened .nse file. filename (string): Name of the nse file. Returns: - ''' [timestamps, channel_ids, cell_numbers, features, data_points] = filehandle if filehandle is not None: t_first = timestamps[0] # in microseconds t_last = timestamps[-1] # in microseconds channel_id = channel_ids[0] cell_count = cell_numbers[0] # number of cells identified # spike_parameters = filehandle[0][3] # else: # t_first = None # channel_id = None # cell_count = 0 # # spike_parameters = None # # self._diagnostic_print('Empty file: No information # contained in %s'%filename) self.parameters_ntt[channel_id] = {'filename': filename, 't_first': t_first * self.ntt_time_unit, 't_last': t_last * self.nse_time_unit, 'cell_count': cell_count} def __read_nev_data_header(self, filehandle, filename): ''' Reads the .nev data block headers and stores the relevant information in the object's parameters_nev dictionary. Args: filehandle (file object): Handle to the already opened .nev file. filename (string): Name of the nev file. Returns: - ''' # Extracting basic recording events to be able to check recording # consistency if filename in self.parameters_nev: raise ValueError( 'Detected multiple nev files of name %s.' % (filename)) else: self.parameters_nev[filename] = {} if 'Starting_Recording' in self.parameters_nev[filename]: raise ValueError('Trying to read second nev file of name %s. ' ' Only one can be handled.' % filename) self.parameters_nev[filename]['Starting_Recording'] = [] self.parameters_nev[filename]['events'] = [] for event in filehandle: # separately extracting 'Starting Recording' if ((event[4] in [11, 19]) and (event[10].decode('latin-1') == 'Starting Recording')): self.parameters_nev[filename]['Starting_Recording'].append( event[3] * self.nev_time_unit) # adding all events to parameter collection self.parameters_nev[filename]['events'].append( {'timestamp': event[3] * self.nev_time_unit, 'event_id': event[4], 'nttl': event[5], 'name': event[10].decode('latin-1')}) if len(self.parameters_nev[filename]['Starting_Recording']) < 1: raise ValueError( 'No Event "Starting_Recording" detected in %s' % ( filename)) self.parameters_nev[filename]['t_start'] = min( self.parameters_nev[filename]['Starting_Recording']) # t_stop = time stamp of last event in file self.parameters_nev[filename]['t_stop'] = max( [e['timestamp'] for e in self.parameters_nev[filename]['events']]) # extract all occurring event types (= combination of nttl, # event_id and name/string) event_types = copy.deepcopy(self.parameters_nev[filename]['events']) for d in event_types: d.pop('timestamp') self.parameters_nev[filename]['event_types'] = [dict(y) for y in {tuple( x.items()) for x in event_types}] # ________________ File Checks __________________________________ def __ncs_packet_check(self, filehandle): ''' Checks consistency of data in ncs file and raises assertion error if a check fails. 
Detected recording gaps are added to parameter_ncs Args: filehandle (file object): Handle to the already opened .ncs file. ''' timestamps = filehandle[0] header_u4 = filehandle[1] # checking sampling rate of data packets sr0 = header_u4[0, 1] assert all(header_u4[:, 1] == sr0) # checking channel id of data packets channel_id = header_u4[0, 0] assert all(header_u4[:, 0] == channel_id) # time offset of data packets # TODO: Check if there is a safer way to do the delta_t check for ncs # data packets # this is a not safe assumption, that the first two data packets have # correct time stamps delta_t = timestamps[1] - timestamps[0] # valid samples of first data packet temp_valid_samples = header_u4[0, 2] # unit test # time difference between packets corresponds to number of recorded # samples assert delta_t == ( temp_valid_samples / ( self.ncs_time_unit.rescale(pq.s).magnitude * sr0)) self._diagnostic_print('NCS packet check successful.') def __nse_check(self, filehandle): ''' Checks consistency of data in ncs file and raises assertion error if a check fails. Args: filehandle (file object): Handle to the already opened .nse file. ''' [timestamps, channel_ids, cell_numbers, features, data_points] = filehandle assert all(channel_ids == channel_ids[0]) assert all([len(dp) == len(data_points[0]) for dp in data_points]) self._diagnostic_print('NSE file check successful.') def __nev_check(self, filehandle): ''' Checks consistency of data in nev file and raises assertion error if a check fails. Args: filehandle (file object): Handle to the already opened .nev file. ''' # this entry should always equal 2 (see Neuralynx File Description), # but it is not. For me, this is 0. assert all([f[2] == 2 or f[2] == 0 for f in filehandle]) # TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be # non-zero. Interpretation? Include in event extraction. # only observed 0 for index 0,1,2,6,7,8,9 in nev files. # If they are non-zero, this needs to be included in event extraction assert all([f[0] == 0 for f in filehandle]) assert all([f[1] == 0 for f in filehandle]) assert all([f[2] in [0, 2] for f in filehandle]) assert all([f[6] == 0 for f in filehandle]) assert all([f[7] == 0 for f in filehandle]) assert all([f[8] == 0 for f in filehandle]) assert all([all(f[9] == 0) for f in filehandle]) self._diagnostic_print('NEV file check successful.') def __ntt_check(self, filehandle): ''' Checks consistency of data in ncs file and raises assertion error if a check fails. Args: filehandle (file object): Handle to the already opened .nse file. ''' # TODO: check this when first .ntt files are available [timestamps, channel_ids, cell_numbers, features, data_points] = filehandle assert all(channel_ids == channel_ids[0]) assert all([len(dp) == len(data_points[0]) for dp in data_points]) self._diagnostic_print('NTT file check successful.') def __ncs_gap_check(self, filehandle): ''' Checks individual data blocks of ncs files for consistent starting times with respect to sample count. This covers intended recording gaps as well as shortened data packet, which are incomplete ''' timestamps = filehandle[0] header_u4 = filehandle[1] channel_id = header_u4[0, 0] if channel_id not in self.parameters_ncs: self.parameters_ncs[channel_id] = {} # time stamps of data packets delta_t = timestamps[1] - timestamps[0] # in microsec data_packet_offsets = np.diff(timestamps) # in microsec # check if delta_t corresponds to number of valid samples present in # data packets # NOTE: This also detects recording gaps! 
valid_samples = header_u4[:-1, 2] sampling_rate = header_u4[0, 1] packet_checks = (valid_samples / (self.ncs_time_unit.rescale( pq.s).magnitude * sampling_rate)) == data_packet_offsets if not all(packet_checks): if 'broken_packets' not in self.parameters_ncs[channel_id]: self.parameters_ncs[channel_id]['broken_packets'] = [] broken_packets = np.where(np.array(packet_checks) is False)[0] for broken_packet in broken_packets: self.parameters_ncs[channel_id]['broken_packets'].append( (broken_packet, valid_samples[broken_packet], data_packet_offsets[broken_packet])) self._diagnostic_print('Detected broken packet in NCS file at ' 'packet id %i (sample number %i ' 'time offset id %i)' '' % (broken_packet, valid_samples[broken_packet], data_packet_offsets[broken_packet]) ) # in microsec # checking for irregular data packet durations -> gaps / shortened # data packets if not all(data_packet_offsets == delta_t): if 'gaps' not in self.parameters_ncs[channel_id]: self.parameters_ncs[channel_id]['gaps'] = [] # gap identification by (sample of gap start, duration) # gap packets gap_packet_ids = np.where(data_packet_offsets != delta_t)[0] for gap_packet_id in gap_packet_ids: # skip if this packet starting time is known to be corrupted # hoping no corruption and gap occurs simultaneously # corrupted time stamp affects two delta_t comparisons: if gap_packet_id in self.parameters_ncs[channel_id][ 'invalid_first_samples'] \ or gap_packet_id + 1 in self.parameters_ncs[channel_id][ 'invalid_first_samples']: continue gap_start = timestamps[ gap_packet_id] # t_start of last packet [microsec] gap_stop = timestamps[ gap_packet_id + 1] # t_stop of first packet [microsec] self.parameters_ncs[channel_id]['gaps'].append((gap_packet_id, gap_start, gap_stop)) # # [,microsec,microsec] self._diagnostic_print('Detected gap in NCS file between' 'sample time %i and %i (last correct ' 'packet id %i)' % (gap_start, gap_stop, gap_packet_id)) def __ncs_invalid_first_sample_check(self, filehandle): ''' Checks data blocks of ncs files for corrupted starting times indicating a missing first sample in the data packet. These are then excluded from the gap check, but ignored for further analysis. ''' timestamps = filehandle[0] header_u4 = filehandle[1] channel_id = header_u4[0, 0] self.parameters_ncs[channel_id]['invalid_first_samples'] = [] # checking if first bit of timestamp is 1, which indicates error invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0] if len(invalid_packet_ids) > 0: warnings.warn('Invalid first sample(s) detected in ncs file' '(packet id(s) %i)! This error is ignored in' 'subsequent routines.' 
% (invalid_packet_ids)) self.parameters_ncs[channel_id][ 'invalid_first_samples'] = invalid_packet_ids # checking consistency of data around corrupted packet time for invalid_packet_id in invalid_packet_ids: if invalid_packet_id < 2 or invalid_packet_id > len( filehandle) - 2: raise ValueError( 'Corrupted ncs data packet at the beginning' 'or end of file.') elif (timestamps[invalid_packet_id + 1] - timestamps[ invalid_packet_id - 1] != 2 * ( timestamps[invalid_packet_id - 1] - timestamps[ invalid_packet_id - 2])): raise ValueError('Starting times of ncs data packets around' 'corrupted data packet are not ' 'consistent!') # Supplementory Functions def get_channel_id_by_file_name(self, filename): """ Checking parameters of NCS, NSE and NTT Files for given filename and return channel_id if result is consistent :param filename: :return: """ channel_ids = [] channel_ids += [k for k in self.parameters_ncs if self.parameters_ncs[k]['filename'] == filename] channel_ids += [k for k in self.parameters_nse if self.parameters_nse[k]['filename'] == filename] channel_ids += [k for k in self.parameters_ntt if self.parameters_ntt[k]['filename'] == filename] if len(np.unique(np.asarray(channel_ids))) == 1: return channel_ids[0] elif len(channel_ids) > 1: raise ValueError( 'Ambiguous channel ids detected. Filename %s is associated' ' to different channels of NCS and NSE and NTT %s' '' % (filename, channel_ids)) else: # if filename was not detected return None def hashfile(self, afile, hasher, blocksize=65536): buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.digest() def datesizefile(self, filename): return str(os.path.getmtime(filename)) + '_' + str( os.path.getsize(filename)) def _diagnostic_print(self, text): ''' Print a diagnostic message. Args: text (string): Diagnostic text to print. Returns: - ''' if self._print_diagnostic: print('NeuralynxIO: ' + text)
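# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original NeuralynxIO class: the
# __mmap_nse_packets / __mmap_ncs_packet_headers methods above memory-map the
# binary records with a narrow unsigned dtype ('<u2' / '<u4') for speed and
# then rebuild the 64-bit microsecond timestamps from the low-order words.
# The helper below shows that reconstruction on synthetic packet data; the
# function name and the fake packet array are assumptions made for the example.
import numpy as np

def reconstruct_timestamps_u2(words):
    """Combine four little-endian uint16 words into one uint64 timestamp (microseconds)."""
    words = np.asarray(words, dtype=np.uint64)
    return (words[:, 0]
            + words[:, 1] * 2 ** 16
            + words[:, 2] * 2 ** 32
            + words[:, 3] * 2 ** 48)

# a single fake packet header whose timestamp is 1_000_000 us (= 1 s)
fake = np.array([[1_000_000 & 0xFFFF, (1_000_000 >> 16) & 0xFFFF, 0, 0]])
assert reconstruct_timestamps_u2(fake)[0] == 1_000_000
# ---------------------------------------------------------------------------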
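# ---------------------------------------------------------------------------
# Illustrative sketch of the gap-detection idea used by __ncs_gap_check above,
# under the assumption that packet timestamps are in microseconds (as in the
# reader): the spacing of consecutive packet timestamps is compared against
# the duration implied by each packet's valid-sample count and sampling rate,
# and any packet whose successor starts later than expected marks a recording
# gap. All numbers below are made up for the example.
import numpy as np

sampling_rate = 32000.0                                  # Hz
valid_samples = np.array([512, 512, 512, 512])           # samples per packet
expected_us = valid_samples / sampling_rate * 1e6        # 16000 us per full packet
timestamps = np.array([0, 16000, 32000, 80000, 96000])   # gap after the 3rd packet

offsets = np.diff(timestamps)
gap_packets = np.where(offsets != expected_us)[0]
print(gap_packets)   # -> [2]: packet 2 is followed by a recording gap
# ---------------------------------------------------------------------------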
bsd-3-clause
jarathomas/openVA-Pipeline
pipeline.py
1
49777
#-------------------------------------------------------------------------------------------------------------------------------------------# # openVA Pipeline: pipeline.py -- Software for processing Verbal Autopsy data with automated cause of death assignment. # # Copyright (C) 2018 Jason Thomas, Samuel Clark, Martin Bratschi in collaboration with the Bloomberg Data for Health Initiative # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # #-------------------------------------------------------------------------------------------------------------------------------------------# #-------------------------------------------------------------------------------------------------------------------------------------------# # User Settings sqlitePW = "enilepiP" dbName = "Pipeline.db" #-------------------------------------------------------------------------------------------------------------------------------------------# from pysqlcipher3 import dbapi2 as sqlcipher from pandas import read_csv, groupby import pandas as pd import sys import csv import datetime import os import subprocess import shutil import requests import json import sqlite3 import time import re import pickle #-------------------------------------------------------------------------------------------------------------------------------------------# # Define functions and objects needed for functioning of pipeline; then set up log files and configuration of pipeline #-------------------------------------------------------------------------------------------------------------------------------------------# class Dhis(object): """Access DHIS2 API.""" def __init__(self, dhisURL, dhisUser, dhisPass): if '/api' in dhisURL: print('Please do not specify /api/ in the server argument: e.g. --server=play.dhis2.org/demo') sys.exit() if dhisURL.startswith('localhost') or dhisURL.startswith('127.0.0.1'): dhisURL = 'http://{}'.format(dhisURL) elif dhisURL.startswith('http://'): dhisURL = dhisURL elif not dhisURL.startswith('https://'): dhisURL = 'https://{}'.format(dhisURL) self.auth = (dhisUser, dhisPass) self.url = '{}/api/25'.format(dhisURL) def get(self, endpoint, params=None): """ GET method for DHIS2 API. :rtype: dict """ url = '{}/{}.json'.format(self.url, endpoint) if not params: params = {} params['paging'] = False try: r = requests.get(url=url, params=params, auth=self.auth) if r.status_code != 200: print("HTTP Code: {}".format(r.status_code)) ## HERE print(r.text) else: return r.json() except requests.RequestException: raise requests.RequestException def post(self, endpoint, data): """ POST method for DHIS2 API. 
:rtype: dict """ url = '{}/{}.json'.format(self.url, endpoint) try: r = requests.post(url=url, json=data, auth=self.auth) if r.status_code not in range(200, 206): print("HTTP Code: {}".format(r.status_code)) ## HERE print(r.text) else: return r.json() except requests.RequestException: raise requests.RequestException def post_blob(self, f): """ Post file to DHIS2 and return created UID for that file :rtype: str """ url = '{}/fileResources'.format(self.url) files = {'file': (f, open(f, 'rb'), 'application/x-sqlite3', {'Expires': '0'})} try: r = requests.post(url, files=files, auth=self.auth) if r.status_code not in (200, 202): print("HTTP Code: {}".format(r.status_code)) ## HERE print(r.text) else: response = r.json() file_id = response['response']['fileResource']['id'] return file_id except requests.RequestException: raise requests.RequestException class VerbalAutopsyEvent(object): """ DHIS2 event + a BLOB file resource""" def __init__(self, va_id, program, dhis_orgunit, event_date, sex, dob, age, cod_code, algorithm_metadata, file_id): self.va_id = va_id self.program = program self.dhis_orgunit = dhis_orgunit self.event_date = event_date self.sex = sex self.dob = dob self.age = age self.cod_code = cod_code self.algorithm_metadata = algorithm_metadata self.datavalues = [ {"dataElement": "htm6PixLJNy", "value": self.va_id}, {"dataElement": "hi7qRC4SMMk", "value": self.sex}, {"dataElement": "mwSaVq64k7j", "value": self.dob}, {"dataElement": "F4XGdOBvWww", "value": self.cod_code}, {"dataElement": "wiJviUqN1io", "value": self.algorithm_metadata}, {"dataElement": "oPAg4MA0880", "value": self.age}, {"dataElement": "XLHIBoLtjGt", "value": file_id} ] def format_to_dhis2(self, dhisUser): """ Format object to DHIS2 compatible event for DHIS2 API :rtype: dict """ event = { "program": self.program, "orgUnit": self.dhis_orgunit, "eventDate": datetime.datetime.strftime(self.event_date, '%Y-%m-%d'), "status": "COMPLETED", "storedBy": dhisUser, "dataValues": self.datavalues } return event def __str__(self): return json.dumps(self, default=lambda o: o.__dict__) def create_db(fName, evaList): """ Create a SQLite database with VA data + COD :rtype: None """ conn = sqlite3.connect(fName) with conn: cur = conn.cursor() cur.execute("CREATE TABLE vaRecord(ID INT, Attrtibute TEXT, Value TEXT)") cur.executemany("INSERT INTO vaRecord VALUES (?,?,?)", evaList) def getCODCode(myDict, searchFor): for i in range(len(myDict.keys())): match = re.search(searchFor, list(myDict.keys())[i]) if match: return list(myDict.values())[i] # set the ODK_Conf table item odkLastRunResult as 0, log error message, and exit script def cleanup(errorMsg): # handle single case when DB file not found if connectionError == "1": with open(connectionErrorFile, "w", newline="") as f: writer = csv.writer(f) writer.writerow([timeFMT, "Unable to Connect to SQLite Database, see {} for details".format(errorFile)]) sys.exit(1) else: # update ODK_Conf table with LastRunResult = 0 try: sql = "UPDATE ODK_Conf SET odkLastRunResult = ?" 
par = ("0",) cursor.execute(sql, par) db.commit() if os.path.isfile(connectionErrorFile) == True: try: os.remove(connectionErrorFile) except OSError: print("Could not remove {}".format(connectionErrorFile)) # write errorMsg to errorFile if DB is inaccessible except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError): db.rollback() errorMsg[2] += "; unable to set odkLastRunResult to 0 (in ODK_Conf table)" try: with open(errorFile, "a", newline="") as f: writer = csv.writer(f) writer.writerow(errorMsg) except OSError: print(errorMsg) # close DB resources and exit script finally: cursor.close() db.close() sys.exit(1) def findKeyValue(key, d): if key in d: yield d[key] for k in d: if isinstance(d[k], list): for i in d[k]: for j in findKeyValue(key, i): yield j # error log files errorFile = "./dbErrorLog.csv" timeFMT = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") connectionError = "0" connectionErrorFile = "./sqlConnect.csv" ## create error file if it does not exist if os.path.isfile(errorFile) == False: try: with open(errorFile, "w", newline="") as f: writer = csv.writer(f) writer.writerow(["Date"] + ["Description"] + ["Additional Information"]) except (OSError) as e: print(str(e)) sys.exit(1) # connect to the database and configure the pipeline's settings for ODK Aggregate, openVA, and DHIS2. if os.path.isfile(dbName) == False: connectionError = "1" with open(errorFile, "a", newline="") as f: writer = csv.writer(f) writer.writerow([timeFMT, "Database {}.db not found".format(dbName), ]) cleanup() db = sqlcipher.connect(dbName) db.execute("PRAGMA key = " + sqlitePW) sqlODK = "SELECT odkID, odkURL, odkUser, odkPass, odkFormID, odkLastRun, odkLastRunResult FROM ODK_Conf" sqlPipeline = "SELECT workingDirectory, openVA_Algorithm, algorithmMetadataCode, codSource FROM Pipeline_Conf" sqlInterVA4 = "SELECT HIV, Malaria FROM InterVA4_Conf" sqlAdvancedInterVA4 = "SELECT directory, filename, output, append, groupcode, replicate, replicate_bug1, replicate_bug2, write FROM Advanced_InterVA4_Conf" sqlInSilicoVA = "SELECT Nsim FROM InSilicoVA_Conf" sqlAdvancedInSilicoVA = "SELECT isNumeric, updateCondProb, keepProbbase_level, CondProb, CondProbNum, datacheck, datacheck_missing," \ + "warning_write, external_sep, thin, burnin, auto_length, conv_csmf, jump_scale," \ + "levels_prior, levels_strength, trunc_min, trunc_max, subpop, java_option, seed," \ + "phy_code, phy_cat, phy_unknown, phy_external, phy_debias, exclude_impossible_cause, indiv_CI " \ + "FROM Advanced_InSilicoVA_Conf" sqlDHIS = "SELECT dhisURL, dhisUser, dhisPass, dhisOrgUnit FROM DHIS_Conf" sqlCODCodes_WHO = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'WHO'" sqlCODCodes_Tariff = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'Tariff'" ## grab configuration settings from SQLite DB try: # ODK configuration cursor = db.cursor() cursor.execute(sqlODK) odkQuery = cursor.fetchall() for row in odkQuery: odkID = row[0] odkURL = row[1] odkUser = row[2] odkPass = row[3] odkFormID = row[4] odkLastRun = row[5] odkLastRunDate = datetime.datetime.strptime(odkLastRun, "%Y-%m-%d_%H:%M:%S").strftime("%Y/%m/%d") odkLastRunDatePrev = (datetime.datetime.strptime(odkLastRunDate, "%Y/%m/%d") - datetime.timedelta(days=1)).strftime("%Y/%m/%d") odkLastRunResult = row[6] # Pipeline configuration cursor.execute(sqlPipeline) pipelineQuery = cursor.fetchall() for row in pipelineQuery: processDir = row[0] pipelineAlgorithm = row[1] algorithmMetadataCode = row[2] codSource = row[3] # InterVA4 configuration 
cursor.execute(sqlInterVA4) interVA4Query = cursor.fetchall() for row in interVA4Query: interVA_HIV = row[0] interVA_Malaria = row[1] # InterVA4 advanced configuration cursor.execute(sqlAdvancedInterVA4) advancedInterVA4Query = cursor.fetchall() for row in advancedInterVA4Query: interVA_directory = row[0] interVA_filename = row[1] interVA_output = row[2] interVA_append = row[3] interVA_groupcode = row[4] interVA_replicate = row[5] interVA_replicate_bug1 = row[6] interVA_replicate_bug2 = row[7] interVA_write = row[8] # InSilicoVA configuration cursor.execute(sqlInSilicoVA) insilicoVAQuery = cursor.fetchall() for row in insilicoVAQuery: insilico_Nsim = row[0] # InSilicoVA advanced configuration cursor.execute(sqlAdvancedInSilicoVA) advancedInsilicoVAQuery = cursor.fetchall() for row in advancedInsilicoVAQuery: insilico_isNumeric = row[ 0] insilico_updateCondProb = row[ 1] insilico_keepProbbase_level = row[ 2] insilico_CondProb = row[ 3] insilico_CondProbNum = row[ 4] insilico_datacheck = row[ 5] insilico_datacheck_missing = row[ 6] insilico_warning_write = row[ 7] insilico_external_sep = row[ 8] insilico_thin = row[ 9] insilico_burnin = row[10] insilico_auto_length = row[11] insilico_conv_csmf = row[12] insilico_jump_scale = row[13] insilico_levels_prior = row[14] insilico_levels_strength = row[15] insilico_trunc_min = row[16] insilico_trunc_max = row[17] insilico_subpop = row[18] insilico_java_option = row[19] insilico_seed = row[20] insilico_phy_code = row[21] insilico_phy_cat = row[22] insilico_phy_unknown = row[23] insilico_phy_external = row[24] insilico_phy_debias = row[25] insilico_exclude_impossible_cause = row[26] insilico_indiv_CI = row[27] # DHIS2 configuration cursor.execute(sqlDHIS) dhisQuery = cursor.fetchall() for row in dhisQuery: dhisURL = row[0] dhisUser = row[1] dhisPass = row[2] dhisOrgUnit = row[3] # CoD Codes for DHIS2 cursor.execute(sqlCODCodes_WHO) resultsWHO = cursor.fetchall() codesWHO = dict(resultsWHO) cursor.execute(sqlCODCodes_Tariff) resultsTariff = cursor.fetchall() codesTariff = dict(resultsTariff) except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Problem selecting config information from ODK_Conf ", str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Problem selecting config information from ODK_Conf"] cleanup(errorMsg) #-------------------------------------------------------------------------------------------------------------------------------------------# # create folders & files to store (ODK & openVA) input and output; also create call to ODK Briefcase #-------------------------------------------------------------------------------------------------------------------------------------------# odkBCExportDir = processDir + "/ODKExport" odkBCExportFilename = "ODKExportNew.csv" odkBCExportPrevious = odkBCExportDir + "/ODKExportPrevious.csv" odkBCExportNewFile = odkBCExportDir + "/" + odkBCExportFilename odkBCArgumentList = "java -jar ODK-Briefcase-v1.10.1.jar -oc -em -id '" + odkFormID + "' -sd '" + odkBCExportDir + "' -ed '" \ + odkBCExportDir + "' -f '" + odkBCExportFilename + "' -url '" + odkURL + "' -u '" + odkUser \ + "' -p '" + odkPass + "' -start '" + odkLastRunDatePrev + "'" openVAFilesDir = processDir + "/OpenVAFiles" openVAReadyFile = odkBCExportDir + "/OpenVAReadyFile.csv" rScriptIn = openVAFilesDir + "/" + 
timeFMT + "/RScript_" + timeFMT + ".R" rScriptOut = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".Rout" dhisDir = processDir + "/DHIS2" if codSource=="WHO": dhisCODCodes = codesWHO else: dhisCODCodes = codesTariff # check if processing directory exists and create if necessary if not os.path.exists(processDir): try: os.makedirs(processDir) except OSError as e: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Could not create processing directory: " + processDir, str(e), timeFMT) try: cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Could not create processing directory: " + processDir] cleanup(errorMsg) # create openVAFilesDir (if does not exist) if not os.path.exists(openVAFilesDir + "/" + timeFMT): try: os.makedirs(openVAFilesDir + "/" + timeFMT) except OSError as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Could not create openVA Directory: " + openVAFilesDir + "/" + timeFMT, str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Could not create openVA directory: " + openVAFilesDir + "/" + timeFMT] cleanup(errorMsg) # make a copy of current ODK Briefcase Export file, to compare with new file once exported (if there is an existing export file) if os.path.isfile(odkBCExportNewFile) == True and odkLastRunResult == 1 and not os.path.isfile(connectionErrorFile): try: shutil.copy(odkBCExportNewFile, odkBCExportPrevious) except (OSError, shutil.Error) as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Error: Trying to copy export files from ODK Briefcase", str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Trying to copy export files from ODK Briefcase"] cleanup(errorMsg) try: os.remove(openVAReadyFile) except (OSError) as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime)" par = ("Could not remove " + openVAReadyFile, str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Could not remove " + openVAReadyFile] cleanup(errorMsg) # launch ODK Briefcase to collect ODK Aggregate data and export to file for further processing try: process = subprocess.Popen(odkBCArgumentList, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = process.communicate() rc = process.returncode except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Could not launch ODK Briefcase Java Application", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: Could not launch ODK Briefcase Java Application",""] cleanup(errorMsg) # catch application errors from ODK Briefcase and log into EventLog table if rc != 0: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = (str(stderr), "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(stderr),""] cleanup(errorMsg) 
if "SEVERE" in str(stderr): try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = (stderr,"Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(stderr),""] cleanup(errorMsg) else: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Briefcase Export Completed Successfully", "Information", timeFMT) cursor.execute(sql, par) db.commit() sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?" par = (timeFMT,"1") cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "ODK Briefcase ran successfully but problems writing to DB (check odkLastRunResult in ODK_Conf)"] cleanup(errorMsg) # check if previous file exists from above operations and create delta file of new entries if os.path.isfile(odkBCExportPrevious) == True: try: ## WARNING: odkBCExportPrevious & odkBCExportNewFil (CSV files) ## contain sensitive VA information (leaving them in folder) with open(odkBCExportPrevious, "r", newline="") as t1, open(odkBCExportNewFile, "r", newline="") as t2: fileone = t1.readlines() filetwo = t2.readlines() header = filetwo[0] with open(openVAReadyFile, "w", newline="") as outFile: outFile.write(header) for line in filetwo: if line not in fileone: outFile.write(line) except OSError as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES" par = ("Could not create: " + openVAReadyFile, "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Could not create: " + openVAReadyFile] cleanup(errorMsg) else: # if there is no pre-existing ODK Briefcase Export file, then copy and rename to OpenVAReadyFile.csv try: shutil.copy(odkBCExportNewFile, openVAReadyFile) except (OSError, shutil.Error) as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = (e, "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Could not copy: " + odkBCExportNewFile + " to: " + openVAReadyFile] cleanup(errorMsg) # if no records retrieved, then close up shop; otherwise, create R script for running openVA ## WARNING: openVAReadyFile (CSV file) contains sensitive VA information (leaving it in folder) with open(openVAReadyFile, "r", newline="") as outFile: nRecords = len(list(outFile)) - 1 ## take away 1 for the column header if nRecords == 0: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("No Records From ODK Briefcase (nothing more to do)", "Information", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "No records from ODK Briefcase, but error writing to DB"] cleanup(errorMsg) try: sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?" 
par = (timeFMT,"1") cursor.execute(sql, par) db.commit() cursor.close() db.close() sys.exit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "No records from ODK Briefcase, but error writing to DB (trying to set odkLastRun & odkLastRunResult)."] cleanup(errorMsg) try: with open(rScriptIn, "w", newline="") as f: f.write("date() \n") f.write("library(openVA); library(CrossVA) \n") f.write("getwd() \n") f.write("records <- read.csv('" + openVAReadyFile + "') \n") # InSilicoVA if pipelineAlgorithm == "InSilicoVA": f.write("names(data) <- tolower(data) \n") f.write("data <- map_records_insilicova(records) \n") ## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate if odkID == None: f.write("data$ID <- records$meta.instanceID \n") else: f.write("data$ID <- records$" + odkID + "\n") f.write("results <- insilico(data=data, " + ", \n") f.write("\t isNumeric=" + insilico_isNumeric + ", \n") f.write("\t updateCondProb=" + insilico_updateCondProb + ", \n") f.write("\t keepProbbase.level=" + insilico_keepProbbase_level + ", \n") f.write("\t CondProb=" + insilico_CondProb + ", \n") f.write("\t CondProbNum=" + insilico_CondProbNum + ", \n") f.write("\t datacheck=" + insilico_datacheck + ", \n") f.write("\t datacheck.missing=" + insilico_datacheck_missing + ", \n") f.write("\t warning.write=" + insilico_warning_write + ", \n") f.write("\t external.sep=" + insilico_external_sep + ", \n") f.write("\t Nsim=" + insilico_Nsim + ", \n") f.write("\t thin=" + insilico_thin + ", \n") f.write("\t burnin=" + insilico_burnin + ", \n") f.write("\t auto.length=" + insilico_auto_length + ", \n") f.write("\t conv.csmf=" + insilico_conv_csmf + ", \n") f.write("\t jump.scale=" + insilico_jump_scale + ", \n") f.write("\t levels.prior=" + insilico_levels_prior + ", \n") f.write("\t levels.strength=" + insilico_levels_strength + ", \n") f.write("\t trunc.min=" + insilico_trunc_min + ", \n") f.write("\t trunc.max=" + insilico_trunc_max + ", \n") f.write("\t subpop=" + insilico_subpop + ", \n") f.write("\t java.option=" + insilico_java_option + ", \n") f.write("\t seed=" + insilico_seed + ", \n") f.write("\t phy.code=" + insilico_phy_code + ", \n") f.write("\t phy.cat=" + insilico_phy_cat + ", \n") f.write("\t phy.unknown=" + insilico_phy_unknown + ", \n") f.write("\t phy.external=" + insilico_phy_external + ", \n") f.write("\t phy.debias=" + insilico_phy_debias + ", \n") f.write("\t exclude.impossible.cause=" + insilico_exclude_impossible_cause + ", \n") f.write("\t indiv.CI=" + insilico_indiv_CI + ") \n") f.write("sex <- ifelse(tolower(data$male)=='y', 'Male', 'Female') \n") # InterVA if pipelineAlgorithm == "InterVA": f.write("data <- map_records_interva4(records) \n") ## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate if odkID == None: f.write("data$ID <- records$meta.instanceID \n") else: f.write("data$ID <- records$" + odkID + "\n") f.write("results <- InterVA(Input=data, \n") f.write("\t HIV= '" + interVA_HIV + "', \n") f.write("\t Malaria = '" + interVA_Malaria + "', \n") f.write("\t output='" + interVA_output + "', \n") f.write("\t groupcode=" + interVA_groupcode + ", \n") f.write("\t replicate=" + interVA_replicate + ", \n") f.write("\t replicate.bug1=" + interVA_replicate_bug1 + ", \n") f.write("\t replicate.bug2=" + interVA_replicate_bug2 + ", \n") f.write("\t write=FALSE) \n") f.write("sex <- ifelse(tolower(data$MALE)=='y', 'Male', 'Female') \n") # write results f.write("cod <- 
getTopCOD(results) \n") f.write("hasCOD <- as.character(data$ID) %in% as.character(levels(cod$ID)) \n") f.write("dob <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10021), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary! f.write("dod <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10023), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary! f.write("age <- floor(records$consented.deceased_CRVS.info_on_deceased.ageInDays/365.25) \n") f.write("## create matrices for DHIS2 blob (data2) and transfer database (data3) \n") f.write("## first column must be ID \n") f.write("metadataCode <- '" + algorithmMetadataCode + "'\n") f.write("cod2 <- rep('MISSING', nrow(data)); cod2[hasCOD] <- as.character(cod[,2]) \n") f.write("data2 <- cbind(data[,-1], cod2, metadataCode) \n") f.write("names(data2) <- c(names(data[,-1]), 'Cause of Death', 'Metadata') \n") f.write("evaBlob <- cbind(rep(as.character(data[,1]), each=ncol(data2)), rep(names(data2)), c(apply(data2, 1, c))) \n") f.write("colnames(evaBlob) <- c('ID', 'Attribute', 'Value') \n") f.write("write.csv(evaBlob, file='" + openVAFilesDir + "/entityAttributeValue.csv', row.names=FALSE, na='') \n\n") f.write("data3 <- cbind(as.character(data[,1]), sex, dob, dod, age, cod2, metadataCode, data[,-1]) \n") f.write("names(data3) <- c('id', 'sex', 'dob', 'dod', 'age', 'cod', 'metadataCode', names(data[,-1])) \n") f.write("write.csv(data3, file='" + openVAFilesDir + "/recordStorage.csv', row.names=FALSE, na='') \n") except OSError as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Could not create R Script File","Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Could not create R Script File"] cleanup(errorMsg) # run R script rBatch = "R CMD BATCH --vanilla " + rScriptIn + " " + rScriptOut rprocess = subprocess.Popen(rBatch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = rprocess.communicate() rrc = rprocess.returncode if rrc != 0: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Could not run R Script", str(stderr), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: Could not run R Script", str(stderr)] cleanup(errorMsg) else: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("OpenVA Analysis Completed Successfully", "Information", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "OpenVA Analysis Completed Successfully (error committing message to database)."] cleanup(errorMsg) # push results to DHIS2 try: api = Dhis(dhisURL, dhisUser, dhisPass) except (requests.RequestException) as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to connect to DHIS2", str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Unable to connect to DHIS2"] cleanup(errorMsg) # verify VA program and orgUnit try: vaPrograms = api.get("programs", params={"filter": "name:like:Verbal Autopsy"}).get("programs") 
orgUnitValid = len(api.get("organisationUnits", params={"filter": "id:eq:{}".format(dhisOrgUnit)})["organisationUnits"])==1 if not orgUnitValid: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Organisation Unit UID could not be found.", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: Organisation Unit UID could not be found.", "Error committing message to database"] cleanup(errorMsg) if not vaPrograms: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("'Verbal Autopsy' program not found", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: 'Verbal Autopsy' program not found.", "Error committing message to database"] cleanup(errorMsg) elif len(vaPrograms) > 1: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("More than one 'Verbal Autopsy' found.", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: More than one 'Verbal Autopsy' found.", "Error committing message to database"] cleanup(errorMsg) except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error: Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID", "Error committing message to database"] cleanup(errorMsg) vaProgramUID = vaPrograms[0]["id"] blobPath = os.path.join(dhisDir, "blobs") try: if not os.path.isdir(blobPath): os.makedirs(blobPath) except OSError as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to create folder for DHIS blobs.", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Error: Unable to create folder for DHIS blobs."] cleanup(errorMsg) events = [] export = {} ## read in VA data (with COD and algorithm metadata) from csv's (and create groups by ID for Entity-Attribute-Value file) try: ## WARNING: The following CSV file contains sensitive VA information (leaving it in folder)! 
dfDHIS2 = pd.read_csv(openVAFilesDir + "/entityAttributeValue.csv") except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv", "Error committing message to database"] cleanup(errorMsg) grouped = dfDHIS2.groupby(["ID"]) ## prepare events for DHIS2 export try: with open(openVAFilesDir + "/recordStorage.csv", "r", newline="") as csvIn: with open(openVAFilesDir + "/newStorage.csv", "w", newline="") as csvOut: reader = csv.reader(csvIn) writer = csv.writer(csvOut, lineterminator="\n") header = next(reader) header.extend(["dhisVerbalAutopsyID", "pipelineOutcome"]) writer.writerow(header) for row in reader: if row[5]!="MISSING": vaID = str(row[0]) blobFile = "{}.db".format(os.path.join(dhisDir, "blobs", vaID)) blobRecord = grouped.get_group(str(row[0])) blobEVA = blobRecord.values.tolist() ## create DHIS2 blob try: create_db(blobFile, blobEVA) except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to create DHIS2 BLOB", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Unable to create DHIS2 BLOB", "Error committing message to database"] cleanup(errorMsg) ## post DHIS2 blob try: fileID = api.post_blob(blobFile) except requests.RequestException as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to post BLOB to DHIS2", str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Unable to post BLOB to DHIS2"] cleanup(errorMsg) sex = row[1].lower() dob = row[2] if row[3] =="": eventDate = datetime.date(9999,9,9) else: dod = datetime.datetime.strptime(row[3], "%Y-%m-%d") eventDate = datetime.date(dod.year, dod.month, dod.day) age = row[4] if row[5] == "Undetermined": codCode = "99" else: codCode = getCODCode(dhisCODCodes, row[5]) e = VerbalAutopsyEvent(vaID, vaProgramUID, dhisOrgUnit, eventDate, sex, dob, age, codCode, algorithmMetadataCode, fileID) events.append(e.format_to_dhis2(dhisUser)) row.extend([vaID, "Pushing to DHIS2"]) writer.writerow(row) else: row.extend(["", "No CoD Assigned"]) writer.writerow(row) except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir, "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir, "Error committing message to database"] cleanup(errorMsg) export["events"] = events try: log = api.post("events", data=export) except requests.RequestException as e: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Unable to post events to DHIS2 VA Program.", str(e), timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Unable to post events 
to DHIS2 VA Program."] cleanup(errorMsg) if 'importSummaries' not in log['response'].keys(): try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Failed to retrieve summary from post to DHIS2 VA Program.", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error", "Failed to retrieve summary from post to DHIS2 VA Program."] cleanup(errorMsg) if log["httpStatusCode"] == 200: nPosted = len(log['response']['importSummaries']) try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Successfully posted {} events to DHIS2 VA Program.".format(nPosted), "Information", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Successfully posted {} events to DHIS2 VA Program, but error writing to DB".format(nPosted)] cleanup(errorMsg) vaReferences = list(findKeyValue("reference", d=log["response"])) dfNewStorage = pd.read_csv(openVAFilesDir + "/newStorage.csv") try: for vaReference in vaReferences: postedDataValues = api.get("events/{}".format(vaReference)).get("dataValues") postedVAIDIndex = next((index for (index, d) in enumerate(postedDataValues) if d["dataElement"]=="htm6PixLJNy"), None) postedVAID = postedDataValues[postedVAIDIndex]["value"] rowVAID = dfNewStorage["dhisVerbalAutopsyID"] == postedVAID dfNewStorage.loc[rowVAID,"pipelineOutcome"] = "Pushed to DHIS2" except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Error trying to verify events posted to DHIS2", "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, "Error trying to verify events posted to DHIS2", ""] cleanup(errorMsg) # store results in database try: for row in dfNewStorage.itertuples(): xferDBID = row[1] xferDBOutcome = row[254] vaData = row[1],row[8:253] vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))]) xferDBRecord = pickle.dumps(vaDataFlat) sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)" par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT] cursor.execute(sqlXferDB, par) db.commit() ## note: to read back in: (1) cursor.exetute(SQL SELECT STATEMENT) (2) results = pickle.loads(sqlResult[0]) ## An alternative version of storing VA records to SQLite DB(not relying on pickle) # for row in dfNewStorage.itertuples(): # xferDBID = row[1] # xferDBOutcome = row[254] # with open("xferDBRecord.txt", "w", newline="") as f: # vaData = row[1],row[8:253] # vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))]) # writer = csv.writer(f, lineterminator="\n") # writer.writerow(vaDataFlat) # with open("xferDBRecord.txt", "rb") as f: # xferDBRecord = f.read() # sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)" # par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT] # cursor.execute(sqlXferDB, par) # db.commit() except: try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Error storing Blobs to {}.db".format(dbName), "Error", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = 
[timeFMT, "Error storing Blobs to {}.db".format(dbName), ""] cleanup(errorMsg) try: nNewStorage = dfNewStorage.shape[0] sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Stored {} records to {}.db".format(nNewStorage, dbName), "Information", timeFMT) cursor.execute(sql, par) db.commit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Stored {} records to {}.db, but error trying to log message to EventLog".format(nNewStorage, dbName)] cleanup(errorMsg) # all done! try: sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)" par = ("Successful completion of Pipeline", "Information", str(datetime.datetime.now())) cursor.execute(sql, par) db.commit() cursor.close() db.close() sys.exit() except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e: db.rollback() errorMsg = [timeFMT, str(e), "Finished executing Pipeline steps, but error trying to log last message."] cleanup(errorMsg)
gpl-3.0
PrashntS/scikit-learn
sklearn/linear_model/ridge.py
60
44642
""" Ridge regression """ # Author: Mathieu Blondel <mathieu@mblondel.org> # Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com> # Fabian Pedregosa <fabian@fseoane.net> # Michael Eickenberg <michael.eickenberg@nsup.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy import linalg from scipy import sparse from scipy.sparse import linalg as sp_linalg from .base import LinearClassifierMixin, LinearModel, _rescale_data from .sag import sag_solver from .sag_fast import get_max_squared_sum from ..base import RegressorMixin from ..utils.extmath import safe_sparse_dot from ..utils import check_X_y from ..utils import check_array from ..utils import check_consistent_length from ..utils import compute_sample_weight from ..utils import column_or_1d from ..preprocessing import LabelBinarizer from ..grid_search import GridSearchCV from ..externals import six from ..metrics.scorer import check_scoring def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0): n_samples, n_features = X.shape X1 = sp_linalg.aslinearoperator(X) coefs = np.empty((y.shape[1], n_features)) if n_features > n_samples: def create_mv(curr_alpha): def _mv(x): return X1.matvec(X1.rmatvec(x)) + curr_alpha * x return _mv else: def create_mv(curr_alpha): def _mv(x): return X1.rmatvec(X1.matvec(x)) + curr_alpha * x return _mv for i in range(y.shape[1]): y_column = y[:, i] mv = create_mv(alpha[i]) if n_features > n_samples: # kernel ridge # w = X.T * inv(X X^t + alpha*Id) y C = sp_linalg.LinearOperator( (n_samples, n_samples), matvec=mv, dtype=X.dtype) coef, info = sp_linalg.cg(C, y_column, tol=tol) coefs[i] = X1.rmatvec(coef) else: # linear ridge # w = inv(X^t X + alpha*Id) * X.T y y_column = X1.rmatvec(y_column) C = sp_linalg.LinearOperator( (n_features, n_features), matvec=mv, dtype=X.dtype) coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol) if info < 0: raise ValueError("Failed with error code %d" % info) if max_iter is None and info > 0 and verbose: warnings.warn("sparse_cg did not converge after %d iterations." % info) return coefs def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3): n_samples, n_features = X.shape coefs = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) # According to the lsqr documentation, alpha = damp^2. 
sqrt_alpha = np.sqrt(alpha) for i in range(y.shape[1]): y_column = y[:, i] info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter) coefs[i] = info[0] n_iter[i] = info[2] return coefs, n_iter def _solve_cholesky(X, y, alpha): # w = inv(X^t X + alpha*Id) * X.T y n_samples, n_features = X.shape n_targets = y.shape[1] A = safe_sparse_dot(X.T, X, dense_output=True) Xy = safe_sparse_dot(X.T, y, dense_output=True) one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) if one_alpha: A.flat[::n_features + 1] += alpha[0] return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T else: coefs = np.empty([n_targets, n_features]) for coef, target, current_alpha in zip(coefs, Xy.T, alpha): A.flat[::n_features + 1] += current_alpha coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel() A.flat[::n_features + 1] -= current_alpha return coefs def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): # dual_coef = inv(X X^t + alpha*Id) y n_samples = K.shape[0] n_targets = y.shape[1] if copy: K = K.copy() alpha = np.atleast_1d(alpha) one_alpha = (alpha == alpha[0]).all() has_sw = isinstance(sample_weight, np.ndarray) \ or sample_weight not in [1.0, None] if has_sw: # Unlike other solvers, we need to support sample_weight directly # because K might be a pre-computed kernel. sw = np.sqrt(np.atleast_1d(sample_weight)) y = y * sw[:, np.newaxis] K *= np.outer(sw, sw) if one_alpha: # Only one penalty, we can solve multi-target problems in one time. K.flat[::n_samples + 1] += alpha[0] try: # Note: we must use overwrite_a=False in order to be able to # use the fall-back solution below in case a LinAlgError # is raised dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False) except np.linalg.LinAlgError: warnings.warn("Singular matrix in solving dual problem. Using " "least-squares solution instead.") dual_coef = linalg.lstsq(K, y)[0] # K is expensive to compute and store in memory so change it back in # case it was user-given. K.flat[::n_samples + 1] -= alpha[0] if has_sw: dual_coef *= sw[:, np.newaxis] return dual_coef else: # One penalty per target. We need to solve each target separately. dual_coefs = np.empty([n_targets, n_samples]) for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): K.flat[::n_samples + 1] += current_alpha dual_coef[:] = linalg.solve(K, target, sym_pos=True, overwrite_a=False).ravel() K.flat[::n_samples + 1] -= current_alpha if has_sw: dual_coefs *= sw[np.newaxis, :] return dual_coefs.T def _solve_svd(X, y, alpha): U, s, Vt = linalg.svd(X, full_matrices=False) idx = s > 1e-15 # same default value as scipy.linalg.pinv s_nnz = s[idx][:, np.newaxis] UTy = np.dot(U.T, y) d = np.zeros((s.size, alpha.size)) d[idx] = s_nnz / (s_nnz ** 2 + alpha) d_UT_y = d * UTy return np.dot(Vt.T, d_UT_y).T def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=1e-3, verbose=0, random_state=None, return_n_iter=False): """Solve the ridge equation by the method of normal equations. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- X : {array-like, sparse matrix, LinearOperator}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values alpha : {float, array-like}, shape = [n_targets] if array-like The l_2 penalty to be used. If an array is passed, penalties are assumed to be specific to targets max_iter : int, optional Maximum number of iterations for conjugate gradient solver. 
For 'sparse_cg' and 'lsqr' solvers, the default value is determined by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample. If sample_weight is not None and solver='auto', the solver will be set to 'cholesky'. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution via a Cholesky decomposition of dot(X.T, X) - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. It also uses an iterative procedure, and is often faster than other solvers when both n_samples and n_features are large. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. All last four solvers support both dense and sparse data. tol : float Precision of the solution. verbose : int Verbosity level. Setting verbose > 0 will display additional information depending on the solver used. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. return_n_iter : boolean, default False If True, the method also returns `n_iter`, the actual number of iteration performed by the solver. Returns ------- coef : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). n_iter : int, optional The actual number of iteration performed by the solver. Only returned if `return_n_iter` is True. Notes ----- This function won't compute the intercept. 
""" # SAG needs X and y columns to be C-contiguous and np.float64 if solver == 'sag': X = check_array(X, accept_sparse=['csr'], dtype=np.float64, order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='F') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) y = check_array(y, dtype='numeric', ensure_2d=False) check_consistent_length(X, y) n_samples, n_features = X.shape if y.ndim > 2: raise ValueError("Target y has the wrong shape %s" % str(y.shape)) ravel = False if y.ndim == 1: y = y.reshape(-1, 1) ravel = True n_samples_, n_targets = y.shape if n_samples != n_samples_: raise ValueError("Number of samples in X and y does not correspond:" " %d != %d" % (n_samples, n_samples_)) has_sw = sample_weight is not None if solver == 'auto': # cholesky if it's a dense array and cg in any other case if not sparse.issparse(X) or has_sw: solver = 'cholesky' else: solver = 'sparse_cg' elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'): warnings.warn("""lsqr not available on this machine, falling back to sparse_cg.""") solver = 'sparse_cg' if has_sw: if np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") if solver != 'sag': # SAG supports sample_weight directly. For other solvers, # we implement sample_weight via a simple rescaling. X, y = _rescale_data(X, y, sample_weight) # There should be either 1 or n_targets penalties alpha = np.asarray(alpha).ravel() if alpha.size not in [1, n_targets]: raise ValueError("Number of targets and number of penalties " "do not correspond: %d != %d" % (alpha.size, n_targets)) if alpha.size == 1 and n_targets > 1: alpha = np.repeat(alpha, n_targets) if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'): raise ValueError('Solver %s not understood' % solver) n_iter = None if solver == 'sparse_cg': coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose) elif solver == 'lsqr': coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol) elif solver == 'cholesky': if n_features > n_samples: K = safe_sparse_dot(X, X.T, dense_output=True) try: dual_coef = _solve_cholesky_kernel(K, y, alpha) coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' else: try: coef = _solve_cholesky(X, y, alpha) except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' elif solver == 'sag': # precompute max_squared_sum for all targets max_squared_sum = get_max_squared_sum(X) coef = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): coef_, n_iter_, _ = sag_solver( X, target.ravel(), sample_weight, 'squared', alpha_i, max_iter, tol, verbose, random_state, False, max_squared_sum, dict()) coef[i] = coef_ n_iter[i] = n_iter_ coef = np.asarray(coef) if solver == 'svd': if sparse.issparse(X): raise TypeError('SVD solver does not support sparse' ' inputs currently') coef = _solve_svd(X, y, alpha) if ravel: # When y was passed as a 1d-array, we flatten the coefficients. 
coef = coef.ravel() if return_n_iter: return coef, n_iter else: return coef class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)): @abstractmethod def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto", random_state=None): self.alpha = alpha self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.max_iter = max_iter self.tol = tol self.solver = solver self.random_state = random_state def fit(self, X, y, sample_weight=None): X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64, multi_output=True, y_numeric=True) if ((sample_weight is not None) and np.atleast_1d(sample_weight).ndim > 1): raise ValueError("Sample weights must be 1D array or scalar") X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) self.coef_, self.n_iter_ = ridge_regression( X, y, alpha=self.alpha, sample_weight=sample_weight, max_iter=self.max_iter, tol=self.tol, solver=self.solver, random_state=self.random_state, return_n_iter=True) self._set_intercept(X_mean, y_mean, X_std) return self class Ridge(_BaseRidge, RegressorMixin): """Linear least squares with l2 regularization. This model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : {float, array-like}, shape (n_targets) Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. For 'sparse_cg' and 'lsqr' solvers, the default value is determined by scipy.sparse.linalg. For 'sag' solver, the default value is 1000. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution. - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. 
It also uses an iterative procedure, and is often faster than other solvers when both n_samples and n_features are large. Note that 'sag' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from sklearn.preprocessing. All last four solvers support both dense and sparse data. tol : float Precision of the solution. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. Attributes ---------- coef_ : array, shape (n_features,) or (n_targets, n_features) Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. n_iter_ : array or None, shape (n_targets,) Actual number of iterations for each target. Available only for sag and lsqr solvers. Other solvers will return None. See also -------- RidgeClassifier, RidgeCV, KernelRidge Examples -------- >>> from sklearn.linear_model import Ridge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = Ridge(alpha=1.0) >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='auto', tol=0.001) """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto", random_state=None): super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver, random_state=random_state) def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample Returns ------- self : returns an instance of self. """ return super(Ridge, self).fit(X, y, sample_weight=sample_weight) class RidgeClassifier(LinearClassifierMixin, _BaseRidge): """Classifier using Ridge regression. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : float Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. The default value is determined by scipy.sparse.linalg. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. 
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution. - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. - 'sag' uses a Stochastic Average Gradient descent. It also uses an iterative procedure, and is faster than other solvers when both n_samples and n_features are large. tol : float Precision of the solution. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. Used in 'sag' solver. Attributes ---------- coef_ : array, shape (n_features,) or (n_classes, n_features) Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. n_iter_ : array or None, shape (n_targets,) Actual number of iterations for each target. Available only for sag and lsqr solvers. Other solvers will return None. See also -------- Ridge, RidgeClassifierCV Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, class_weight=None, solver="auto", random_state=None): super(RidgeClassifier, self).__init__( alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver, random_state=random_state) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit Ridge regression model. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples,n_features] Training data y : array-like, shape = [n_samples] Target values sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : returns an instance of self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_ class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation It allows efficient Leave-One-Out cross-validation. This class is not intended to be used directly. Use RidgeCV instead. Notes ----- We want to solve (K + alpha*Id)c = y, where K = X X^T is the kernel matrix. Let G = (K + alpha*Id)^-1. Dual solution: c = Gy Primal solution: w = X^T c Compute eigendecomposition K = Q V Q^T. 
Then G = Q (V + alpha*Id)^-1 Q^T, where (V + alpha*Id) is diagonal. It is thus inexpensive to inverse for many alphas. Let loov be the vector of prediction values for each example when the model was fitted with all examples but this example. loov = (KGY - diag(KG)Y) / diag(I-KG) Let looe be the vector of prediction errors for each example when the model was fitted with all examples but this example. looe = y - loov = c / diag(G) References ---------- http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf """ def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, copy_X=True, gcv_mode=None, store_cv_values=False): self.alphas = np.asarray(alphas) self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.copy_X = copy_X self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def _pre_compute(self, X, y): # even if X is very sparse, K is usually very dense K = safe_sparse_dot(X, X.T, dense_output=True) v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) return v, Q, QT_y def _decomp_diag(self, v_prime, Q): # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) return (v_prime * Q ** 2).sum(axis=-1) def _diag_dot(self, D, B): # compute dot(diag(D), B) if len(B.shape) > 1: # handle case where B is > 1-d D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B def _errors(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return y - (c / G_diag), c def _pre_compute_svd(self, X, y): if sparse.issparse(X): raise TypeError("SVD not supported for sparse matrices") U, s, _ = linalg.svd(X, full_matrices=0) v = s ** 2 UT_y = np.dot(U.T, y) return v, U, UT_y def _errors_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case where y is 2-d G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case when y is 2-d G_diag = G_diag[:, np.newaxis] return y - (c / G_diag), c def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True, y_numeric=True) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) gcv_mode = self.gcv_mode with_sw = len(np.shape(sample_weight)) if gcv_mode is None or gcv_mode == 'auto': if sparse.issparse(X) or n_features > n_samples or with_sw: gcv_mode = 'eigen' else: gcv_mode = 'svd' elif gcv_mode == "svd" and with_sw: # FIXME non-uniform sample weights not yet supported warnings.warn("non-uniform sample weights unsupported for svd, " "forcing usage of eigen") gcv_mode = 'eigen' if gcv_mode == 'eigen': _pre_compute = self._pre_compute _errors = self._errors _values = self._values elif gcv_mode == 'svd': # assert n_samples >= n_features _pre_compute = self._pre_compute_svd _errors = self._errors_svd _values = self._values_svd else: raise ValueError('bad gcv_mode "%s"' % gcv_mode) v, Q, QT_y = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None for i, alpha in enumerate(self.alphas): weighted_alpha = (sample_weight * alpha if sample_weight is not None else alpha) if error: out, c = _errors(weighted_alpha, y, v, Q, QT_y) else: out, c = _values(weighted_alpha, y, v, Q, QT_y) cv_values[:, i] = out.ravel() C.append(c) if error: best = cv_values.mean(axis=0).argmin() else: # The scorer want an object that will make the predictions but # they are already computed efficiently by _RidgeGCV. This # identity_estimator will just return them def identity_estimator(): pass identity_estimator.decision_function = lambda y_predict: y_predict identity_estimator.predict = lambda y_predict: y_predict out = [scorer(identity_estimator, y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))] best = np.argmax(out) self.alpha_ = self.alphas[best] self.dual_coef_ = C[best] self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) self._set_intercept(X_mean, y_mean, X_std) if self.store_cv_values: if len(y.shape) == 1: cv_values_shape = n_samples, len(self.alphas) else: cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) return self class _BaseRidgeCV(LinearModel): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False): self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.cv = cv self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" if self.cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values) estimator.fit(X, y, sample_weight=sample_weight) self.alpha_ = estimator.alpha_ if self.store_cv_values: self.cv_values_ = estimator.cv_values_ else: if self.store_cv_values: raise ValueError("cv!=None and store_cv_values=True " " are incompatible") parameters = {'alpha': self.alphas} fit_params = {'sample_weight': sample_weight} gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept), parameters, fit_params=fit_params, cv=self.cv) gs.fit(X, y) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha self.coef_ = estimator.coef_ self.intercept_ = estimator.intercept_ return self class RidgeCV(_BaseRidgeCV, RegressorMixin): """Ridge regression with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used, else, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. gcv_mode : {None, 'auto', 'svd', eigen'}, optional Flag indicating which strategy to use when performing Generalized Cross-Validation. Options are:: 'auto' : use svd if n_samples > n_features or when X is a sparse matrix, otherwise use eigen 'svd' : force computation via singular value decomposition of X (does not work for sparse matrices) 'eigen' : force computation via eigendecomposition of X^T X The 'auto' mode is the default and is intended to pick the cheaper option of the two depending upon the shape and format of the training data. store_cv_values : boolean, default=False Flag indicating if the cross-validation values corresponding to each alpha should be stored in the `cv_values_` attribute (see below). This flag is only compatible with `cv=None` (i.e. using Generalized Cross-Validation). Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_targets, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and \ `cv=None`). 
After `fit()` has been called, this attribute will \ contain the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter. See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeClassifierCV: Ridge classifier with built-in cross validation """ pass class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV): """Ridge classifier with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Currently, only the n_features > n_samples case is handled efficiently. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the efficient Leave-One-Out cross-validation - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_responses, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and `cv=None`). After `fit()` has been called, this attribute will contain \ the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeCV: Ridge regression with built-in cross validation Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. 
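Examples
--------
A minimal sketch on toy data (illustrative only; the selected ``alpha_``
depends on the data, while ``classes_`` simply reflects the labels seen
during ``fit``):

>>> import numpy as np
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
>>> y = np.array([0, 0, 1, 1])
>>> clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
>>> list(clf.classes_)
[0, 1]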
""" def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, class_weight=None): super(RidgeClassifierCV, self).__init__( alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, scoring=scoring, cv=cv) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit the ridge classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : object Returns self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_
bsd-3-clause
dsquareindia/scikit-learn
sklearn/tests/test_cross_validation.py
79
47914
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy import stats from sklearn.exceptions import ConvergenceWarning from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame with warnings.catch_warnings(): warnings.simplefilter('ignore') from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.datasets import make_multilabel_classification from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.multiclass import OneVsRestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], 
P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. / (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} X = np.ones((10, 2)) X_sparse = coo_matrix(X) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) # avoid StratifiedKFold's Warning about least populated class in y y = np.arange(10) % 3 ############################################################################## # Tests def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, expected_n_iter=None, n_samples=None): # Check that a all the samples appear at least once in a test fold if expected_n_iter is not None: assert_equal(len(cv), expected_n_iter) else: expected_n_iter = len(cv) collected_test_samples = set() iterations = 0 for train, test in cv: check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_iter) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.KFold, 3, 4) # Check that a warning is raised if the least populated class has too few # members. y = [3, 3, -1, -1, 3] cv = assert_warns_message(Warning, "The least populated class", cval.StratifiedKFold, y, 3) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y)) # Check that errors are raised if all n_labels for individual # classes are less than n_folds. 
y = [3, 3, -1, -1, 2] assert_raises(ValueError, cval.StratifiedKFold, y, 3) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) error_string = ("k-fold cross validation requires at least one" " train / test split") assert_raise_message(ValueError, error_string, cval.StratifiedKFold, y, 0) assert_raise_message(ValueError, error_string, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in 
[cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle) for i in range(11, 17)]: sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), skf.n) def test_shuffle_kfold(): # Check the indices are shuffled properly, and that all indices are # returned in the different test folds kf = cval.KFold(300, 3, shuffle=True, random_state=0) ind = np.arange(300) all_folds = None for train, test in kf: assert_true(np.any(np.arange(100) != ind[test])) assert_true(np.any(np.arange(100, 200) != ind[test])) assert_true(np.any(np.arange(200, 300) != ind[test])) if all_folds is None: all_folds = ind[test].copy() else: all_folds = np.concatenate((all_folds, ind[test])) all_folds.sort() assert_array_equal(all_folds, ind) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage labels = [0] * 20 + [1] * 20 kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0)) kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1)) for (_, test0), (_, test1) in zip(kf0, kf1): assert_true(set(test0) != set(test1)) check_cv_coverage(kf0, expected_n_iter=5, n_samples=40) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. We can highlight this fact be computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.96) than the non # shuffling variant (around 0.86). digits = load_digits() X, y = digits.data[:800], digits.target[:800] model = SVC(C=10, gamma=0.005) n = len(y) cv = cval.KFold(n, 5, shuffle=False) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = cval.KFold(n, 5, shuffle=True, random_state=0) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) cv = cval.KFold(n, 5, shuffle=True, random_state=1) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. 
As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_label_kfold(): rng = np.random.RandomState(0) # Parameters of the test n_labels = 15 n_samples = 1000 n_folds = 5 # Construct the test data tolerance = 0.05 * n_samples # 5 percent error allowed labels = rng.randint(0, n_labels, n_samples) folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split labels = np.asarray(labels, dtype=object) for train, test in cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Construct the test data labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean', 'Francis', 'Robert', 'Michel', 'Rachel', 'Lois', 'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean', 'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix', 'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky', 'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'] labels = np.asarray(labels, dtype=object) n_labels = len(np.unique(labels)) n_samples = len(labels) n_folds = 5 tolerance = 0.05 * n_samples # 5 percent error allowed folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split for train, test in cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Should fail if there are more folds than labels labels = np.array([1, 1, 1, 2, 2]) assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 
3, 3, 2) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8) # Train size or test size too small assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50) ] for y in ys: sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0) test_size = np.ceil(0.33 * len(y)) train_size = len(y) - test_size for train, test in sss: assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(len(train) + len(test), y.size) assert_equal(len(train), train_size) assert_equal(len(test), test_size) assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_iter = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: p = bf.pmf(count) assert_true(p > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): labels = np.array((n_samples // 2) * [0, 1]) splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits = 0 for train, test in splits: n_splits += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits, n_iter) assert_equal(len(train), splits.n_train) assert_equal(len(test), splits.n_test) assert_equal(len(set(train).intersection(test)), 0) label_counts = np.unique(labels) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(splits.n_train + splits.n_test, len(labels)) assert_equal(len(label_counts), 2) ex_test_p = float(splits.n_test) / n_samples ex_train_p = float(splits.n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_stratified_shuffle_split_overlap_train_test_bug(): # See https://github.com/scikit-learn/scikit-learn/issues/6121 for # the original bug report labels = [0, 1, 2, 3] * 3 + [4, 5] * 5 splits = cval.StratifiedShuffleSplit(labels, n_iter=1, test_size=0.5, random_state=0) train, test = next(iter(splits)) assert_array_equal(np.intersect1d(train, test), []) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. 
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_label_shuffle_split(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), ] for y in ys: n_iter = 6 test_size = 1. / 3 slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert_equal(len(slo), n_iter) y_unique = np.unique(y) for train, test in slo: # First test: no train label is in the test set and vice versa y_train_unique = np.unique(y[train]) y_test_unique = np.unique(y[test]) assert_false(np.any(np.in1d(y[train], y_test_unique))) assert_false(np.any(np.in1d(y[test], y_train_unique))) # Second test: train and test add up to all the data assert_equal(y[train].size + y[test].size, y.size) # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test labels are correct, # +- 1 for rounding error assert_true(abs(len(y_test_unique) - round(test_size * len(y_unique))) <= 1) assert_true(abs(len(y_train_unique) - round((1.0 - test_size) * len(y_unique))) <= 1) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, 
MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, 
range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. 
determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="neg_mean_squared_error") expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] 
y[train], y[test] def test_shufflesplit_errors(): assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1, train_size=0.95) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3) assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None, train_size=None) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = cval.ShuffleSplit(10, random_state=21) assert_array_equal(list(a for a, b in ss), list(a for a, b in ss)) def test_safe_split_with_precomputed_kernel(): clf = SVC() clfp = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target K = np.dot(X, X.T) cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0) tr, te = list(cv)[0] X_tr, y_tr = cval._safe_split(clf, X, y, tr) K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr) assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T)) X_te, y_te = cval._safe_split(clf, X, y, te, tr) K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr) assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T)) def test_cross_val_score_allow_nans(): # Check that cross_val_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.cross_val_score(p, X, y, cv=5) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) cval.train_test_split(X, y, test_size=0.2, random_state=42) def test_permutation_test_score_allow_nans(): # Check that permutation_test_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.permutation_test_score(p, X, y, cv=5) def test_check_cv_return_types(): X = np.ones((9, 2)) cv = cval.check_cv(3, X, classifier=False) assert_true(isinstance(cv, cval.KFold)) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = cval.check_cv(3, X, y_binary, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = cval.check_cv(3, X, y_multiclass, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) X = np.ones((5, 2)) y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]] cv = cval.check_cv(3, X, y_multilabel, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = cval.check_cv(3, X, y_multioutput, classifier=True) assert_true(isinstance(cv, cval.KFold)) def test_cross_val_score_multilabel(): X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1], [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]]) y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]) clf = KNeighborsClassifier(n_neighbors=1) scoring_micro = make_scorer(precision_score, 
average='micro') scoring_macro = make_scorer(precision_score, average='macro') scoring_samples = make_scorer(precision_score, average='samples') score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5) score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5) score_samples = cval.cross_val_score(clf, X, y, scoring=scoring_samples, cv=5) assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) def test_cross_val_predict(): boston = load_boston() X, y = boston.data, boston.target cv = cval.KFold(len(boston.target)) est = Ridge() # Naive loop (should be same as cross_val_predict): preds2 = np.zeros_like(y) for train, test in cv: est.fit(X[train], y[train]) preds2[test] = est.predict(X[test]) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_array_almost_equal(preds, preds2) preds = cval.cross_val_predict(est, X, y) assert_equal(len(preds), len(y)) cv = cval.LeaveOneOut(len(y)) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_equal(len(preds), len(y)) Xsp = X.copy() Xsp *= (Xsp > np.median(Xsp)) Xsp = coo_matrix(Xsp) preds = cval.cross_val_predict(est, Xsp, y) assert_array_almost_equal(len(preds), len(y)) preds = cval.cross_val_predict(KMeans(), X) assert_equal(len(preds), len(y)) def bad_cv(): for i in range(4): yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv()) def test_cross_val_predict_input_types(): clf = Ridge() # Smoke test predictions = cval.cross_val_predict(clf, X, y) assert_equal(predictions.shape, (10,)) # test with multioutput y with ignore_warnings(category=ConvergenceWarning): predictions = cval.cross_val_predict(clf, X_sparse, X) assert_equal(predictions.shape, (10, 2)) predictions = cval.cross_val_predict(clf, X_sparse, y) assert_array_equal(predictions.shape, (10,)) # test with multioutput y with ignore_warnings(category=ConvergenceWarning): predictions = cval.cross_val_predict(clf, X_sparse, X) assert_array_equal(predictions.shape, (10, 2)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) predictions = cval.cross_val_predict(clf, X, y.tolist()) # test with 3d X and X_3d = X[:, :, np.newaxis] check_3d = lambda x: x.ndim == 3 clf = CheckingClassifier(check_X=check_3d) predictions = cval.cross_val_predict(clf, X_3d, y) assert_array_equal(predictions.shape, (10,)) def test_cross_val_predict_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_predict(clf, X_df, y_ser) def test_sparse_fit_params(): iris = load_iris() X, y = iris.data, iris.target clf = MockClassifier() fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))} a = cval.cross_val_score(clf, X, y, fit_params=fit_params) assert_array_equal(a, np.ones(3)) def test_check_is_partition(): p = np.arange(100) 
assert_true(cval._check_is_partition(p, 100)) assert_false(cval._check_is_partition(np.delete(p, 23), 100)) p[0] = 23 assert_false(cval._check_is_partition(p, 100)) def test_cross_val_predict_sparse_prediction(): # check that cross_val_predict gives same result for sparse and dense input X, y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, return_indicator=True, random_state=1) X_sparse = csr_matrix(X) y_sparse = csr_matrix(y) classif = OneVsRestClassifier(SVC(kernel='linear')) preds = cval.cross_val_predict(classif, X, y, cv=10) preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10) preds_sparse = preds_sparse.toarray() assert_array_almost_equal(preds_sparse, preds)
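# Illustrative sketch (hypothetical helper, not one of the tests above):
# test_cross_val_predict compares cross_val_predict against a naive per-fold
# loop of exactly this shape, which is the behaviour the function promises.
def _naive_cross_val_predict_sketch(est, X, y, cv):
    """Fit on each training fold and predict its matching test fold."""
    preds = np.zeros_like(y, dtype=np.float64)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds[test] = est.predict(X[test])
    return preds
# Usage mirroring test_cross_val_predict (boston data, Ridge estimator):
#   cv = cval.KFold(len(y), 5)
#   assert_array_almost_equal(
#       cval.cross_val_predict(Ridge(), X, y, cv=cv),
#       _naive_cross_val_predict_sketch(Ridge(), X, y, cv))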
bsd-3-clause
tiagofrepereira2012/tensorflow
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
52
69800
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for DNNLinearCombinedEstimators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import json import tempfile import numpy as np from tensorflow.contrib.layers.python.layers import feature_column from tensorflow.contrib.learn.python.learn import experiment from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import test_data from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.losses import losses from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import ftrl from tensorflow.python.training import input as input_lib from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import monitored_session from tensorflow.python.training import server_lib from tensorflow.python.training import session_run_hook from tensorflow.python.training import sync_replicas_optimizer from tensorflow.python.training import training_util def _assert_metrics_in_range(keys, metrics): epsilon = 0.00001 # Added for floating point edge cases. 
for key in keys: estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key, metrics) class _CheckCallsHead(head_lib.Head): """Head that checks whether head_ops is called.""" def __init__(self): self._head_ops_called_times = 0 @property def logits_dimension(self): return 1 def create_model_fn_ops( self, mode, features, labels=None, train_op_fn=None, logits=None, logits_input=None, scope=None): """See `_Head`.""" self._head_ops_called_times += 1 loss = losses.mean_squared_error(labels, logits) return model_fn.ModelFnOps( mode, predictions={'loss': loss}, loss=loss, train_op=train_op_fn(loss), eval_metric_ops={'loss': loss}) @property def head_ops_called_times(self): return self._head_ops_called_times class _StepCounterHook(session_run_hook.SessionRunHook): """Counts the number of training steps.""" def __init__(self): self._steps = 0 def after_run(self, run_context, run_values): del run_context, run_values self._steps += 1 @property def steps(self): return self._steps class EmbeddingMultiplierTest(test.TestCase): """dnn_model_fn tests.""" def testRaisesNonEmbeddingColumn(self): one_hot_language = feature_column.one_hot_column( feature_column.sparse_column_with_hash_bucket('language', 10)) params = { 'dnn_feature_columns': [one_hot_language], 'head': head_lib.multi_class_head(2), 'dnn_hidden_units': [1], # Set lr mult to 0. to keep embeddings constant. 'embedding_lr_multipliers': { one_hot_language: 0.0 }, 'dnn_optimizer': 'Adagrad', } features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), } labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32) with self.assertRaisesRegexp(ValueError, 'can only be defined for embedding columns'): dnn_linear_combined._dnn_linear_combined_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params) def testMultipliesGradient(self): embedding_language = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('language', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) embedding_wire = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('wire', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) params = { 'dnn_feature_columns': [embedding_language, embedding_wire], 'head': head_lib.multi_class_head(2), 'dnn_hidden_units': [1], # Set lr mult to 0. to keep language embeddings constant, whereas wire # embeddings will be trained. 'embedding_lr_multipliers': { embedding_language: 0.0 }, 'dnn_optimizer': 'Adagrad', } with ops.Graph().as_default(): features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [2, 0]], dense_shape=[3, 1]), } labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32) training_util.create_global_step() model_ops = dnn_linear_combined._dnn_linear_combined_model_fn( features, labels, model_fn.ModeKeys.TRAIN, params) with monitored_session.MonitoredSession() as sess: language_var = dnn_linear_combined._get_embedding_variable( embedding_language, 'dnn', 'dnn/input_from_feature_columns') language_initial_value = sess.run(language_var) for _ in range(2): _, language_value = sess.run([model_ops.train_op, language_var]) self.assertAllClose(language_value, language_initial_value) # We could also test that wire_value changed, but that test would be flaky. 
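# Illustrative sketch (hypothetical helper, not used by the test cases
# below): several classifier tests build the same sparse 'language' feature
# and integer labels inline; factored out, such an input_fn looks like this.
def _example_language_input_fn():
  features = {
      'language':
          sparse_tensor.SparseTensor(
              values=['en', 'fr', 'zh'],
              indices=[[0, 0], [0, 1], [2, 0]],
              dense_shape=[3, 2])
  }
  labels = constant_op.constant([[1], [0], [0]])
  return features, labels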
class DNNLinearCombinedEstimatorTest(test.TestCase): def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedEstimator) def testNoFeatureColumns(self): with self.assertRaisesRegexp( ValueError, 'Either linear_feature_columns or dnn_feature_columns must be defined'): dnn_linear_combined.DNNLinearCombinedEstimator( head=_CheckCallsHead(), linear_feature_columns=None, dnn_feature_columns=None, dnn_hidden_units=[3, 3]) def testCheckCallsHead(self): """Tests binary classification using matrix data as input.""" head = _CheckCallsHead() iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [ feature_column.real_valued_column('feature', dimension=4)] bucketized_feature = [feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))] estimator = dnn_linear_combined.DNNLinearCombinedEstimator( head, linear_feature_columns=bucketized_feature, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10) self.assertEqual(1, head.head_ops_called_times) estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10) self.assertEqual(2, head.head_ops_called_times) estimator.predict(input_fn=test_data.iris_input_multiclass_fn) self.assertEqual(3, head.head_ops_called_times) class DNNLinearCombinedClassifierTest(test.TestCase): def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedClassifier) def testExperimentIntegration(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] exp = experiment.Experiment( estimator=dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]), train_input_fn=test_data.iris_input_logistic_fn, eval_input_fn=test_data.iris_input_logistic_fn) exp.test() def testNoFeatureColumns(self): with self.assertRaisesRegexp( ValueError, 'Either linear_feature_columns or dnn_feature_columns must be defined'): dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=None, dnn_feature_columns=None, dnn_hidden_units=[3, 3]) def testNoDnnHiddenUnits(self): def _input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) age = feature_column.real_valued_column('age') with self.assertRaisesRegexp( ValueError, 'dnn_hidden_units must be defined when dnn_feature_columns is ' 'specified'): classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[age, language]) classifier.fit(input_fn=_input_fn, steps=2) def testSyncReplicasOptimizerUnsupported(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer( opt=adagrad.AdagradOptimizer(learning_rate=0.1), replicas_to_aggregate=1, total_num_replicas=1) sync_hook = sync_optimizer.make_session_run_hook(is_chief=True) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=sync_optimizer) with self.assertRaisesRegexp( ValueError, 'SyncReplicasOptimizer is not supported in DNNLinearCombined model'): classifier.fit( input_fn=test_data.iris_input_multiclass_fn, 
steps=100, monitors=[sync_hook]) def testEmbeddingMultiplier(self): embedding_language = feature_column.embedding_column( feature_column.sparse_column_with_hash_bucket('language', 10), dimension=1, initializer=init_ops.constant_initializer(0.1)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[embedding_language], dnn_hidden_units=[3, 3], embedding_lr_multipliers={embedding_language: 0.8}) self.assertEqual({ embedding_language: 0.8 }, classifier.params['embedding_lr_multipliers']) def testInputPartitionSize(self): def _input_fn_float_label(num_epochs=None): features = { 'language': sparse_tensor.SparseTensor( values=input_lib.limit_epochs( ['en', 'fr', 'zh'], num_epochs=num_epochs), indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32) return features, labels language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) feature_columns = [ feature_column.embedding_column(language_column, dimension=1), ] # Set num_ps_replica to be 10 and the min slice size to be extremely small, # so as to ensure that there'll be 10 partititions produced. config = run_config.RunConfig(tf_random_seed=1) config._num_ps_replicas = 10 classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=2, dnn_feature_columns=feature_columns, dnn_hidden_units=[3, 3], dnn_optimizer='Adagrad', config=config, input_layer_min_slice_size=1) # Ensure the param is passed in. self.assertTrue(callable(classifier.params['input_layer_partitioner'])) # Ensure the partition count is 10. classifier.fit(input_fn=_input_fn_float_label, steps=50) partition_count = 0 for name in classifier.get_variable_names(): if 'language_embedding' in name and 'Adagrad' in name: partition_count += 1 self.assertEqual(10, partition_count) def testLogisticRegression_MatrixData(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_feature = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_feature, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testLogisticRegression_TensorData(self): """Tests binary classification using Tensor data as input.""" def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. features.update({ str(i): array_ops.reshape( constant_op.constant( iris.data[:, i], dtype=dtypes.float32), [-1, 1]) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. 
features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [60, 0]], dense_shape=[len(iris.target), 2]) labels = array_ops.reshape( constant_op.constant( iris.target, dtype=dtypes.int32), [-1, 1]) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [ feature_column.real_valued_column(str(i)) for i in range(4) ] linear_features = [ feature_column.bucketized_column(cont_features[i], test_data.get_quantile_based_buckets( iris.data[:, i], 10)) for i in range(4) ] linear_features.append( feature_column.sparse_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testEstimatorWithCoreFeatureColumns(self): """Tests binary classification using Tensor data as input.""" def _input_fn(): iris = test_data.prepare_iris_data_for_logistic_regression() features = {} for i in range(4): # The following shows how to provide the Tensor data for # RealValuedColumns. features.update({ str(i): array_ops.reshape( constant_op.constant(iris.data[:, i], dtype=dtypes.float32), [-1, 1]) }) # The following shows how to provide the SparseTensor data for # a SparseColumn. features['dummy_sparse_column'] = sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [60, 0]], dense_shape=[len(iris.target), 2]) labels = array_ops.reshape( constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1]) return features, labels iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [fc_core.numeric_column(str(i)) for i in range(4)] linear_features = [ fc_core.bucketized_column( cont_features[i], sorted(set(test_data.get_quantile_based_buckets( iris.data[:, i], 10)))) for i in range(4) ] linear_features.append( fc_core.categorical_column_with_hash_bucket( 'dummy_sparse_column', hash_bucket_size=100)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=100) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(): features = { 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant([[1], [0], [0]]) return features, labels sparse_features = [ # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) ] embedding_features = [ feature_column.embedding_column( sparse_features[0], dimension=1) ] tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=sparse_features, dnn_feature_columns=embedding_features, dnn_hidden_units=[3, 3], config=config) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate(input_fn=_input_fn, steps=1) _assert_metrics_in_range(('accuracy', 'auc'), scores) def testMultiClass(self): """Tests multi-class classification using matrix data as input. Please see testLogisticRegression_TensorData() for how to use Tensor data as input instead. """ iris = base.load_iris() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=bucketized_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_multiclass_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testMultiClassLabelKeys(self): """Tests n_classes > 2 with label_keys vocabulary for labels.""" # Byte literals needed for python3 test to pass. label_keys = [b'label0', b'label1', b'label2'] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=input_lib.limit_epochs( ['en', 'fr', 'zh'], num_epochs=num_epochs), indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } labels = constant_op.constant( [[label_keys[1]], [label_keys[0]], [label_keys[0]]], dtype=dtypes.string) return features, labels language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=[language_column], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], label_keys=label_keys) classifier.fit(input_fn=_input_fn, steps=50) scores = classifier.evaluate(input_fn=_input_fn, steps=1) _assert_metrics_in_range(('accuracy',), scores) self.assertIn('loss', scores) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predicted_classes = list( classifier.predict_classes( input_fn=predict_input_fn, as_iterable=True)) self.assertEqual(3, len(predicted_classes)) for pred in predicted_classes: self.assertIn(pred, label_keys) predictions = list( classifier.predict(input_fn=predict_input_fn, as_iterable=True)) self.assertAllEqual(predicted_classes, predictions) def testLoss(self): """Tests loss calculation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # The logistic prediction should be (y = 0.25). 
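      # (reasoning sketch, not part of the original test: the feature is
      # constant, so the model can do no better than predict the label mean,
      # p = 1/4; the expected log loss is then
      # -(log(0.25) + 3 * log(0.75)) / 4 ~= 0.562, which the assertion below
      # checks against.)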
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} labels = constant_op.constant([[1], [0], [0], [0]]) return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=2, linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_train, steps=1) # Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562 self.assertAlmostEqual(0.562, scores['loss'], delta=0.1) def testLossWithWeights(self): """Tests loss calculation with weights.""" def _input_fn_train(): # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x)) # The logistic prediction should be (y = 0.25). features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } labels = constant_op.constant([[1.], [0.], [0.], [0.]]) return features, labels def _input_fn_eval(): # 4 rows, with different weights. features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[7.], [1.], [1.], [1.]]) } labels = constant_op.constant([[1.], [0.], [0.], [0.]]) return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( weight_column_name='w', n_classes=2, linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1) # Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06 self.assertAlmostEqual(1.06, scores['loss'], delta=0.1) def testTrainWithWeights(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. labels = constant_op.constant([[1], [0], [0], [0]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x). 
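      # (reasoning note, not part of the original test: the training weights put
      # 100 of the 107 units of total weight on the single (y = x) row, so the
      # fitted classifier should favour the positive class and score well on
      # these all-positive eval labels.)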
labels = constant_op.constant([[1], [1], [1], [1]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn_train, steps=100) scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByObject(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByString(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer='Ftrl', dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer='Adagrad') classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testCustomOptimizerByFunction(self): """Tests binary classification using matrix data as input.""" iris = test_data.prepare_iris_data_for_logistic_regression() cont_features = [feature_column.real_valued_column('feature', dimension=4)] bucketized_features = [ feature_column.bucketized_column( cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10)) ] def _optimizer_exp_decay(): global_step = training_util.get_global_step() learning_rate = learning_rate_decay.exponential_decay( learning_rate=0.1, global_step=global_step, decay_steps=100, decay_rate=0.001) return adagrad.AdagradOptimizer(learning_rate=learning_rate) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=bucketized_features, linear_optimizer=_optimizer_exp_decay, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], dnn_optimizer=_optimizer_exp_decay) classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100) scores = classifier.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=100) _assert_metrics_in_range(('accuracy',), scores) def testPredict(self): """Tests weight column in evaluation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1], 
[0], [0], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)} return features, labels def _input_fn_predict(): y = input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=1) features = {'x': y} return features classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=100) probs = list(classifier.predict_proba(input_fn=_input_fn_predict)) self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05) classes = list(classifier.predict_classes(input_fn=_input_fn_predict)) self.assertListEqual([0] * 4, classes) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1], [0], [0], [0]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): # For the case of binary classification, the 2nd column of "predictions" # denotes the model predictions. labels = math_ops.to_float(labels) predictions = array_ops.strided_slice( predictions, [0, 1], [-1, 2], end_mask=1) return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn, steps=100) scores = classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'my_accuracy': MetricSpec( metric_fn=metric_ops.streaming_accuracy, prediction_key='classes'), 'my_precision': MetricSpec( metric_fn=metric_ops.streaming_precision, prediction_key='classes'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='probabilities') }) self.assertTrue( set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset( set(scores.keys()))) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(classifier.predict_classes( input_fn=predict_input_fn))) self.assertEqual( _sklearn.accuracy_score([1, 0, 0, 0], predictions), scores['my_accuracy']) # Test the case where the 2nd element of the key is neither "classes" nor # "probabilities". with self.assertRaisesRegexp(KeyError, 'bad_type'): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc}) # Test the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ ('bad_length_name', 'classes', 'bad_length'): metric_ops.streaming_accuracy }) # Test the case where the prediction_key is neither "classes" nor # "probabilities". 
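    # (clarifying note, not part of the original test: MetricSpec uses
    # prediction_key to select a tensor from the predictions dict the head
    # produces; 'bad_type' is not one of those keys, so evaluation is expected
    # to fail with a KeyError naming it.)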
with self.assertRaisesRegexp(KeyError, 'bad_type'): classifier.evaluate( input_fn=_input_fn, steps=100, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) def testVariableQuery(self): """Tests get_variable_names and get_variable_value.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=500) var_names = classifier.get_variable_names() self.assertGreater(len(var_names), 3) for name in var_names: classifier.get_variable_value(name) def testExport(self): """Tests export model for servo.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[ feature_column.real_valued_column('age'), language, ], dnn_feature_columns=[ feature_column.embedding_column( language, dimension=1), ], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=input_fn, steps=100) export_dir = tempfile.mkdtemp() input_feature_key = 'examples' def serving_input_fn(): features, targets = input_fn() features[input_feature_key] = array_ops.placeholder(dtypes.string) return features, targets classifier.export( export_dir, serving_input_fn, input_feature_key, use_deprecated_input_fn=False) def testCenteredBias(self): """Tests bias is centered or not.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], enable_centered_bias=True) classifier.fit(input_fn=_input_fn_train, steps=1000) self.assertIn('binary_logistic_head/centered_bias_weight', classifier.get_variable_names()) # logodds(0.75) = 1.09861228867 self.assertAlmostEqual( 1.0986, float(classifier.get_variable_value( 'binary_logistic_head/centered_bias_weight')[0]), places=2) def testDisableCenteredBias(self): """Tests bias is centered or not.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], enable_centered_bias=False) classifier.fit(input_fn=_input_fn_train, steps=500) self.assertNotIn('centered_bias_weight', classifier.get_variable_names()) def testGlobalStepLinearOnly(self): """Tests global step update for linear-only model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], 
dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testGlobalStepDNNOnly(self): """Tests global step update for dnn-only model.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testGlobalStepDNNLinearCombinedBug(self): """Tests global step update for dnn-linear combined model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language], dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3], fix_global_step_increment_bug=False) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) global_step = classifier.get_variable_value('global_step') if global_step == 100: # Expected is 100, but because of the global step increment bug, is 50. self.assertEqual(50, step_counter.steps) else: # Occasionally, training stops when global_step == 101, due to a race # condition. 
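      # (reasoning sketch, not part of the original test: with the bug left in
      # place both the linear and the DNN train ops advance the global step, so
      # roughly two increments happen per actual training step; fit() stops once
      # global_step reaches the requested 100, by which point the step-counter
      # hook has only observed about 50, or 51 in the race-condition case.)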
self.assertEqual(51, step_counter.steps) def testGlobalStepDNNLinearCombinedBugFixed(self): """Tests global step update for dnn-linear combined model.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 10) age = feature_column.real_valued_column('age') step_counter = _StepCounterHook() classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language], dnn_feature_columns=[ feature_column.embedding_column(language, dimension=1)], dnn_hidden_units=[3, 3], fix_global_step_increment_bug=True) classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter]) self.assertEqual(100, step_counter.steps) def testLinearOnly(self): """Tests that linear-only instantiation works.""" def input_fn(): return { 'age': constant_op.constant([1]), 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 100) age = feature_column.real_valued_column('age') classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[age, language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) variable_names = classifier.get_variable_names() self.assertNotIn('dnn/logits/biases', variable_names) self.assertNotIn('dnn/logits/weights', variable_names) self.assertIn('linear/bias_weight', variable_names) self.assertIn('linear/age/weight', variable_names) self.assertIn('linear/language/weights', variable_names) self.assertEquals( 1, len(classifier.get_variable_value('linear/age/weight'))) self.assertEquals( 100, len(classifier.get_variable_value('linear/language/weights'))) def testLinearOnlyOneFeature(self): """Tests that linear-only instantiation works for one feature only.""" def input_fn(): return { 'language': sparse_tensor.SparseTensor( values=['english'], indices=[[0, 0]], dense_shape=[1, 1]) }, constant_op.constant([[1]]) language = feature_column.sparse_column_with_hash_bucket('language', 99) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[language]) classifier.fit(input_fn=input_fn, steps=100) loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] classifier.fit(input_fn=input_fn, steps=200) loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss'] self.assertLess(loss2, loss1) variable_names = classifier.get_variable_names() self.assertNotIn('dnn/logits/biases', variable_names) self.assertNotIn('dnn/logits/weights', variable_names) self.assertIn('linear/bias_weight', variable_names) self.assertIn('linear/language/weights', variable_names) self.assertEquals( 1, len(classifier.get_variable_value('linear/bias_weight'))) self.assertEquals( 99, len(classifier.get_variable_value('linear/language/weights'))) def testDNNOnly(self): """Tests that DNN-only instantiation works.""" cont_features = [feature_column.real_valued_column('feature', dimension=4)] classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]) classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000) 
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100) variable_names = classifier.get_variable_names() self.assertIn('dnn/hiddenlayer_0/weights', variable_names) self.assertIn('dnn/hiddenlayer_0/biases', variable_names) self.assertIn('dnn/hiddenlayer_1/weights', variable_names) self.assertIn('dnn/hiddenlayer_1/biases', variable_names) self.assertIn('dnn/logits/weights', variable_names) self.assertIn('dnn/logits/biases', variable_names) self.assertNotIn('linear/bias_weight', variable_names) self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names) def testDNNWeightsBiasesNames(self): """Tests the names of DNN weights and biases in the checkpoints.""" def _input_fn_train(): # Create 4 rows, three (y = x), one (y=Not(x)) labels = constant_op.constant([[1], [1], [1], [0]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3]) classifier.fit(input_fn=_input_fn_train, steps=5) variable_names = classifier.get_variable_names() self.assertIn('dnn/hiddenlayer_0/weights', variable_names) self.assertIn('dnn/hiddenlayer_0/biases', variable_names) self.assertIn('dnn/hiddenlayer_1/weights', variable_names) self.assertIn('dnn/hiddenlayer_1/biases', variable_names) self.assertIn('dnn/logits/weights', variable_names) self.assertIn('dnn/logits/biases', variable_names) class DNNLinearCombinedRegressorTest(test.TestCase): def testExperimentIntegration(self): cont_features = [feature_column.real_valued_column('feature', dimension=4)] exp = experiment.Experiment( estimator=dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3]), train_input_fn=test_data.iris_input_logistic_fn, eval_input_fn=test_data.iris_input_logistic_fn) exp.test() def testEstimatorContract(self): estimator_test_utils.assert_estimator_contract( self, dnn_linear_combined.DNNLinearCombinedRegressor) def testRegression_MatrixData(self): """Tests regression using matrix data as input.""" cont_features = [feature_column.real_valued_column('feature', dimension=4)] regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=cont_features, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10) scores = regressor.evaluate( input_fn=test_data.iris_input_logistic_fn, steps=1) self.assertIn('loss', scores.keys()) def testRegression_TensorData(self): """Tests regression using tensor data as input.""" def _input_fn(): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])} return features, labels classifier = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) classifier.fit(input_fn=_input_fn, steps=10) classifier.evaluate(input_fn=_input_fn, steps=1) def testLoss(self): """Tests loss calculation.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). 
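      # (reasoning sketch, not part of the original test: with a constant
      # feature the squared-error fit converges to the label mean, 0.25, so the
      # expected loss is (0.75**2 + 3 * 0.25**2) / 4 = 0.1875, matching the
      # assertion below.)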
labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),} return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_train, steps=1) # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875 self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1) def testLossWithWeights(self): """Tests loss calculation with weights.""" def _input_fn_train(): # 4 rows with equal weight, one of them (y = x), three of them (y=Not(x)) # The algorithm should learn (y = 0.25). labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels def _input_fn_eval(): # 4 rows, with different weights. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[7.], [1.], [1.], [1.]]) } return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125 self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1) def testTrainWithWeights(self): """Tests training with given weight column.""" def _input_fn_train(): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) # First row has more weight than others. Model should fit (y=x) better # than (y=Not(x)) due to the relative higher weight of the first row. labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[100.], [3.], [2.], [2.]]) } return features, labels def _input_fn_eval(): # Create 4 rows (y = x) labels = constant_op.constant([[1.], [1.], [1.], [1.]]) features = { 'x': array_ops.ones( shape=[4, 1], dtype=dtypes.float32), 'w': constant_op.constant([[1.], [1.], [1.], [1.]]) } return features, labels regressor = dnn_linear_combined.DNNLinearCombinedRegressor( weight_column_name='w', linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn_train, steps=100) scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1) # The model should learn (y = x) because of the weights, so the loss should # be close to zero. 
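      # (reasoning note, not part of the original test: training weight is
      # dominated by the first row, 100 of 107 units, whose label is 1, so the
      # fitted value is pulled close to 1; evaluated against the all-ones labels
      # with unit weights, the squared-error loss should therefore be small.)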
self.assertLess(scores['loss'], 0.2) def testPredict_AsIterableFalse(self): """Tests predict method with as_iterable=False.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) regressor.predict_scores(input_fn=_input_fn, as_iterable=False) def testPredict_AsIterable(self): """Tests predict method with as_iterable=True.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) predict_input_fn = functools.partial(_input_fn, num_epochs=1) regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True) def testCustomMetrics(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) scores = regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'my_error': metric_ops.streaming_mean_squared_error, ('my_metric', 'scores'): _my_metric_op }) self.assertIn('loss', set(scores.keys())) self.assertIn('my_error', set(scores.keys())) self.assertIn('my_metric', set(scores.keys())) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(regressor.predict_scores( input_fn=predict_input_fn))) self.assertAlmostEqual( 
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions), scores['my_error']) # Tests the case that the 2nd element of the key is not "scores". with self.assertRaises(KeyError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('my_error', 'predictions'): metric_ops.streaming_mean_squared_error }) # Tests the case where the tuple of the key doesn't have 2 elements. with self.assertRaises(ValueError): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ ('bad_length_name', 'scores', 'bad_length'): metric_ops.streaming_mean_squared_error }) def testCustomMetricsWithMetricSpec(self): """Tests custom evaluation metrics.""" def _input_fn(num_epochs=None): # Create 4 rows, one of them (y = x), three of them (y=Not(x)) labels = constant_op.constant([[1.], [0.], [0.], [0.]]) features = { 'x': input_lib.limit_epochs( array_ops.ones( shape=[4, 1], dtype=dtypes.float32), num_epochs=num_epochs) } return features, labels def _my_metric_op(predictions, labels): return math_ops.reduce_sum(math_ops.multiply(predictions, labels)) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=5) scores = regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'my_error': MetricSpec( metric_fn=metric_ops.streaming_mean_squared_error, prediction_key='scores'), 'my_metric': MetricSpec( metric_fn=_my_metric_op, prediction_key='scores') }) self.assertIn('loss', set(scores.keys())) self.assertIn('my_error', set(scores.keys())) self.assertIn('my_metric', set(scores.keys())) predict_input_fn = functools.partial(_input_fn, num_epochs=1) predictions = np.array(list(regressor.predict_scores( input_fn=predict_input_fn))) self.assertAlmostEqual( _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions), scores['my_error']) # Tests the case where the prediction_key is not "scores". 
with self.assertRaisesRegexp(KeyError, 'bad_type'): regressor.evaluate( input_fn=_input_fn, steps=1, metrics={ 'bad_name': MetricSpec( metric_fn=metric_ops.streaming_auc, prediction_key='bad_type') }) def testExport(self): """Tests export model for servo.""" labels = [1., 0., 0.2] def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant(labels, dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=10) export_dir = tempfile.mkdtemp() input_feature_key = 'examples' def serving_input_fn(): features, targets = _input_fn() features[input_feature_key] = array_ops.placeholder(dtypes.string) return features, targets regressor.export( export_dir, serving_input_fn, input_feature_key, use_deprecated_input_fn=False) def testTrainSaveLoad(self): """Tests regression with restarting training / evaluate.""" def _input_fn(num_epochs=None): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = { 'x': input_lib.limit_epochs( constant_op.constant([[100.], [3.], [2.], [2.]]), num_epochs=num_epochs) } return features, labels model_dir = tempfile.mkdtemp() # pylint: disable=g-long-lambda new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1)) predict_input_fn = functools.partial(_input_fn, num_epochs=1) regressor = new_regressor() regressor.fit(input_fn=_input_fn, steps=10) predictions = list(regressor.predict_scores(input_fn=predict_input_fn)) del regressor regressor = new_regressor() predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn)) self.assertAllClose(predictions, predictions2) def testTrainWithPartitionedVariables(self): """Tests training with partitioned variables.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) # The given hash_bucket_size results in variables larger than the # default min_slice_size attribute, so the variables are partitioned. language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=2e7) tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1'] } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig(tf_random_seed=1) # Because we did not start a distributed cluster, we need to pass an # empty ClusterSpec, otherwise the device_setter will look for # distributed jobs, such as "/job:ps" which are not present. 
config._cluster_spec = server_lib.ClusterSpec({}) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=config) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testDisableCenteredBias(self): """Tests that we can disable centered bias.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], enable_centered_bias=False, config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testLinearOnly(self): """Tests linear-only instantiation and training.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[ language_column, feature_column.real_valued_column('age') ], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) def testDNNOnly(self): """Tests DNN-only instantiation and training.""" def _input_fn(num_epochs=None): features = { 'age': input_lib.limit_epochs( constant_op.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs), 'language': sparse_tensor.SparseTensor( values=['en', 'fr', 'zh'], indices=[[0, 0], [0, 1], [2, 0]], dense_shape=[3, 2]) } return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32) language_column = feature_column.sparse_column_with_hash_bucket( 'language', hash_bucket_size=20) regressor = dnn_linear_combined.DNNLinearCombinedRegressor( dnn_feature_columns=[ feature_column.embedding_column( language_column, dimension=1), feature_column.real_valued_column('age') ], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) regressor.fit(input_fn=_input_fn, steps=100) scores = regressor.evaluate(input_fn=_input_fn, steps=1) self.assertIn('loss', scores.keys()) class FeatureEngineeringFunctionTest(test.TestCase): """Tests feature_engineering_fn.""" def testNoneFeatureEngineeringFn(self): def input_fn(): # Create 4 rows of (y = x) labels = constant_op.constant([[100.], [3.], [2.], [2.]]) features = {'x': 
constant_op.constant([[100.], [3.], [2.], [2.]])} return features, labels def feature_engineering_fn(features, labels): _, _ = features, labels labels = constant_op.constant([[1000.], [30.], [20.], [20.]]) features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])} return features, labels estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1), feature_engineering_fn=feature_engineering_fn) estimator_with_fe_fn.fit(input_fn=input_fn, steps=110) estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) estimator_without_fe_fn.fit(input_fn=input_fn, steps=110) # predictions = y prediction_with_fe_fn = next( estimator_with_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0) prediction_without_fe_fn = next( estimator_without_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0) if __name__ == '__main__': test.main()
apache-2.0
jstoxrocky/statsmodels
statsmodels/regression/tests/test_regression.py
6
37622
""" Test functions for models.regression """ # TODO: Test for LM from statsmodels.compat.python import long, lrange import warnings import pandas import numpy as np from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_raises, assert_equal, assert_allclose) from scipy.linalg import toeplitz from statsmodels.tools.tools import add_constant, categorical from statsmodels.compat.numpy import np_matrix_rank from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker from statsmodels.datasets import longley from scipy.stats import t as student_t DECIMAL_4 = 4 DECIMAL_3 = 3 DECIMAL_2 = 2 DECIMAL_1 = 1 DECIMAL_7 = 7 DECIMAL_0 = 0 class CheckRegressionResults(object): """ res2 contains results from Rmodelwrap or were obtained from a statistical packages such as R, Stata, or SAS and were written to model_results """ decimal_params = DECIMAL_4 def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, self.decimal_params) decimal_standarderrors = DECIMAL_4 def test_standarderrors(self): assert_almost_equal(self.res1.bse,self.res2.bse, self.decimal_standarderrors) decimal_confidenceintervals = DECIMAL_4 def test_confidenceintervals(self): #NOTE: stata rounds residuals (at least) to sig digits so approx_equal conf1 = self.res1.conf_int() conf2 = self.res2.conf_int() for i in range(len(conf1)): assert_approx_equal(conf1[i][0], conf2[i][0], self.decimal_confidenceintervals) assert_approx_equal(conf1[i][1], conf2[i][1], self.decimal_confidenceintervals) decimal_conf_int_subset = DECIMAL_4 def test_conf_int_subset(self): if len(self.res1.params) > 1: ci1 = self.res1.conf_int(cols=(1,2)) ci2 = self.res1.conf_int()[1:3] assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset) else: pass decimal_scale = DECIMAL_4 def test_scale(self): assert_almost_equal(self.res1.scale, self.res2.scale, self.decimal_scale) decimal_rsquared = DECIMAL_4 def test_rsquared(self): assert_almost_equal(self.res1.rsquared, self.res2.rsquared, self.decimal_rsquared) decimal_rsquared_adj = DECIMAL_4 def test_rsquared_adj(self): assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj, self.decimal_rsquared_adj) def test_degrees(self): assert_equal(self.res1.model.df_model, self.res2.df_model) assert_equal(self.res1.model.df_resid, self.res2.df_resid) decimal_ess = DECIMAL_4 def test_ess(self): #Explained Sum of Squares assert_almost_equal(self.res1.ess, self.res2.ess, self.decimal_ess) decimal_ssr = DECIMAL_4 def test_sumof_squaredresids(self): assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr) decimal_mse_resid = DECIMAL_4 def test_mse_resid(self): #Mean squared error of residuals assert_almost_equal(self.res1.mse_model, self.res2.mse_model, self.decimal_mse_resid) decimal_mse_model = DECIMAL_4 def test_mse_model(self): assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid, self.decimal_mse_model) decimal_mse_total = DECIMAL_4 def test_mse_total(self): assert_almost_equal(self.res1.mse_total, self.res2.mse_total, self.decimal_mse_total, err_msg="Test class %s" % self) decimal_fvalue = DECIMAL_4 def test_fvalue(self): #didn't change this, not sure it should complain -inf not equal -inf #if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)): assert_almost_equal(self.res1.fvalue, self.res2.fvalue, self.decimal_fvalue) decimal_loglike = DECIMAL_4 def test_loglike(self): assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike) decimal_aic = DECIMAL_4 def test_aic(self): assert_almost_equal(self.res1.aic, self.res2.aic, 
self.decimal_aic) decimal_bic = DECIMAL_4 def test_bic(self): assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic) decimal_pvalues = DECIMAL_4 def test_pvalues(self): assert_almost_equal(self.res1.pvalues, self.res2.pvalues, self.decimal_pvalues) decimal_wresid = DECIMAL_4 def test_wresid(self): assert_almost_equal(self.res1.wresid, self.res2.wresid, self.decimal_wresid) decimal_resids = DECIMAL_4 def test_resids(self): assert_almost_equal(self.res1.resid, self.res2.resid, self.decimal_resids) decimal_norm_resids = DECIMAL_4 def test_norm_resids(self): assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson, self.decimal_norm_resids) #TODO: test fittedvalues and what else? class TestOLS(CheckRegressionResults): @classmethod def setupClass(cls): from .results.results_regression import Longley data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() res2 = Longley() res2.wresid = res1.wresid # workaround hack cls.res1 = res1 cls.res2 = res2 res_qr = OLS(data.endog, data.exog).fit(method="qr") model_qr = OLS(data.endog, data.exog) Q, R = np.linalg.qr(data.exog) model_qr.exog_Q, model_qr.exog_R = Q, R model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R)) model_qr.rank = np_matrix_rank(R) res_qr2 = model_qr.fit(method="qr") cls.res_qr = res_qr cls.res_qr_manual = res_qr2 def test_eigenvalues(self): eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals) eigenval_perc_diff /= self.res_qr.eigenvals zeros = np.zeros_like(eigenval_perc_diff) assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7) # Robust error tests. Compare values computed with SAS def test_HC0_errors(self): #They are split up because the copied results do not have any DECIMAL_4 #places for the last place. assert_almost_equal(self.res1.HC0_se[:-1], self.res2.HC0_se[:-1], DECIMAL_4) assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1]) def test_HC1_errors(self): assert_almost_equal(self.res1.HC1_se[:-1], self.res2.HC1_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1]) def test_HC2_errors(self): assert_almost_equal(self.res1.HC2_se[:-1], self.res2.HC2_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1]) def test_HC3_errors(self): assert_almost_equal(self.res1.HC3_se[:-1], self.res2.HC3_se[:-1], DECIMAL_4) assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1]) def test_qr_params(self): assert_almost_equal(self.res1.params, self.res_qr.params, 6) def test_qr_normalized_cov_params(self): #todo: need assert_close assert_almost_equal(np.ones_like(self.res1.normalized_cov_params), self.res1.normalized_cov_params / self.res_qr.normalized_cov_params, 5) def test_missing(self): data = longley.load() data.exog = add_constant(data.exog, prepend=False) data.endog[[3, 7, 14]] = np.nan mod = OLS(data.endog, data.exog, missing='drop') assert_equal(mod.endog.shape[0], 13) assert_equal(mod.exog.shape[0], 13) def test_rsquared_adj_overfit(self): # Test that if df_resid = 0, rsquared_adj = 0. 
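        # (clarifying note, not part of the original test: the adjusted
        # R-squared formula divides by df_resid, so a saturated fit with
        # df_resid = 0 yields NaN rather than a finite value; the assertion
        # below checks for np.nan.)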
# This is a regression test for user issue: # https://github.com/statsmodels/statsmodels/issues/868 with warnings.catch_warnings(record=True): x = np.random.randn(5) y = np.random.randn(5, 6) results = OLS(x, y).fit() rsquared_adj = results.rsquared_adj assert_equal(rsquared_adj, np.nan) def test_qr_alternatives(self): assert_allclose(self.res_qr.params, self.res_qr_manual.params, rtol=5e-12) def test_norm_resid(self): resid = self.res1.wresid norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid) model_norm_resid = self.res1.resid_pearson assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7) def test_norm_resid_zero_variance(self): with warnings.catch_warnings(record=True): y = self.res1.model.endog res = OLS(y,y).fit() assert_allclose(res.scale, 0, atol=1e-20) assert_allclose(res.wresid, res.resid_pearson, atol=5e-11) class TestRTO(CheckRegressionResults): @classmethod def setupClass(cls): from .results.results_regression import LongleyRTO data = longley.load() res1 = OLS(data.endog, data.exog).fit() res2 = LongleyRTO() res2.wresid = res1.wresid # workaround hack cls.res1 = res1 cls.res2 = res2 res_qr = OLS(data.endog, data.exog).fit(method="qr") cls.res_qr = res_qr class TestFtest(object): """ Tests f_test vs. RegressionResults """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() R = np.identity(7)[:-1,:] cls.Ftest = cls.res1.f_test(R) def test_F(self): assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4) def test_p(self): assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4) def test_Df_denom(self): assert_equal(self.Ftest.df_denom, self.res1.model.df_resid) def test_Df_num(self): assert_equal(self.Ftest.df_num, 6) class TestFTest2(object): """ A joint test that the coefficient on GNP = the coefficient on UNEMP and that the coefficient on POP = the coefficient on YEAR for the Longley dataset. Ftest1 is from statsmodels. Results are from Rpy using R's car library. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]] cls.Ftest1 = res1.f_test(R2) hyp = 'x2 = x3, x5 = x6' cls.NewFtest1 = res1.f_test(hyp) def test_new_ftest(self): assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue) def test_fvalue(self): assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ftest1.df_denom, 9) def test_df_num(self): assert_equal(self.Ftest1.df_num, 2) class TestFtestQ(object): """ A joint hypothesis test that Rb = q. Coefficient tests are essentially made up. Test values taken from Stata. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() R = np.array([[0,1,1,0,0,0,0], [0,1,0,1,0,0,0], [0,1,0,0,0,0,0], [0,0,0,0,1,0,0], [0,0,0,0,0,1,0]]) q = np.array([0,0,0,1,0]) cls.Ftest1 = res1.f_test((R,q)) def test_fvalue(self): assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5) def test_pvalue(self): assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10) def test_df_denom(self): assert_equal(self.Ftest1.df_denom, 9) def test_df_num(self): assert_equal(self.Ftest1.df_num, 5) class TestTtest(object): """ Test individual t-tests. 
Ie., are the coefficients significantly different than zero. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() R = np.identity(7) cls.Ttest = cls.res1.t_test(R) hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0' cls.NewTTest = cls.res1.t_test(hyp) def test_new_tvalue(self): assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue) def test_tvalue(self): assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4) def test_sd(self): assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ttest.pvalue, student_t.sf( np.abs(self.res1.tvalues), self.res1.model.df_resid)*2, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ttest.df_denom, self.res1.model.df_resid) def test_effect(self): assert_almost_equal(self.Ttest.effect, self.res1.params) class TestTtest2(object): """ Tests the hypothesis that the coefficients on POP and YEAR are equal. Results from RPy using 'car' package. """ @classmethod def setupClass(cls): R = np.zeros(7) R[4:6] = [1,-1] data = longley.load() data.exog = add_constant(data.exog, prepend=False) res1 = OLS(data.endog, data.exog).fit() cls.Ttest1 = res1.t_test(R) def test_tvalue(self): assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284, DECIMAL_4) def test_sd(self): assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4) def test_pvalue(self): assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246, DECIMAL_4) def test_df_denom(self): assert_equal(self.Ttest1.df_denom, 9) def test_effect(self): assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4) class TestGLS(object): """ These test results were obtained by replication with R. 
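    Setup sketch: rho is the lag-1 autocorrelation of the residuals from a
    preliminary OLS fit, and the error covariance handed to GLS is the
    AR(1)-style Toeplitz matrix sigma[i, j] = rho**|i - j| over the 16
    Longley observations.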
""" @classmethod def setupClass(cls): from .results.results_regression import LongleyGls data = longley.load() exog = add_constant(np.column_stack((data.exog[:,1], data.exog[:,4])), prepend=False) tmp_results = OLS(data.endog, exog).fit() rho = np.corrcoef(tmp_results.resid[1:], tmp_results.resid[:-1])[0][1] # by assumption order = toeplitz(np.arange(16)) sigma = rho**order GLS_results = GLS(data.endog, exog, sigma=sigma).fit() cls.res1 = GLS_results cls.res2 = LongleyGls() # attach for test_missing cls.sigma = sigma cls.exog = exog cls.endog = data.endog def test_aic(self): assert_approx_equal(self.res1.aic+2, self.res2.aic, 3) def test_bic(self): assert_approx_equal(self.res1.bic, self.res2.bic, 2) def test_loglike(self): assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0) def test_params(self): assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1) def test_resid(self): assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4) def test_scale(self): assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4) def test_tvalues(self): assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4) def test_standarderrors(self): assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4) def test_fittedvalues(self): assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues, DECIMAL_4) def test_pvalues(self): assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4) def test_missing(self): endog = self.endog.copy() # copy or changes endog for other methods endog[[4,7,14]] = np.nan mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop') assert_equal(mod.endog.shape[0], 13) assert_equal(mod.exog.shape[0], 13) assert_equal(mod.sigma.shape, (13,13)) class TestGLS_alt_sigma(CheckRegressionResults): """ Test that GLS with no argument is equivalent to OLS. """ @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) ols_res = OLS(data.endog, data.exog).fit() gls_res = GLS(data.endog, data.exog).fit() gls_res_scalar = GLS(data.endog, data.exog, sigma=1) cls.endog = data.endog cls.exog = data.exog cls.res1 = gls_res cls.res2 = ols_res cls.res3 = gls_res_scalar # self.res2.conf_int = self.res2.conf_int() def test_wrong_size_sigma_1d(self): n = len(self.endog) assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1)) def test_wrong_size_sigma_2d(self): n = len(self.endog) assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1))) # def check_confidenceintervals(self, conf1, conf2): # assert_almost_equal(conf1, conf2, DECIMAL_4) class TestLM(object): @classmethod def setupClass(cls): # TODO: Test HAC method X = np.random.randn(100,3) b = np.ones((3,1)) e = np.random.randn(100,1) y = np.dot(X,b) + e # Cases? 
# Homoskedastic # HC0 cls.res1_full = OLS(y,X).fit() cls.res1_restricted = OLS(y,X[:,0]).fit() cls.res2_full = cls.res1_full.get_robustcov_results('HC0') cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0') cls.X = X cls.Y = y def test_LM_homoskedastic(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X S = np.dot(resid,resid) / n * np.dot(X.T,X) / n Sinv = np.linalg.inv(S) s = np.mean(X * resid[:,None], 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_nodemean(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] S = np.dot(scores.T,scores) / n Sinv = np.linalg.inv(S) s = np.mean(scores, 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_demean(self): resid = self.res1_restricted.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] scores_demean = scores - scores.mean(0) S = np.dot(scores_demean.T,scores_demean) / n Sinv = np.linalg.inv(S) s = np.mean(scores, 0) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_heteroskedastic_LRversion(self): resid = self.res1_restricted.wresid resid_full = self.res1_full.wresid n = resid.shape[0] X = self.X scores = X * resid[:,None] s = np.mean(scores, 0) scores = X * resid_full[:,None] S = np.dot(scores.T,scores) / n Sinv = np.linalg.inv(S) LMstat = n * np.dot(np.dot(s,Sinv),s.T) LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True) LMstat2 = LMstat_OLS[0] assert_almost_equal(LMstat, LMstat2, DECIMAL_7) def test_LM_nonnested(self): assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full) class TestOLS_GLS_WLS_equivalence(object): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) y = data.endog X = data.exog n = y.shape[0] w = np.ones(n) cls.results = [] cls.results.append(OLS(y, X).fit()) cls.results.append(WLS(y, X, w).fit()) cls.results.append(GLS(y, X, 100*w).fit()) cls.results.append(GLS(y, X, np.diag(0.1*w)).fit()) def test_ll(self): llf = np.array([r.llf for r in self.results]) llf_1 = np.ones_like(llf) * self.results[0].llf assert_almost_equal(llf, llf_1, DECIMAL_7) ic = np.array([r.aic for r in self.results]) ic_1 = np.ones_like(ic) * self.results[0].aic assert_almost_equal(ic, ic_1, DECIMAL_7) ic = np.array([r.bic for r in self.results]) ic_1 = np.ones_like(ic) * self.results[0].bic assert_almost_equal(ic, ic_1, DECIMAL_7) def test_params(self): params = np.array([r.params for r in self.results]) params_1 = np.array([self.results[0].params] * len(self.results)) assert_allclose(params, params_1) def test_ss(self): bse = np.array([r.bse for r in self.results]) bse_1 = np.array([self.results[0].bse] * len(self.results)) assert_allclose(bse, bse_1) def test_rsquared(self): rsquared = np.array([r.rsquared for r in self.results]) rsquared_1 = np.array([self.results[0].rsquared] * len(self.results)) assert_almost_equal(rsquared, rsquared_1, DECIMAL_7) class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence): # reuse test methods @classmethod def setupClass(cls): data = longley.load() 
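        # WLS with weights w corresponds to GLS with sigma proportional to
        # 1/w, and both fits are invariant to a positive rescaling of the
        # weights, so the inherited params/bse/llf checks are reused as-is;
        # only rsquared is compared within the WLS and GLS pairs below.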
data.exog = add_constant(data.exog, prepend=False) y = data.endog X = data.exog n = y.shape[0] np.random.seed(5) w = np.random.uniform(0.5, 1, n) w_inv = 1. / w cls.results = [] cls.results.append(WLS(y, X, w).fit()) cls.results.append(WLS(y, X, 0.01 * w).fit()) cls.results.append(GLS(y, X, 100 * w_inv).fit()) cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit()) def test_rsquared(self): # TODO: WLS rsquared is ok, GLS might have wrong centered_tss # We only check that WLS and GLS rsquared is invariant to scaling # WLS and GLS have different rsquared assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared, DECIMAL_7) assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared, DECIMAL_7) class TestNonFit(object): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.endog = data.endog cls.exog = data.exog cls.ols_model = OLS(data.endog, data.exog) def test_df_resid(self): df_resid = self.endog.shape[0] - self.exog.shape[1] assert_equal(self.ols_model.df_resid, long(9)) class TestWLS_CornerCases(object): @classmethod def setupClass(cls): cls.exog = np.ones((1,)) cls.endog = np.ones((1,)) weights = 1 cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit() def test_wrong_size_weights(self): weights = np.ones((10,10)) assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights) class TestWLSExogWeights(CheckRegressionResults): #Test WLS with Greene's credit card data #reg avgexp age income incomesq ownrent [aw=1/incomesq] def __init__(self): from .results.results_regression import CCardWLS from statsmodels.datasets.ccard import load dta = load() dta.exog = add_constant(dta.exog, prepend=False) nobs = 72. weights = 1/dta.exog[:,2] # for comparison with stata analytic weights scaled_weights = ((weights * nobs)/weights.sum()) self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit() self.res2 = CCardWLS() self.res2.wresid = scaled_weights ** .5 * self.res2.resid # correction because we use different definition for loglike/llf corr_ic = 2 * (self.res1.llf - self.res2.llf) self.res2.aic -= corr_ic self.res2.bic -= corr_ic self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights)) def test_wls_example(): #example from the docstring, there was a note about a bug, should #be fixed now Y = [1,3,4,5,2,3,4] X = lrange(1,8) X = add_constant(X, prepend=False) wls_model = WLS(Y,X, weights=lrange(1,8)).fit() #taken from R lm.summary assert_almost_equal(wls_model.fvalue, 0.127337843215, 6) assert_almost_equal(wls_model.scale, 2.44608530786**2, 6) def test_wls_tss(): y = np.array([22, 22, 22, 23, 23, 23]) X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]] ols_mod = OLS(y, add_constant(X, prepend=False)).fit() yw = np.array([22, 22, 23.]) Xw = [[1,0],[1,1],[0,1]] w = np.array([2, 1, 3.]) wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit() assert_equal(ols_mod.centered_tss, wls_mod.centered_tss) class TestWLSScalarVsArray(CheckRegressionResults): @classmethod def setupClass(cls): from statsmodels.datasets.longley import load dta = load() dta.exog = add_constant(dta.exog, prepend=True) wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit() weights = [1/3.] 
* len(dta.endog) wls_array = WLS(dta.endog, dta.exog, weights=weights).fit() cls.res1 = wls_scalar cls.res2 = wls_array #class TestWLS_GLS(CheckRegressionResults): # @classmethod # def setupClass(cls): # from statsmodels.datasets.ccard import load # data = load() # cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit() # cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit() # # def check_confidenceintervals(self, conf1, conf2): # assert_almost_equal(conf1, conf2(), DECIMAL_4) def test_wls_missing(): from statsmodels.datasets.ccard import load data = load() endog = data.endog endog[[10, 25]] = np.nan mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop') assert_equal(mod.endog.shape[0], 70) assert_equal(mod.exog.shape[0], 70) assert_equal(mod.weights.shape[0], 70) class TestWLS_OLS(CheckRegressionResults): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = OLS(data.endog, data.exog).fit() cls.res2 = WLS(data.endog, data.exog).fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) class TestGLS_OLS(CheckRegressionResults): @classmethod def setupClass(cls): data = longley.load() data.exog = add_constant(data.exog, prepend=False) cls.res1 = GLS(data.endog, data.exog).fit() cls.res2 = OLS(data.endog, data.exog).fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) #TODO: test AR # why the two-stage in AR? #class test_ar(object): # from statsmodels.datasets.sunspots import load # data = load() # model = AR(data.endog, rho=4).fit() # R_res = RModel(data.endog, aic="FALSE", order_max=4) # def test_params(self): # assert_almost_equal(self.model.rho, # pass # def test_order(self): # In R this can be defined or chosen by minimizing the AIC if aic=True # pass class TestYuleWalker(object): @classmethod def setupClass(cls): from statsmodels.datasets.sunspots import load data = load() cls.rho, cls.sigma = yule_walker(data.endog, order=4, method="mle") cls.R_params = [1.2831003105694765, -0.45240924374091945, -0.20770298557575195, 0.047943648089542337] def test_params(self): assert_almost_equal(self.rho, self.R_params, DECIMAL_4) class TestDataDimensions(CheckRegressionResults): @classmethod def setupClass(cls): np.random.seed(54321) cls.endog_n_ = np.random.uniform(0,20,size=30) cls.endog_n_one = cls.endog_n_[:,None] cls.exog_n_ = np.random.uniform(0,20,size=30) cls.exog_n_one = cls.exog_n_[:,None] cls.degen_exog = cls.exog_n_one[:-1] cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one) cls.mod1.df_model += 1 cls.res1 = cls.mod1.fit() # Note that these are created for every subclass.. 
# A little extra overhead probably cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() def check_confidenceintervals(self, conf1, conf2): assert_almost_equal(conf1, conf2(), DECIMAL_4) class TestGLS_large_data(TestDataDimensions): @classmethod def setupClass(cls): nobs = 1000 y = np.random.randn(nobs,1) X = np.random.randn(nobs,20) sigma = np.ones_like(y) cls.gls_res = GLS(y, X, sigma=sigma).fit() cls.gls_res_scalar = GLS(y, X, sigma=1).fit() cls.gls_res_none= GLS(y, X).fit() cls.ols_res = OLS(y, X).fit() def test_large_equal_params(self): assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7) def test_large_equal_loglike(self): assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7) def test_large_equal_params_none(self): assert_almost_equal(self.gls_res.params, self.gls_res_none.params, DECIMAL_7) class TestNxNx(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxNx, cls).setupClass() cls.mod2 = OLS(cls.endog_n_, cls.exog_n_) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() class TestNxOneNx(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxOneNx, cls).setupClass() cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() class TestNxNxOne(TestDataDimensions): @classmethod def setupClass(cls): super(TestNxNxOne, cls).setupClass() cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one) cls.mod2.df_model += 1 cls.res2 = cls.mod2.fit() def test_bad_size(): np.random.seed(54321) data = np.random.uniform(0,20,31) assert_raises(ValueError, OLS, data, data[1:]) def test_const_indicator(): np.random.seed(12345) X = np.random.randint(0, 3, size=30) X = categorical(X, drop=True) y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30) modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit() mod = OLS(y, X, hasconst=True).fit() assert_almost_equal(modc.rsquared, mod.rsquared, 12) def test_706(): # make sure one regressor pandas Series gets passed to DataFrame # for conf_int. y = pandas.Series(np.random.randn(10)) x = pandas.Series(np.ones(10)) res = OLS(y,x).fit() conf_int = res.conf_int() np.testing.assert_equal(conf_int.shape, (1, 2)) np.testing.assert_(isinstance(conf_int, pandas.DataFrame)) def test_summary(): # test 734 import re dta = longley.load_pandas() X = dta.exog X["constant"] = 1 y = dta.endog with warnings.catch_warnings(record=True): res = OLS(y, X).fit() table = res.summary().as_latex() # replace the date and time table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&", " Sun, 07 Apr 2013 &", table) table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&", " 13:46:07 &", table) expected = """\\begin{center} \\begin{tabular}{lclc} \\toprule \\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\ \\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\ \\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\ \\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\ \\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\ \\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\ \\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\ \\textbf{Df Model:} & 6 & \\textbf{ } & \\\\ \\bottomrule \\end{tabular} \\begin{tabular}{lccccc} & \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[95.0\\% Conf. 
Int.]} \\\\ \\midrule \\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 207.153 \\\\ \\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 0.040 \\\\ \\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 -0.915 \\\\ \\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 -0.549 \\\\ \\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 0.460 \\\\ \\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 2859.515 \\\\ \\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 -1.47e+06 \\\\ \\bottomrule \\end{tabular} \\begin{tabular}{lclc} \\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\ \\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\ \\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\ \\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\ \\bottomrule \\end{tabular} %\\caption{OLS Regression Results} \\end{center}""" assert_equal(table, expected) class TestRegularizedFit(object): # Make sure there are no issues when there are no selected # variables. def test_empty_model(self): np.random.seed(742) n = 100 endog = np.random.normal(size=n) exog = np.random.normal(size=(n, 3)) model = OLS(endog, exog) result = model.fit_regularized(alpha=1000) assert_equal(result.params, 0.) assert_equal(result.bse, 0.) def test_regularized(self): import os from . import glmnet_r_results cur_dir = os.path.dirname(os.path.abspath(__file__)) data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"), delimiter=",") tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")] for test in tests: vec = getattr(glmnet_r_results, test) n = vec[0] p = vec[1] L1_wt = float(vec[2]) lam = float(vec[3]) params = vec[4:].astype(np.float64) endog = data[0:n, 0] exog = data[0:n, 1:(p+1)] endog = endog - endog.mean() endog /= endog.std(ddof=1) exog = exog - exog.mean(0) exog /= exog.std(0, ddof=1) mod = OLS(endog, exog) rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam) assert_almost_equal(rslt.params, params, decimal=3) # Smoke test for summary smry = rslt.summary() def test_formula_missing_cat(): # gh-805 import statsmodels.api as sm from statsmodels.formula.api import ols from patsy import PatsyError dta = sm.datasets.grunfeld.load_pandas().data dta.ix[0, 'firm'] = np.nan mod = ols(formula='value ~ invest + capital + firm + year', data=dta.dropna()) res = mod.fit() mod2 = ols(formula='value ~ invest + capital + firm + year', data=dta) res2 = mod2.fit() assert_almost_equal(res.params.values, res2.params.values) assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year', data=dta, missing='raise') def test_missing_formula_predict(): # see 2171 nsample = 30 data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)}) null = pandas.DataFrame({'x': np.array([np.nan])}) data = pandas.concat([data, null]) beta = np.array([1, 0.1]) e = np.random.normal(size=nsample+1) data['y'] = beta[0] + beta[1] * data['x'] + e model = OLS.from_formula('y ~ x', data=data) fit = model.fit() pred = fit.predict(exog=data[:-1]) if __name__=="__main__": import nose # run_module_suite() nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'], exit=False) # nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
bsd-3-clause
google/rysim
python/results_analyzer/Main.py
1
119456
# Copyright 2014 The RySim Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABCMeta, abstractmethod from array import * import collections import gflags import numpy import os import pprint import re import scipy.integrate import scipy.interpolate import sqlite3 import sys from matplotlib import pylab import pandas as pd import statsmodels.formula.api as sm from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as pyplot # Global state experiment_db = None event_count_buckets = [5000, 10000, 20000, 40000, 50000] bucketing_factor = 0.001 kernel_results_table = None kernel_machine_results_table = None kernel_machine_type_results_table = None fit_comparison_table = dict() # gflag defn's and registration FLAGS = gflags.FLAGS gflags.DEFINE_string('root_dir', '.', 'Root directory to start searching and where to store the database. Defaults to the current ' 'directory') gflags.DEFINE_string('output_db', 'experiment.db', 'Name of the database file that should be created. If the file already exists it will be ' 'overwritten. Defaults to "experiment.db"') gflags.DEFINE_bool('read_inputs', False, 'Controls if the application should re-read the inputs. If so the output DB will be clobbered ' 'entirely. If not only the analysis tables will be removed') class DBWrapper(object): def __init__(self, db_filename): self.db = sqlite3.connect(db_filename, check_same_thread=False) def commit(self): self.db.commit() def execute_safe(self, cmd): self.execute(cmd) self.commit() def execute(self, cmd): self.db.execute(cmd) def select(self, cmd): return self.db.execute(cmd) def cleanup(self): self.db.commit() self.db.close() self.db = None class ResultsTable(object): filtered_table_entry = collections.namedtuple('FilteredTableEntry', ['event_count', 'event_count_std', 'agents', 'agents_std', 'connections', 'connections_std', 'cpu', 'cpu_std', 'maxmem', 'maxmem_std']) filtered_entry = collections.namedtuple('FilteredEntry', ['mean', 'std']) def __init__(self): self.raw_table = dict() self.filtered_table = dict() def get_keys(self): return self.filtered_table.keys() def add_entry(self, key, bucket, model, event_count, agents, connections, cpu, maxmem): if key not in self.raw_table.keys(): self.raw_table[key] = dict() if model not in self.raw_table[key].keys(): self.raw_table[key][model] = dict() if agents not in self.raw_table[key][model].keys(): self.raw_table[key][model][agents] = dict() if connections not in self.raw_table[key][model][agents].keys(): self.raw_table[key][model][agents][connections] = dict() if bucket not in self.raw_table[key][model][agents][connections].keys(): self.raw_table[key][model][agents][connections][bucket] = dict() self.raw_table[key][model][agents][connections][bucket]["cpu"] = list() self.raw_table[key][model][agents][connections][bucket]["maxmem"] = list() self.raw_table[key][model][agents][connections][bucket]["event_count"] = list() self.raw_table[key][model][agents][connections][bucket]["cpu"].append(cpu) 
self.raw_table[key][model][agents][connections][bucket]["maxmem"].append(maxmem) self.raw_table[key][model][agents][connections][bucket]["event_count"].append(event_count) def create_filtered_table(self): self.filtered_table = dict() for key in self.raw_table.keys(): self.filtered_table[key] = list() for model in self.raw_table[key].keys(): for agents in self.raw_table[key][model].keys(): for connections in self.raw_table[key][model][agents].keys(): for bucket in self.raw_table[key][model][agents][connections].keys(): if len(self.raw_table[key][model][agents][connections][bucket]["event_count"]) is 0: continue event_count = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["event_count"]) cpu = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["cpu"]) maxmem = ResultsTable.filter_bucket_entry( self.raw_table[key][model][agents][connections][bucket]["maxmem"]) self.filtered_table[key].append(ResultsTable.filtered_table_entry( event_count=event_count.mean, event_count_std=event_count.std, agents=agents, agents_std=0, connections=connections, connections_std=0, cpu=cpu.mean, cpu_std=cpu.std, maxmem=maxmem.mean, maxmem_std=maxmem.std)) @staticmethod def filter_bucket_entry(entry): return ResultsTable.filtered_entry(mean=numpy.mean(entry), std=numpy.std(entry)) def get_entries_for_key(self, key): return self.filtered_table[key] def get_event_count_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[0] for row in key_data], std=[row[1] for row in key_data]) def get_agents_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[2] for row in key_data], std=[row[3] for row in key_data]) def get_connections_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[4] for row in key_data], std=[row[5] for row in key_data]) def get_cpu_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[6] for row in key_data], std=[row[7] for row in key_data]) def get_maxmem_lists_for_key(self, key): key_data = self.get_entries_for_key(key) return ResultsTable.filtered_entry(mean=[row[8] for row in key_data], std=[row[9] for row in key_data]) class ScoreTable(object): def __init__(self, kernels, tag): global fit_comparison_table fit_comparison_table[tag] = 0.0 self.tag = tag self.r2_values = list() self.kernels = kernels self.table = dict() self.total_count = 0 self.total_score_idx = len(kernels) for kernel in kernels: self.table[kernel] = array('I', [0] * (1 + len(kernels))) def get_table(self): return self.table def get_total_count(self): return self.total_count def add_1d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope = fits[kernel][0][0] intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) f_list.append((scipy.integrate.quad(lambda x: slope * x + intercept, x_min, x_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) def add_2d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] 
intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) f_list.append((scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) def add_3d_fit_score(self, fits): self.total_count += 1 f_list = list() for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] slope_z = fits[kernel][0][2] intercept = fits[kernel][1] self.r2_values.append(float(fits[kernel][2])) x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) z_min = float(fits[kernel][3][2]) z_max = float(fits[kernel][4][2]) f_list.append((scipy.integrate.tplquad(lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max, lambda x, y: z_min, lambda x, y: z_max)[0], kernel[0])) f_list.sort() for i in range(0, len(f_list)): self.table[f_list[i][1]][i] += 1 self.table[f_list[i][1]][self.total_score_idx] += len(f_list) - i global fit_comparison_table fit_comparison_table[self.tag] = numpy.mean(self.r2_values) class MachineComparisonTable(object): machine_core_counts = {'m3.large': 2, 'm3.2xlarge': 8, 'm3.medium': 1, 'm3.xlarge': 4} def __init__(self, kernels): self.per_kernel_means = dict() self.per_kernel_data = dict() self.kernels = kernels self.table = dict() self.per_kernel_splines = dict() for kernel in self.kernels: self.per_kernel_means[kernel] = dict() self.per_kernel_data[kernel] = dict() self.box_props = dict(linewidth=0.5, color='DimGray', markeredgecolor='DimGray') def add_1d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope = fits[kernel][0][0] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) machine_entry[kernel[0]].append(scipy.integrate.quad(lambda x: slope * x + intercept, x_min, x_max)[0]) def add_2d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) machine_entry[kernel[0]].append(scipy.integrate.dblquad(lambda x, y: slope_x * x + slope_y * y + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max)[0]) def add_3d_fit_score(self, fits, machine): machine_entry = self.get_machine_entry(machine) for kernel in fits.keys(): slope_x = fits[kernel][0][0] slope_y = fits[kernel][0][1] slope_z = fits[kernel][0][2] intercept = fits[kernel][1] x_min = float(fits[kernel][3][0]) x_max = float(fits[kernel][4][0]) y_min = float(fits[kernel][3][1]) y_max = float(fits[kernel][4][1]) z_min = float(fits[kernel][3][2]) z_max = float(fits[kernel][4][2]) machine_entry[kernel[0]].append(scipy.integrate.tplquad( lambda x, y, z: slope_x * x + slope_y * y + slope_z * z + intercept, x_min, x_max, lambda x: y_min, lambda x: y_max, lambda x, y: z_min, lambda x, y: z_max)[0]) def get_machine_entry(self, machine): if machine in self.table: return 
self.table[machine] else: self.table[machine] = dict() for kernel in self.kernels: self.table[machine][kernel] = list() return self.table[machine] def generate_per_kernel_means(self): for machine in self.table.keys(): for kernel in self.kernels: self.per_kernel_means[kernel][MachineComparisonTable.machine_core_counts[machine]] = \ numpy.mean(self.table[machine][kernel]) def generate_per_kernel_data(self): for machine in self.table.keys(): for kernel in self.kernels: self.per_kernel_data[kernel][MachineComparisonTable.machine_core_counts[machine]] = \ self.table[machine][kernel] def generate_mean_list(self, kernel): mean_list = list() for cores, value in self.per_kernel_means[kernel].iteritems(): mean_list.append((cores, value)) mean_list.sort() return mean_list def generate_1d_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename): data_list = self.generate_mean_list(kernel) x_list = list() y_list = list() for entry in data_list: x_list.append(entry[0]) y_list.append(entry[1]) x_data = numpy.array(x_list) y_data = numpy.array(y_list) x_new = numpy.linspace(x_data.min(), x_data.max(), 300) y_new = scipy.interpolate.spline(x_data, y_data, x_new) self.per_kernel_splines[kernel] = dict() self.per_kernel_splines[kernel]['x_data'] = x_data self.per_kernel_splines[kernel]['y_data'] = y_data self.per_kernel_splines[kernel]['x_new'] = x_new self.per_kernel_splines[kernel]['y_new'] = y_new GenericArtifacts.set_figure_params() filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel) pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_plot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename): positions = self.per_kernel_data[kernel].keys() positions.sort() box_data = list() for position in positions: box_data.append(self.per_kernel_data[kernel][position]) x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] GenericArtifacts.set_figure_params() filename_base = "machine_comparison_box_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() flier_props = 
self.box_props.copy() flier_props['marker'] = 's' pylab.boxplot(x=box_data, positions=positions, boxprops=self.box_props, whiskerprops=self.box_props, capprops=self.box_props, flierprops=flier_props, medianprops=self.box_props, meanprops=self.box_props) pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker='s', color='k', label=kernel) pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Box \& Whisker Plot of Machine Comparison of {} for {} vs {}".format(kernel, independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_table(self, dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_caption, key_label_filename): filename_base = "machine_comparison_{}_vs_{}_{}_{}".format(independent_filename, dependent_filename, str(kernel).lower(), key_label_filename) tex_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|c|c|} \hline """ output_latex += r"""Cores & Score \\ \hline """ for entry in self.generate_mean_list(kernel): cores = entry[0] score = entry[1] output_latex += "%d & %.4e \\\\ \n" % (cores, score) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Machine Comparison of %s for %s vs %s in %s}\n" % (kernel, independent_caption, dependent_caption, key_label_caption) output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_multiline_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename): filename_base = "machine_comparison_{}_vs_{}_{}".format(independent_filename, dependent_filename, key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_plot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() markers = ['v', '^', 's', 'D', 'x', '*', 'h'] markers_count = 0 for kernel in self.kernels: x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel) markers_count += 1 pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Multi-line Plot of Machine Comparison for {} vs {}".format(independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_plot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", 
"auto", "{}_plot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_multiline_box_whisker_plot(self, dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename): filename_base = "machine_comparison_box_{}_vs_{}_{}".format(independent_filename, dependent_filename, key_label_filename) plot_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() markers = ['v', '^', 's', 'D', 'x', '*', 'h'] markers_count = 0 for kernel in self.kernels: x_data = self.per_kernel_splines[kernel]['x_data'] y_data = self.per_kernel_splines[kernel]['y_data'] positions = self.per_kernel_data[kernel].keys() positions.sort() box_data = list() for position in positions: box_data.append(self.per_kernel_data[kernel][position]) flier_props = self.box_props.copy() flier_props['marker'] = markers[markers_count] width = 0.1 * float(markers_count + 1) whisker_props = self.box_props.copy() whisker_props['linestyle'] = 'none' pylab.boxplot(x=box_data, positions=positions, widths=width, boxprops=self.box_props, whiskerprops=whisker_props, showcaps=False, showfliers=False, medianprops=self.box_props, meanprops=self.box_props) pylab.plot(x_data, y_data, linestyle='-', color='k') pylab.scatter(x_data, y_data, marker=markers[markers_count], color='k', label=kernel) markers_count += 1 pylab.autoscale() pylab.xlabel("Number of Cores") pylab.ylabel(dependent_caption) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') caption = "Multi-line Box \& Whisker Plot of Machine Comparison for {} vs {}".format(independent_caption, dependent_caption) tex_filename = os.path.join(FLAGS.root_dir, "{}_bwplot.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", "{}_bwplot.eps".format(filename_base)) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) def generate_artifacts(self, key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename): self.generate_per_kernel_means() self.generate_per_kernel_data() for kernel in self.kernels: self.generate_1d_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename) self.generate_box_whisker_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_filename) self.generate_table(dependent_caption, dependent_filename, independent_caption, independent_filename, kernel, key_label_caption, key_label_filename) self.generate_multiline_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename) self.generate_multiline_box_whisker_plot(dependent_caption, dependent_filename, independent_caption, independent_filename, key_label_filename) class GenericArtifacts: __metaclass__ = ABCMeta 
linear_regression = collections.namedtuple('LinearRegression', ['slope', 'intercept', 'r_squared', 'min', 'max']) def __init__(self, results_table, key_label_tuple): self.results_table = results_table self.key_label_tuple = key_label_tuple self.keys = self.results_table.get_keys() self.sub_key_label_tuple = None if len(self.key_label_tuple) is 1 else self.key_label_tuple[1:] self.sub_keys = None if not self.sub_key_label_tuple else set() self.kernels = set() self.cpu_ranges = dict() self.maxmem_ranges = dict() self.event_count_ranges = dict() self.agents_ranges = dict() self.connections_ranges = dict() self.event_count_vs_cpu_fits = dict() self.event_count_vs_maxmem_fits = dict() self.agents_vs_cpu_fits = dict() self.agents_vs_maxmem_fits = dict() self.connections_vs_cpu_fits = dict() self.connections_vs_maxmem_fits = dict() self.event_count_and_agents_vs_cpu_fits = dict() self.event_count_and_agents_vs_maxmem_fits = dict() self.event_count_and_connections_vs_cpu_fits = dict() self.event_count_and_connections_vs_maxmem_fits = dict() self.agents_and_connections_vs_cpu_fits = dict() self.agents_and_connections_vs_maxmem_fits = dict() self.event_count_and_agents_and_connections_vs_cpu_fits = dict() self.event_count_and_agents_and_connections_vs_maxmem_fits = dict() for key in self.keys: self.calculate_fits_for_key(key) self.kernels.add(key[0]) if self.sub_keys is not None: self.sub_keys.add(key[1:]) def calculate_fits_for_key(self, key): self.cpu_ranges[key] = self.results_table.get_cpu_lists_for_key(key).mean self.maxmem_ranges[key] = self.results_table.get_maxmem_lists_for_key(key).mean self.event_count_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.agents_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.connections_ranges[key] = self.results_table.get_event_count_lists_for_key(key).mean self.event_count_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.event_count_ranges[key], self.cpu_ranges[key]) self.event_count_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.event_count_ranges[key], self.maxmem_ranges[key]) self.agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.agents_ranges[key], self.cpu_ranges[key]) self.agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.agents_ranges[key], self.maxmem_ranges[key]) self.connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.connections_ranges[key], self.cpu_ranges[key]) self.connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_1d( self.connections_ranges[key], self.maxmem_ranges[key]) self.event_count_and_agents_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.agents_ranges[key], self.cpu_ranges[key]) self.event_count_and_agents_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.agents_ranges[key], self.maxmem_ranges[key]) self.event_count_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.connections_ranges[key], self.cpu_ranges[key]) self.event_count_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.event_count_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) self.agents_and_connections_vs_cpu_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.agents_ranges[key], 
self.connections_ranges[key], self.cpu_ranges[key]) self.agents_and_connections_vs_maxmem_fits[key] = GenericArtifacts.calculate_linear_regression_2d( self.agents_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) self.event_count_and_agents_and_connections_vs_cpu_fits[key] = \ GenericArtifacts.calculate_linear_regression_3d( self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key], self.cpu_ranges[key]) self.event_count_and_agents_and_connections_vs_maxmem_fits[key] = \ GenericArtifacts.calculate_linear_regression_3d( self.event_count_ranges[key], self.agents_ranges[key], self.connections_ranges[key], self.maxmem_ranges[key]) def filter_dict_for_sub_key(self, raw_dict, sub_key): return_dict = dict() for kernel in iter(self.kernels): return_dict[(kernel,)] = raw_dict[(kernel,) + sub_key] return return_dict @staticmethod def key_tuple_to_caption_string(key_tuple, capitialize=False): return_string = "" for entry in key_tuple: if not capitialize: return_string += "{} and ".format(entry) else: return_string += "{} and ".format(str(entry).capitalize()) return return_string[:-5] @staticmethod def key_tuple_to_filename_string(key_tuple, lowercase=False): return_string = "" for entry in key_tuple: if not lowercase: return_string += "{}_".format(entry) else: return_string += "{}_".format(str(entry).lower()) return return_string[:-1] @abstractmethod def generate_multiline_plots(self): pass @abstractmethod def generate_fit_tables(self): pass @abstractmethod def generate_score_tables(self): pass @abstractmethod def generate_machine_comparison_tables(self): pass @staticmethod def set_figure_params(): fig_width = 7.5 # width in inches fig_height = 3.75 # height in inches fig_size = [fig_width, fig_height] fig_params = {'backend': 'ps', 'axes.labelsize': 8, 'text.fontsize': 8, 'legend.fontsize': 8, 'xtick.labelsize': 6, 'ytick.labelsize': 6, 'text.usetex': True, 'figure.figsize': fig_size} pylab.rcParams.update(fig_params) @staticmethod def calculate_linear_regression_1d(x_list, f_list): results = sm.ols(formula="F ~ X", data=({'F': f_list, 'X': x_list})).fit() slope = list() slope.append(results.params['X']) min_value = list() min_value.append(min(x_list)) max_value = list() max_value.append(max(x_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def calculate_linear_regression_2d(x_list, y_list, f_list): results = sm.ols(formula="F ~ X + Y", data=({'F': f_list, 'X': x_list, 'Y': y_list})).fit() slope = list() slope.append(results.params['X']) slope.append(results.params['Y']) min_value = list() min_value.append(min(x_list)) min_value.append(min(y_list)) max_value = list() max_value.append(max(x_list)) max_value.append(max(y_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def calculate_linear_regression_3d(x_list, y_list, z_list, f_list): results = sm.ols(formula="F ~ X + Y + Z", data=({'F': f_list, 'X': x_list, 'Y': y_list, 'Z': z_list})).fit() slope = list() slope.append(results.params['X']) slope.append(results.params['Y']) slope.append(results.params['Z']) min_value = list() min_value.append(min(x_list)) min_value.append(min(y_list)) min_value.append(min(z_list)) max_value = list() max_value.append(max(x_list)) 
max_value.append(max(y_list)) max_value.append(max(z_list)) intercept = results.params['Intercept'] r_squared = results.rsquared return GenericArtifacts.linear_regression(slope=slope, intercept=intercept, r_squared=r_squared, min=min_value, max=max_value) @staticmethod def generate_1d_multiline_plot(fits, x_ranges, x_label, f_label, caption, filename_base): markers = ['v', '^', 's', 'D', 'x', '*', 'h'] GenericArtifacts.set_figure_params() filename_base = filename_base.replace('.', '_') plot_filename = os.path.join(FLAGS.root_dir, "{}.eps".format(filename_base)) print "\tGenerating {}".format(plot_filename) pylab.figure(1) pylab.clf() marker_count = 0 for kernel in fits.keys(): x_list = [0] x_max = max(x_ranges[kernel]) x_list.append(x_max / 2) x_list.append(x_max) f_fit = lambda x: x * fits[kernel][0][0] + fits[kernel][1] y_list = [f_fit(entry) for entry in x_list] pylab.plot(x_list, y_list, marker=markers[marker_count], linestyle='-', color='k', label=kernel[0]) marker_count += 1 pylab.autoscale() pylab.xlabel(x_label) pylab.ylabel(f_label) pylab.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4, mode="expand", borderaxespad=0.) pylab.savefig(plot_filename, bbox_inches='tight', orientation='portrait') tex_filename = os.path.join(FLAGS.root_dir, "{}.tex".format(filename_base)) print "\tGenerating {}".format(tex_filename) tex_figure_path = os.path.join("figures", "auto", filename_base) output_latex = r"""\begin{figure} \centering """ output_latex += "\\includegraphics{%s}\n" % tex_figure_path output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{fig:%s}\n" % filename_base output_latex += r"""\end{figure}""" with open(tex_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_1d_fit_table(key_labels, fits, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "Slope & Intercept & $R^2$ \\\\\n\\hline\n" for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += " %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_2d_fit_table(key_labels, fits, x_label, y_label, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "{} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label) for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += "%.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} 
""" output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_3d_fit_table(key_labels, fits, x_label, y_label, z_label, caption, filename_base): filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|" for _ in key_labels: output_latex += "l|" output_latex += "|c|c|c|c|c|}\n" output_latex += "\\hline\n" for label in key_labels: output_latex += "{} & ".format(label) output_latex += "{} Slope & {} Slope & {} Slope & Intercept & $R^2$ \\\\\n\\hline\n".format(x_label, y_label, z_label) for key in fits.keys(): for entry in key: output_latex += "%s & " % entry output_latex += "%.4g & %.4g & %.4g & %.4g & %.4g \\\\\n" % (fits[key][0][0], fits[key][0][1], fits[key][0][2], fits[key][1], fits[key][2]) output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_score_table(score_table, caption, filename_base): ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"] filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|l|" for _ in score_table.get_table().keys(): output_latex += "|c" output_latex += "||c" output_latex += "|}\\hline\n" output_latex += "Kernel " for i in range(0, len(score_table.get_table().keys())): output_latex += "& %s " % ordinal_ranks[i] output_latex += "& Total Score " output_latex += "\\\\\n" output_latex += r"""\hline """ total_count = score_table.get_total_count() assert(total_count > 0) for kernel in score_table.get_table().keys(): output_latex += "%s " % kernel for entry in score_table.get_table()[kernel]: if entry > 0: output_latex += "& %d " % entry else: output_latex += "& \\textemdash " output_latex += "\\\\\n" output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) @staticmethod def generate_score_percentage_table(score_table, caption, filename_base): ordinal_ranks = ["1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th"] filename_base = filename_base.replace('.', '_') table_filename = os.path.join(FLAGS.root_dir, "{}_table.tex".format(filename_base)) print "\tGenerating {}".format(table_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += "\\begin{tabular}{|l|" for _ in score_table.get_table().keys(): output_latex += "|c" output_latex += "|}\\hline\n" output_latex += "Kernel " for i in range(0, len(score_table.get_table().keys())): output_latex += "& %s " % ordinal_ranks[i] output_latex += "\\\\\n" output_latex += r"""\hline """ total_count = float(score_table.get_total_count()) assert(total_count > 0.0) for kernel in score_table.get_table().keys(): output_latex += "%s " % kernel for i in range(0, len(score_table.get_table().keys())): entry = 
score_table.get_table()[kernel][i] if entry > 0: output_latex += "& %5.4f " % (float(entry) / total_count) else: output_latex += "& \\textemdash " output_latex += "\\\\\n" output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{%s}\n" % caption output_latex += "\\label{tab:%s}\n" % filename_base output_latex += r"""\end{table}""" with open(table_filename, 'w') as f: f.write(output_latex) class KernelArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelArtifacts, self).__init__(results_table, ("Kernel",)) def generate_multiline_plots(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_cpu_fits, self.event_count_ranges, "Event Count", "CPU Time (mS)", "Trend lines for Event Count vs CPU Time for per {} fits".format( key_label_caption), "event_count_vs_cpu_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.event_count_vs_maxmem_fits, self.event_count_ranges, "Event Count", "Max Memory (kB)", "Trend lines for Event Count vs Max Memory for per {} fits".format( key_label_caption), "event_count_vs_maxmem_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_cpu_fits, self.agents_ranges, "Agents", "CPU Time (mS)", "Trend lines for Agents vs CPU Time for per {} fits".format( key_label_caption), "agents_vs_cpu_per_{}_multiline_plot".format(key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.agents_vs_maxmem_fits, self.agents_ranges, "Agents", "Max Memory (kB)", "Trend lines for Agents vs Max Memory for per {} fits".format( key_label_caption), "agents_vs_maxmem_per_{}_multiline_plot".format(key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_cpu_fits, self.connections_ranges, "Connections", "CPU Time (mS)", "Trend lines for Connections vs CPU Time for per {} fits".format( key_label_caption), "connections_vs_cpu_per_{}_multiline_plot".format( key_label_filename)) GenericArtifacts.generate_1d_multiline_plot(self.connections_vs_maxmem_fits, self.connections_ranges, "Connections", "Max Memory (kB)", "Trend lines for Connections vs Max Memory for per {} fits".format( key_label_caption), "connections_vs_maxmem_per_{}_multiline_plot".format( key_label_filename)) def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits".format(key_label_caption), "event_count_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits".format(key_label_caption), "agents_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits".format(key_label_caption), 
"agents_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits".format(key_label_caption), "connections_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_1d_fit_table(self.key_label_tuple, self.connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "connections_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_agents_vs_cpu_fits, "Event Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits".format( key_label_caption), "event_count_and_agents_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_and_agents_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits".format( key_label_caption), "event_count_and_connections_vs_cpu_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.event_count_and_connections_vs_maxmem_fits, "Event Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "event_count_and_connections_vs_maxmem_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits".format( key_label_caption), "agents_and_connections_vs_cpu_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_2d_fit_table(self.key_label_tuple, self.agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits".format( key_label_caption), "agents_and_connections_vs_maxmem_per_{}_fit".format(key_label_filename)) GenericArtifacts.generate_3d_fit_table(self.key_label_tuple, self.event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit".format( key_label_filename)) GenericArtifacts.generate_3d_fit_table(self.key_label_tuple, self.event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits".format(key_label_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit".format( key_label_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.agents_vs_cpu_fits) selection = (("Kernel", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) 
score_tables[selection].add_1d_fit_score(self.connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.event_count_vs_cpu_fits) selection = (("Kernel", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_cpu_fits) selection = (("Kernel", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.agents_vs_maxmem_fits) selection = (("Kernel", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_1d_fit_score(self.event_count_vs_maxmem_fits) selection = (("Kernel", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.agents_and_connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_agents_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_2d_fit_score(self.event_count_and_connections_vs_maxmem_fits) selection = (("Kernel", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) score_tables[selection].add_3d_fit_score(self.event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, 
dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): pass class KernelMachineArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelMachineArtifacts, self).__init__(results_table, ("Kernel", "Machine")) def generate_multiline_plots(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_ranges = self.filter_dict_for_sub_key(self.event_count_ranges, sub_key) agents_ranges = self.filter_dict_for_sub_key(self.agents_ranges, sub_key) connections_ranges = self.filter_dict_for_sub_key(self.connections_ranges, sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_multiline_plot(event_count_vs_cpu_fits, event_count_ranges, "Event Count", "CPU Time (mS)", "Trend lines for Event Count vs CPU Time for per {} fits for {}" .format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(event_count_vs_maxmem_fits, event_count_ranges, "Event Count", "Max Memory (kB)", "Trend lines for Event Count vs Max Memory for per {} fits " "for {}".format(key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(agents_vs_cpu_fits, agents_ranges, "Agents", "CPU Time (mS)", "Trend lines for Agents vs CPU Time for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(agents_vs_maxmem_fits, agents_ranges, "Agents", "Max Memory (kB)", "Trend lines for Agents vs Max Memory for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(connections_vs_cpu_fits, connections_ranges, "Connections", "CPU Time (mS)", "Trend lines for Connections vs CPU Time for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_multiline_plot(connections_vs_maxmem_fits, connections_ranges, "Connections", "Max Memory (kB)", "Trend lines for Connections vs Max Memory for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_multiline_plot_for_{}".format( key_label_filename, sub_key_filename)) def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) 
key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_cpu_fits, "Event 
Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_maxmem_fits, "Event Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits for {}".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", "Machine", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, 
selection) selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) for sub_key in iter(self.sub_keys): event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) score_tables[(("Kernel", "Machine", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Agents", ), "Max Memory")].add_1d_fit_score(agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Connections", ), "CPU")].add_1d_fit_score(connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Connections", ), "Max Memory")].add_1d_fit_score( connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", ), "CPU")].add_1d_fit_score(event_count_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", ), "Max Memory")].add_1d_fit_score( event_count_vs_maxmem_fits) 
score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score( agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score( agents_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "CPU")].add_2d_fit_score( event_count_and_agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score( event_count_and_agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score( event_count_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "CPU")].add_2d_fit_score( event_count_and_agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", ), ("Events", "Agents", "Connections",), "Max Memory")].\ add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): pass class KernelMachineTypeArtifacts(GenericArtifacts): def __init__(self, results_table): super(KernelMachineTypeArtifacts, self).__init__(results_table, ("Kernel", "Machine", "Type")) def generate_multiline_plots(self): pass def generate_fit_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) for sub_key in iter(self.sub_keys): sub_key_caption = GenericArtifacts.key_tuple_to_caption_string(sub_key) sub_key_filename = GenericArtifacts.key_tuple_to_filename_string(sub_key) event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = 
self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_cpu_fits, "Event Count vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), event_count_vs_maxmem_fits, "Event Count vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "event_count_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_cpu_fits, "Agents vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), agents_vs_maxmem_fits, "Agents vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "agents_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_cpu_fits, "Connections vs CPU Time (mS) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_cpu_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_1d_fit_table(("Kernel",), connections_vs_maxmem_fits, "Connections vs Max Memory (kB) for per {} fits for {}".format( key_label_caption, sub_key_caption), "connections_vs_maxmem_per_{}_fit_for_{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_cpu_fits, "Event Count", "Agents", "Event Count and Agents vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_agents_vs_maxmem_fits, "Event Count", "Agents", "Event Count and Agents vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_agents_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_cpu_fits, "Event Count", "Connections", "Event Count and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), event_count_and_connections_vs_maxmem_fits, "Event 
Count", "Connections", "Event Count and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "event_count_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_cpu_fits, "Agents", "Connections", "Agents and Connections vs CPU Time (mS) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_cpu_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_2d_fit_table(("Kernel",), agents_and_connections_vs_maxmem_fits, "Agents", "Connections", "Agents and Connections vs Max Memory (kB) for per {} fits for " "{}".format(key_label_caption, sub_key_caption), "agents_and_connections_vs_maxmem_per_{}_fit_for_{}".format( key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_cpu_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs CPU Time (mS) for per {} " "fits".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_cpu_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) GenericArtifacts.generate_3d_fit_table(("Kernel",), event_count_and_agents_and_connections_vs_maxmem_fits, "Event Count", "Agents", "Connections", "Event Count and Agents and Connections vs Max Memory (kB) for per " "{} fits for {}".format(key_label_caption, sub_key_caption), "event_count_and_agents_and_connections_vs_maxmem_per_{}_fit_for_" "{}".format(key_label_filename, sub_key_filename)) def generate_score_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) score_tables = dict() selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "CPU") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", ), "Max Memory") 
score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) selection = (("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections", ), "Max Memory") score_tables[selection] = ScoreTable(self.kernels, selection) for sub_key in iter(self.sub_keys): event_count_vs_cpu_fits = self.filter_dict_for_sub_key(self.event_count_vs_cpu_fits, sub_key) event_count_vs_maxmem_fits = self.filter_dict_for_sub_key(self.event_count_vs_maxmem_fits, sub_key) agents_vs_cpu_fits = self.filter_dict_for_sub_key(self.agents_vs_cpu_fits, sub_key) agents_vs_maxmem_fits = self.filter_dict_for_sub_key(self.agents_vs_maxmem_fits, sub_key) connections_vs_cpu_fits = self.filter_dict_for_sub_key(self.connections_vs_cpu_fits, sub_key) connections_vs_maxmem_fits = self.filter_dict_for_sub_key(self.connections_vs_maxmem_fits, sub_key) event_count_and_agents_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_cpu_fits, sub_key) event_count_and_agents_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_vs_maxmem_fits, sub_key) event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_maxmem_fits, sub_key) agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_cpu_fits, sub_key) agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.agents_and_connections_vs_maxmem_fits, sub_key) event_count_and_agents_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "CPU")].add_1d_fit_score(agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", ), "Max Memory")].add_1d_fit_score( agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "CPU")].add_1d_fit_score( connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Connections", ), "Max Memory")].add_1d_fit_score( connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "CPU")].add_1d_fit_score( event_count_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", ), "Max Memory")].add_1d_fit_score( event_count_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "CPU")].add_2d_fit_score( agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Agents", "Connections",), "Max Memory")].add_2d_fit_score( agents_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "CPU")].add_2d_fit_score( event_count_and_agents_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents",), "Max Memory")].add_2d_fit_score( event_count_and_agents_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Connections",), "Max Memory")].add_2d_fit_score( 
event_count_and_connections_vs_maxmem_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "CPU")].\ add_2d_fit_score(event_count_and_agents_and_connections_vs_cpu_fits) score_tables[(("Kernel", "Machine", "Type", ), ("Events", "Agents", "Connections",), "Max Memory")].\ add_3d_fit_score(event_count_and_agents_and_connections_vs_maxmem_fits) for selection, table in score_tables.iteritems(): independent_vars = selection[1] independent_caption = "" independent_filename = "" for var in independent_vars: independent_caption += "{} and ".format(var) independent_filename += "{}_".format(str(var).lower()) independent_caption = independent_caption[:-5] independent_filename = independent_filename[:-1] dependent_caption = selection[2] if dependent_caption == "CPU": dependent_filename = "cpu" else: dependent_filename = "maxmem" GenericArtifacts.generate_score_table(table, "Scores based on {} vs {} for {} fits".format(independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_scores".format(independent_filename, dependent_filename, key_label_filename)) GenericArtifacts.generate_score_percentage_table(table, "Score percentages based on {} vs {} for {} fits".format( independent_caption, dependent_caption, key_label_caption), "{}_vs_{}_per_{}_fits_score_percentage".format( independent_filename, dependent_filename, key_label_filename)) def generate_machine_comparison_tables(self): key_label_caption = GenericArtifacts.key_tuple_to_caption_string(self.key_label_tuple) key_label_filename = GenericArtifacts.key_tuple_to_filename_string(self.key_label_tuple, lowercase=True) machine_comparison_tables = dict() machine_comparison_tables[(("Events", "Connections", ), "CPU")] = MachineComparisonTable(self.kernels) machine_comparison_tables[(("Events", "Agents", "Connections", ), "Max Memory")] = MachineComparisonTable( self.kernels) for sub_key in self.sub_keys: machine = sub_key[0] event_count_and_connections_vs_cpu_fits = self.filter_dict_for_sub_key( self.event_count_and_connections_vs_cpu_fits, sub_key) event_count_and_agents_and_connections_vs_maxmem_fits = self.filter_dict_for_sub_key( self.event_count_and_agents_and_connections_vs_maxmem_fits, sub_key) machine_comparison_tables[(("Events", "Connections",), "CPU")].add_2d_fit_score( event_count_and_connections_vs_cpu_fits, machine) machine_comparison_tables[(("Events", "Agents", "Connections",), "Max Memory")].add_3d_fit_score( event_count_and_agents_and_connections_vs_maxmem_fits, machine) selection = (("Events", "Connections",), "CPU") independent_caption = "Events \& Connections" independent_filename = "events_connections" dependent_caption = "CPU" dependent_filename = "cpu" print "Printing results for {}".format(selection) machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename) selection = (("Events", "Agents", "Connections",), "Max Memory") independent_caption = "Events \& Agents \& Connections" independent_filename = "events_agents_connections" dependent_caption = "Maximum Memory" dependent_filename = "maxmem" print "Printing results for {}".format(selection) machine_comparison_tables[selection].generate_artifacts(key_label_caption, key_label_filename, independent_caption, independent_filename, dependent_caption, dependent_filename) def read_raw_inputs(): print "Reading in raw results" create_str = "CREATE TABLE IF NOT EXISTS raw_results (machine text, kernel text, type 
text, model text, " \ "iteration long, event_count long, final_time long, cpu long, maxmem long, agents long, " \ "connections long, bucket long)" experiment_db.execute(create_str) for input_file in os.listdir(FLAGS.root_dir): if re.search(r'run_result.*\.db', input_file): result_file = os.path.join(FLAGS.root_dir, input_file) print 'Reading results from {}'.format(result_file) input_db = DBWrapper(result_file) read_results(input_db) input_db.cleanup() def get_correct_type(row): row = list(row) model = row[3] if re.match("CompleteBi.*", model): row[2] = "complete-bipartite" elif re.match("SmallModel.*", model): row[2] = "Watts-Strogatz" elif re.match("Cycle.*", model): row[2] = "cycle" elif re.match("Hyper.*", model): row[2] = "hypercube" elif re.match("Star.*", model): row[2] = "star" elif re.match("Complete.*", model): row[2] = "complete" elif re.match("Erdos.*", model): row[2] = "erdos-reyni" elif re.match("Wheel.*", model): row[2] = "wheel" elif re.match("Circular.*", model): row[2] = "circular-ladder" elif re.match("Periodic.*", model): row[2] = "periodic-2grid" elif re.match("NonPeriodic.*", model): row[2] = "nonperiodic-2grid" else: print "Unknown model {}".format(model) assert False return row def get_bucket_event_count(event_count): global event_count_buckets global bucketing_factor for bucket in event_count_buckets: if (1.0 + bucketing_factor) * bucket >= event_count >= (1.0 - bucketing_factor) * bucket: return bucket return None def read_results(input_db): global experiment_db cmd_str = "SELECT machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \ "connections FROM 'raw_results'" for row in input_db.select(cmd_str): if row[2] == "None": row = get_correct_type(row) bucket = get_bucket_event_count(row[5]) if bucket is None: continue cmd_str = "INSERT INTO raw_results " \ "(machine, kernel, type, model, iteration, event_count, final_time, cpu, maxmem, agents, " \ "connections, bucket) " \ "VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')" \ .format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], bucket) experiment_db.execute(cmd_str) experiment_db.commit() def generate_per_kernel_results_table(): global experiment_db global kernel_results_table global event_count_buckets kernel_results_table = ResultsTable() select_cmd = "SELECT kernel, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results" for row in experiment_db.select(select_cmd): kernel_results_table.add_entry((row[0],), row[1], row[2], row[3], row[4], row[5], row[6], row[7]) kernel_results_table.create_filtered_table() def generate_per_kernel_results_artifacts(): print "Generating per kernel artifacts" global kernel_results_table kernel_artifacts = KernelArtifacts(kernel_results_table) kernel_artifacts.generate_multiline_plots() kernel_artifacts.generate_fit_tables() kernel_artifacts.generate_score_tables() kernel_artifacts.generate_machine_comparison_tables() print "Finished per kernel artifacts" def generate_per_kernel_and_machine_results_table(): global experiment_db global kernel_machine_results_table global event_count_buckets kernel_machine_results_table = ResultsTable() select_cmd = "SELECT kernel, machine, bucket, model, event_count, agents, connections, cpu, maxmem FROM raw_results" for row in experiment_db.select(select_cmd): kernel_machine_results_table.add_entry((row[0], row[1]), row[2], row[3], row[4], row[5], row[6], row[7], row[8]) 
kernel_machine_results_table.create_filtered_table() def generate_per_kernel_and_machine_results_artifacts(): print "Generating per kernel and machine artifacts" global kernel_machine_results_table kernel_and_machine_artifacts = KernelMachineArtifacts(kernel_machine_results_table) kernel_and_machine_artifacts.generate_multiline_plots() kernel_and_machine_artifacts.generate_fit_tables() kernel_and_machine_artifacts.generate_score_tables() kernel_and_machine_artifacts.generate_machine_comparison_tables() print "Finished per kernel and machine artifacts" def generate_per_kernel_and_machine_and_type_results_table(): global experiment_db global kernel_machine_type_results_table global event_count_buckets kernel_machine_type_results_table = ResultsTable() select_cmd = "SELECT kernel, machine, type, bucket, model, event_count, agents, connections, cpu, maxmem FROM " \ "raw_results" for row in experiment_db.select(select_cmd): kernel_machine_type_results_table.add_entry((row[0], row[1], row[2]), row[3], row[4], row[5], row[6], row[7], row[8], row[9]) kernel_machine_type_results_table.create_filtered_table() def generate_per_kernel_and_machine_and_type_results_artifacts(): print "Generating per kernel and machine and type artifacts" global kernel_machine_type_results_table kernel_and_machine_and_type_artifacts = KernelMachineTypeArtifacts(kernel_machine_type_results_table) kernel_and_machine_and_type_artifacts.generate_fit_tables() kernel_and_machine_and_type_artifacts.generate_score_tables() kernel_and_machine_and_type_artifacts.generate_machine_comparison_tables() print "Finished per kernel and machine and type artifacts" def generate_fit_comparison_artifacts(): global fit_comparison_table cpu_fit_list = list() memory_fit_list = list() for key, value in fit_comparison_table.iteritems(): if key[2] == "CPU": cpu_fit_list.append((value, key,)) else: memory_fit_list.append((value, key,)) cpu_fit_list.sort() cpu_fit_list.reverse() cpu_comparison_filename = os.path.join(FLAGS.root_dir, "cpu_fit_comparison_table.tex") print "\tGenerating {}".format(cpu_comparison_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|l|l|c|} \hline """ output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\ \hline """ for cpu_fit in cpu_fit_list: score = cpu_fit[0] entry = cpu_fit[1] for key in entry[0]: output_latex += "{} \& ".format(key) output_latex = output_latex[:-4] output_latex += " & " for var in entry[1]: output_latex += "{} \& ".format(var) output_latex = output_latex[:-4] output_latex += " & %5.4f" % float(score) output_latex += r""" \\ """ output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Comparisons for CPU fits}\n" output_latex += "\\label{tab:cpu_fit_comparison}\n" output_latex += r"""\end{table}""" with open(cpu_comparison_filename, 'w') as f: f.write(output_latex) memory_fit_list.sort() memory_fit_list.reverse() memory_comparison_filename = os.path.join(FLAGS.root_dir, "memory_fit_comparison_table.tex") print "\tGenerating {}".format(memory_comparison_filename) output_latex = r"""\begin{table}[h] \centering """ output_latex += r"""\begin{tabular}{|l|l|c|} \hline """ output_latex += r"""Selection Keys & Independent Variables & $R^2$ \\ \hline """ for memory_fit in memory_fit_list: score = memory_fit[0] entry = memory_fit[1] for key in entry[0]: output_latex += "{} \& ".format(key) output_latex = output_latex[:-4] output_latex += " & " for var in entry[1]: output_latex += "{} \& ".format(var) output_latex = output_latex[:-4] 
output_latex += " & %5.4f" % float(score) output_latex += r""" \\ """ output_latex += r"""\hline \end{tabular} """ output_latex += "\\caption{Comparisons for Memory fits}\n" output_latex += "\\label{tab:memory_fit_comparison}\n" output_latex += r"""\end{table}""" with open(memory_comparison_filename, 'w') as f: f.write(output_latex) def process_raw_results(): generate_per_kernel_results_table() generate_per_kernel_results_artifacts() generate_per_kernel_and_machine_results_table() generate_per_kernel_and_machine_results_artifacts() generate_per_kernel_and_machine_and_type_results_table() generate_per_kernel_and_machine_and_type_results_artifacts() generate_fit_comparison_artifacts() def main(argv): global experiment_db try: FLAGS(argv) # parse flags except gflags.FlagsError, e: print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS) sys.exit(1) full_path = os.path.join(FLAGS.root_dir, FLAGS.output_db) if FLAGS.read_inputs: print "Unlinking {}".format(full_path) try: os.unlink(full_path) except OSError, e: print "Unable able to unlink {} due to {}".format(full_path, e) else: print "Reusing {}".format(full_path) experiment_db = DBWrapper(full_path) if FLAGS.read_inputs: read_raw_inputs() process_raw_results() experiment_db.cleanup() if __name__ == '__main__': main(sys.argv)
apache-2.0
pratapvardhan/pandas
pandas/tests/test_base.py
2
46174
# -*- coding: utf-8 -*- from __future__ import print_function import re import sys from datetime import datetime, timedelta import pytest import numpy as np import pandas as pd import pandas.compat as compat from pandas.core.dtypes.common import ( is_object_dtype, is_datetimetz, is_datetime64_dtype, needs_i8_conversion) import pandas.util.testing as tm from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta, IntervalIndex, Interval, CategoricalIndex, Timestamp) from pandas.compat import StringIO, PYPY, long from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.accessor import PandasDelegate from pandas.core.base import PandasObject, NoNewAttributesMixin from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas._libs.tslib import iNaT class CheckStringMixin(object): def test_string_methods_dont_fail(self): repr(self.container) str(self.container) bytes(self.container) if not compat.PY3: unicode(self.container) # noqa def test_tricky_container(self): if not hasattr(self, 'unicode_container'): pytest.skip('Need unicode_container to test with this') repr(self.unicode_container) str(self.unicode_container) bytes(self.unicode_container) if not compat.PY3: unicode(self.unicode_container) # noqa class CheckImmutable(object): mutable_regex = re.compile('does not support mutable operations') def check_mutable_error(self, *args, **kwargs): # Pass whatever function you normally would to assert_raises_regex # (after the Exception kind). tm.assert_raises_regex( TypeError, self.mutable_regex, *args, **kwargs) def test_no_mutable_funcs(self): def setitem(): self.container[0] = 5 self.check_mutable_error(setitem) def setslice(): self.container[1:2] = 3 self.check_mutable_error(setslice) def delitem(): del self.container[0] self.check_mutable_error(delitem) def delslice(): del self.container[0:3] self.check_mutable_error(delslice) mutable_methods = getattr(self, "mutable_methods", []) for meth in mutable_methods: self.check_mutable_error(getattr(self.container, meth)) def test_slicing_maintains_type(self): result = self.container[1:2] expected = self.lst[1:2] self.check_result(result, expected) def check_result(self, result, expected, klass=None): klass = klass or self.klass assert isinstance(result, klass) assert result == expected class TestPandasDelegate(object): class Delegator(object): _properties = ['foo'] _methods = ['bar'] def _set_foo(self, value): self.foo = value def _get_foo(self): return self.foo foo = property(_get_foo, _set_foo, doc="foo property") def bar(self, *args, **kwargs): """ a test bar method """ pass class Delegate(PandasDelegate, PandasObject): def __init__(self, obj): self.obj = obj def setup_method(self, method): pass def test_invalid_delegation(self): # these show that in order for the delegation to work # the _delegate_* methods need to be overridden to not raise # a TypeError self.Delegate._add_delegate_accessors( delegate=self.Delegator, accessors=self.Delegator._properties, typ='property' ) self.Delegate._add_delegate_accessors( delegate=self.Delegator, accessors=self.Delegator._methods, typ='method' ) delegate = self.Delegate(self.Delegator()) def f(): delegate.foo pytest.raises(TypeError, f) def f(): delegate.foo = 5 pytest.raises(TypeError, f) def f(): delegate.foo() pytest.raises(TypeError, f) @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") def test_memory_usage(self): # Delegate does not implement memory_usage. 
# Check that we fall back to in-built `__sizeof__` # GH 12924 delegate = self.Delegate(self.Delegator()) sys.getsizeof(delegate) class Ops(object): def _allow_na_ops(self, obj): """Whether to skip test cases including NaN""" if (isinstance(obj, Index) and (obj.is_boolean() or not obj._can_hold_na)): # don't test boolean / int64 index return False return True def setup_method(self, method): self.bool_index = tm.makeBoolIndex(10, name='a') self.int_index = tm.makeIntIndex(10, name='a') self.float_index = tm.makeFloatIndex(10, name='a') self.dt_index = tm.makeDateIndex(10, name='a') self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize( tz='US/Eastern') self.period_index = tm.makePeriodIndex(10, name='a') self.string_index = tm.makeStringIndex(10, name='a') self.unicode_index = tm.makeUnicodeIndex(10, name='a') arr = np.random.randn(10) self.int_series = Series(arr, index=self.int_index, name='a') self.float_series = Series(arr, index=self.float_index, name='a') self.dt_series = Series(arr, index=self.dt_index, name='a') self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True) self.period_series = Series(arr, index=self.period_index, name='a') self.string_series = Series(arr, index=self.string_index, name='a') types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string', 'unicode'] fmts = ["{0}_{1}".format(t, f) for t in types for f in ['index', 'series']] self.objs = [getattr(self, f) for f in fmts if getattr(self, f, None) is not None] def check_ops_properties(self, props, filter=None, ignore_failures=False): for op in props: for o in self.is_valid_objs: # if a filter, skip if it doesn't match if filter is not None: filt = o.index if isinstance(o, Series) else o if not filter(filt): continue try: if isinstance(o, Series): expected = Series( getattr(o.index, op), index=o.index, name='a') else: expected = getattr(o, op) except (AttributeError): if ignore_failures: continue result = getattr(o, op) # these couuld be series, arrays or scalars if isinstance(result, Series) and isinstance(expected, Series): tm.assert_series_equal(result, expected) elif isinstance(result, Index) and isinstance(expected, Index): tm.assert_index_equal(result, expected) elif isinstance(result, np.ndarray) and isinstance(expected, np.ndarray): tm.assert_numpy_array_equal(result, expected) else: assert result == expected # freq raises AttributeError on an Int64Index because its not # defined we mostly care about Series here anyhow if not ignore_failures: for o in self.not_valid_objs: # an object that is datetimelike will raise a TypeError, # otherwise an AttributeError if issubclass(type(o), DatetimeIndexOpsMixin): pytest.raises(TypeError, lambda: getattr(o, op)) else: pytest.raises(AttributeError, lambda: getattr(o, op)) def test_binary_ops_docs(self): from pandas import DataFrame, Panel op_map = {'add': '+', 'sub': '-', 'mul': '*', 'mod': '%', 'pow': '**', 'truediv': '/', 'floordiv': '//'} for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv', 'floordiv']: for klass in [Series, DataFrame, Panel]: operand1 = klass.__name__.lower() operand2 = 'other' op = op_map[op_name] expected_str = ' '.join([operand1, op, operand2]) assert expected_str in getattr(klass, op_name).__doc__ # reverse version of the binary ops expected_str = ' '.join([operand2, op, operand1]) assert expected_str in getattr(klass, 'r' + op_name).__doc__ class TestIndexOps(Ops): def setup_method(self, method): super(TestIndexOps, self).setup_method(method) self.is_valid_objs = self.objs self.not_valid_objs = [] def 
test_none_comparison(self): # bug brought up by #1079 # changed from TypeError in 0.17.0 for o in self.is_valid_objs: if isinstance(o, Series): o[0] = np.nan # noinspection PyComparisonWithNone result = o == None # noqa assert not result.iat[0] assert not result.iat[1] # noinspection PyComparisonWithNone result = o != None # noqa assert result.iat[0] assert result.iat[1] result = None == o # noqa assert not result.iat[0] assert not result.iat[1] # this fails for numpy < 1.9 # and oddly for *some* platforms # result = None != o # noqa # assert result.iat[0] # assert result.iat[1] if (is_datetime64_dtype(o) or is_datetimetz(o)): # Following DatetimeIndex (and Timestamp) convention, # inequality comparisons with Series[datetime64] raise with pytest.raises(TypeError): None > o with pytest.raises(TypeError): o > None else: result = None > o assert not result.iat[0] assert not result.iat[1] result = o < None assert not result.iat[0] assert not result.iat[1] def test_ndarray_compat_properties(self): for o in self.objs: # Check that we work. for p in ['shape', 'dtype', 'T', 'nbytes']: assert getattr(o, p, None) is not None # deprecated properties for p in ['flags', 'strides', 'itemsize']: with tm.assert_produces_warning(FutureWarning): assert getattr(o, p, None) is not None with tm.assert_produces_warning(FutureWarning): assert hasattr(o, 'base') # If we have a datetime-like dtype then needs a view to work # but the user is responsible for that try: with tm.assert_produces_warning(FutureWarning): assert o.data is not None except ValueError: pass with pytest.raises(ValueError): o.item() # len > 1 assert o.ndim == 1 assert o.size == len(o) assert Index([1]).item() == 1 assert Series([1]).item() == 1 def test_ops(self): for op in ['max', 'min']: for o in self.objs: result = getattr(o, op)() if not isinstance(o, PeriodIndex): expected = getattr(o.values, op)() else: expected = pd.Period( ordinal=getattr(o._ndarray_values, op)(), freq=o.freq) try: assert result == expected except TypeError: # comparing tz-aware series with np.array results in # TypeError expected = expected.astype('M8[ns]').astype('int64') assert result.value == expected def test_nanops(self): # GH 7261 for op in ['max', 'min']: for klass in [Index, Series]: obj = klass([np.nan, 2.0]) assert getattr(obj, op)() == 2.0 obj = klass([np.nan]) assert pd.isna(getattr(obj, op)()) obj = klass([]) assert pd.isna(getattr(obj, op)()) obj = klass([pd.NaT, datetime(2011, 11, 1)]) # check DatetimeIndex monotonic path assert getattr(obj, op)() == datetime(2011, 11, 1) obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT]) # check DatetimeIndex non-monotonic path assert getattr(obj, op)(), datetime(2011, 11, 1) # argmin/max obj = Index(np.arange(5, dtype='int64')) assert obj.argmin() == 0 assert obj.argmax() == 4 obj = Index([np.nan, 1, np.nan, 2]) assert obj.argmin() == 1 assert obj.argmax() == 3 obj = Index([np.nan]) assert obj.argmin() == -1 assert obj.argmax() == -1 obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), pd.NaT]) assert obj.argmin() == 1 assert obj.argmax() == 2 obj = Index([pd.NaT]) assert obj.argmin() == -1 assert obj.argmax() == -1 def test_value_counts_unique_nunique(self): for orig in self.objs: o = orig.copy() klass = type(o) values = o._values if isinstance(values, Index): # reset name not to affect latter process values.name = None # create repeated values, 'n'th element is repeated by n+1 times # skip boolean, because it only has 2 values at most if isinstance(o, Index) and o.is_boolean(): continue elif 
isinstance(o, Index): expected_index = Index(o[::-1]) expected_index.name = None o = o.repeat(range(1, len(o) + 1)) o.name = 'a' else: expected_index = Index(values[::-1]) idx = o.index.repeat(range(1, len(o) + 1)) rep = np.repeat(values, range(1, len(o) + 1)) o = klass(rep, index=idx, name='a') # check values has the same dtype as the original assert o.dtype == orig.dtype expected_s = Series(range(10, 0, -1), index=expected_index, dtype='int64', name='a') result = o.value_counts() tm.assert_series_equal(result, expected_s) assert result.index.name is None assert result.name == 'a' result = o.unique() if isinstance(o, Index): assert isinstance(result, o.__class__) tm.assert_index_equal(result, orig) elif is_datetimetz(o): # datetimetz Series returns array of Timestamp assert result[0] == orig[0] for r in result: assert isinstance(r, Timestamp) tm.assert_numpy_array_equal(result, orig._values.astype(object).values) else: tm.assert_numpy_array_equal(result, orig.values) assert o.nunique() == len(np.unique(o.values)) def test_value_counts_unique_nunique_null(self): for null_obj in [np.nan, None]: for orig in self.objs: o = orig.copy() klass = type(o) values = o._ndarray_values if not self._allow_na_ops(o): continue # special assign to the numpy array if is_datetimetz(o): if isinstance(o, DatetimeIndex): v = o.asi8 v[0:2] = iNaT values = o._shallow_copy(v) else: o = o.copy() o[0:2] = iNaT values = o._values elif needs_i8_conversion(o): values[0:2] = iNaT values = o._shallow_copy(values) else: values[0:2] = null_obj # check values has the same dtype as the original assert values.dtype == o.dtype # create repeated values, 'n'th element is repeated by n+1 # times if isinstance(o, (DatetimeIndex, PeriodIndex)): expected_index = o.copy() expected_index.name = None # attach name to klass o = klass(values.repeat(range(1, len(o) + 1))) o.name = 'a' else: if is_datetimetz(o): expected_index = orig._values._shallow_copy(values) else: expected_index = Index(values) expected_index.name = None o = o.repeat(range(1, len(o) + 1)) o.name = 'a' # check values has the same dtype as the original assert o.dtype == orig.dtype # check values correctly have NaN nanloc = np.zeros(len(o), dtype=np.bool) nanloc[:3] = True if isinstance(o, Index): tm.assert_numpy_array_equal(pd.isna(o), nanloc) else: exp = Series(nanloc, o.index, name='a') tm.assert_series_equal(pd.isna(o), exp) expected_s_na = Series(list(range(10, 2, -1)) + [3], index=expected_index[9:0:-1], dtype='int64', name='a') expected_s = Series(list(range(10, 2, -1)), index=expected_index[9:1:-1], dtype='int64', name='a') result_s_na = o.value_counts(dropna=False) tm.assert_series_equal(result_s_na, expected_s_na) assert result_s_na.index.name is None assert result_s_na.name == 'a' result_s = o.value_counts() tm.assert_series_equal(o.value_counts(), expected_s) assert result_s.index.name is None assert result_s.name == 'a' result = o.unique() if isinstance(o, Index): tm.assert_index_equal(result, Index(values[1:], name='a')) elif is_datetimetz(o): # unable to compare NaT / nan vals = values[2:].astype(object).values tm.assert_numpy_array_equal(result[1:], vals) assert result[0] is pd.NaT else: tm.assert_numpy_array_equal(result[1:], values[2:]) assert pd.isna(result[0]) assert result.dtype == orig.dtype assert o.nunique() == 8 assert o.nunique(dropna=False) == 9 def test_value_counts_inferred(self): klasses = [Index, Series] for klass in klasses: s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a'] s = klass(s_values) expected = Series([4, 3, 2, 1], 
index=['b', 'a', 'd', 'c']) tm.assert_series_equal(s.value_counts(), expected) if isinstance(s, Index): exp = Index(np.unique(np.array(s_values, dtype=np.object_))) tm.assert_index_equal(s.unique(), exp) else: exp = np.unique(np.array(s_values, dtype=np.object_)) tm.assert_numpy_array_equal(s.unique(), exp) assert s.nunique() == 4 # don't sort, have to sort after the fact as not sorting is # platform-dep hist = s.value_counts(sort=False).sort_values() expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values() tm.assert_series_equal(hist, expected) # sort ascending hist = s.value_counts(ascending=True) expected = Series([1, 2, 3, 4], index=list('cdab')) tm.assert_series_equal(hist, expected) # relative histogram. hist = s.value_counts(normalize=True) expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c']) tm.assert_series_equal(hist, expected) def test_value_counts_bins(self): klasses = [Index, Series] for klass in klasses: s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a'] s = klass(s_values) # bins pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1) s1 = Series([1, 1, 2, 3]) res1 = s1.value_counts(bins=1) exp1 = Series({Interval(0.997, 3.0): 4}) tm.assert_series_equal(res1, exp1) res1n = s1.value_counts(bins=1, normalize=True) exp1n = Series({Interval(0.997, 3.0): 1.0}) tm.assert_series_equal(res1n, exp1n) if isinstance(s1, Index): tm.assert_index_equal(s1.unique(), Index([1, 2, 3])) else: exp = np.array([1, 2, 3], dtype=np.int64) tm.assert_numpy_array_equal(s1.unique(), exp) assert s1.nunique() == 3 # these return the same res4 = s1.value_counts(bins=4, dropna=True) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4, exp4) res4 = s1.value_counts(bins=4, dropna=False) intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4, exp4) res4n = s1.value_counts(bins=4, normalize=True) exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4n, exp4n) # handle NA's properly s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b'] s = klass(s_values) expected = Series([4, 3, 2], index=['b', 'a', 'd']) tm.assert_series_equal(s.value_counts(), expected) if isinstance(s, Index): exp = Index(['a', 'b', np.nan, 'd']) tm.assert_index_equal(s.unique(), exp) else: exp = np.array(['a', 'b', np.nan, 'd'], dtype=object) tm.assert_numpy_array_equal(s.unique(), exp) assert s.nunique() == 3 s = klass({}) expected = Series([], dtype=np.int64) tm.assert_series_equal(s.value_counts(), expected, check_index_type=False) # returned dtype differs depending on original if isinstance(s, Index): tm.assert_index_equal(s.unique(), Index([]), exact=False) else: tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False) assert s.nunique() == 0 @pytest.mark.parametrize('klass', [Index, Series]) def test_value_counts_datetime64(self, klass): # GH 3002, datetime64[ns] # don't test names though txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG', 'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM']) f = StringIO(txt) df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]) s = klass(df['dt'].copy()) s.name = None idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00Z']) expected_s = Series([3, 2, 1], 
index=idx) tm.assert_series_equal(s.value_counts(), expected_s) expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'], dtype='datetime64[ns]') if isinstance(s, Index): tm.assert_index_equal(s.unique(), DatetimeIndex(expected)) else: tm.assert_numpy_array_equal(s.unique(), expected) assert s.nunique() == 3 # with NaT s = df['dt'].copy() s = klass([v for v in s.values] + [pd.NaT]) result = s.value_counts() assert result.index.dtype == 'datetime64[ns]' tm.assert_series_equal(result, expected_s) result = s.value_counts(dropna=False) expected_s[pd.NaT] = 1 tm.assert_series_equal(result, expected_s) unique = s.unique() assert unique.dtype == 'datetime64[ns]' # numpy_array_equal cannot compare pd.NaT if isinstance(s, Index): exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]) tm.assert_index_equal(unique, exp_idx) else: tm.assert_numpy_array_equal(unique[:3], expected) assert pd.isna(unique[3]) assert s.nunique() == 3 assert s.nunique(dropna=False) == 4 # timedelta64[ns] td = df.dt - df.dt + timedelta(1) td = klass(td, name='dt') result = td.value_counts() expected_s = Series([6], index=[Timedelta('1day')], name='dt') tm.assert_series_equal(result, expected_s) expected = TimedeltaIndex(['1 days'], name='dt') if isinstance(td, Index): tm.assert_index_equal(td.unique(), expected) else: tm.assert_numpy_array_equal(td.unique(), expected.values) td2 = timedelta(1) + (df.dt - df.dt) td2 = klass(td2, name='dt') result2 = td2.value_counts() tm.assert_series_equal(result2, expected_s) def test_factorize(self): for orig in self.objs: o = orig.copy() if isinstance(o, Index) and o.is_boolean(): exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp) exp_uniques = o exp_uniques = Index([False, True]) else: exp_arr = np.array(range(len(o)), dtype=np.intp) exp_uniques = o labels, uniques = o.factorize() tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): tm.assert_index_equal(uniques, Index(orig), check_names=False) else: # factorize explicitly resets name tm.assert_index_equal(uniques, exp_uniques, check_names=False) def test_factorize_repeated(self): for orig in self.objs: o = orig.copy() # don't test boolean if isinstance(o, Index) and o.is_boolean(): continue # sort by value, and create duplicates if isinstance(o, Series): o = o.sort_values() n = o.iloc[5:].append(o) else: indexer = o.argsort() o = o.take(indexer) n = o[5:].append(o) exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) labels, uniques = n.factorize(sort=True) tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): tm.assert_index_equal(uniques, Index(orig).sort_values(), check_names=False) else: tm.assert_index_equal(uniques, o, check_names=False) exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp) labels, uniques = n.factorize(sort=False) tm.assert_numpy_array_equal(labels, exp_arr) if isinstance(o, Series): expected = Index(o.iloc[5:10].append(o.iloc[:5])) tm.assert_index_equal(uniques, expected, check_names=False) else: expected = o[5:10].append(o[:5]) tm.assert_index_equal(uniques, expected, check_names=False) def test_duplicated_drop_duplicates_index(self): # GH 4060 for original in self.objs: if isinstance(original, Index): # special case if original.is_boolean(): result = original.drop_duplicates() expected = Index([False, True], name='a') tm.assert_index_equal(result, expected) continue # original doesn't have duplicates expected = np.array([False] * len(original), dtype=bool) duplicated = 
original.duplicated() tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = original.drop_duplicates() tm.assert_index_equal(result, original) assert result is not original # has_duplicates assert not original.has_duplicates # create repeated values, 3rd and 5th values are duplicated idx = original[list(range(len(original))) + [5, 3]] expected = np.array([False] * len(original) + [True, True], dtype=bool) duplicated = idx.duplicated() tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool tm.assert_index_equal(idx.drop_duplicates(), original) base = [False] * len(idx) base[3] = True base[5] = True expected = np.array(base) duplicated = idx.duplicated(keep='last') tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = idx.drop_duplicates(keep='last') tm.assert_index_equal(result, idx[~expected]) base = [False] * len(original) + [True, True] base[3] = True base[5] = True expected = np.array(base) duplicated = idx.duplicated(keep=False) tm.assert_numpy_array_equal(duplicated, expected) assert duplicated.dtype == bool result = idx.drop_duplicates(keep=False) tm.assert_index_equal(result, idx[~expected]) with tm.assert_raises_regex( TypeError, r"drop_duplicates\(\) got an unexpected " "keyword argument"): idx.drop_duplicates(inplace=True) else: expected = Series([False] * len(original), index=original.index, name='a') tm.assert_series_equal(original.duplicated(), expected) result = original.drop_duplicates() tm.assert_series_equal(result, original) assert result is not original idx = original.index[list(range(len(original))) + [5, 3]] values = original._values[list(range(len(original))) + [5, 3]] s = Series(values, index=idx, name='a') expected = Series([False] * len(original) + [True, True], index=idx, name='a') tm.assert_series_equal(s.duplicated(), expected) tm.assert_series_equal(s.drop_duplicates(), original) base = [False] * len(idx) base[3] = True base[5] = True expected = Series(base, index=idx, name='a') tm.assert_series_equal(s.duplicated(keep='last'), expected) tm.assert_series_equal(s.drop_duplicates(keep='last'), s[~np.array(base)]) base = [False] * len(original) + [True, True] base[3] = True base[5] = True expected = Series(base, index=idx, name='a') tm.assert_series_equal(s.duplicated(keep=False), expected) tm.assert_series_equal(s.drop_duplicates(keep=False), s[~np.array(base)]) s.drop_duplicates(inplace=True) tm.assert_series_equal(s, original) def test_drop_duplicates_series_vs_dataframe(self): # GH 14192 df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'], 'b': [2, 2, np.nan, np.nan, np.nan], 'c': [3, 3, np.nan, np.nan, 'three'], 'd': [1, 2, 3, 4, 4], 'e': [datetime(2015, 1, 1), datetime(2015, 1, 1), datetime(2015, 2, 1), pd.NaT, pd.NaT] }) for column in df.columns: for keep in ['first', 'last', False]: dropped_frame = df[[column]].drop_duplicates(keep=keep) dropped_series = df[column].drop_duplicates(keep=keep) tm.assert_frame_equal(dropped_frame, dropped_series.to_frame()) def test_fillna(self): # # GH 11343 # though Index.fillna and Series.fillna has separate impl, # test here to confirm these works as the same for orig in self.objs: o = orig.copy() values = o.values # values will not be changed result = o.fillna(o.astype(object).values[0]) if isinstance(o, Index): tm.assert_index_equal(o, result) else: tm.assert_series_equal(o, result) # check shallow_copied assert o is not result for null_obj in [np.nan, None]: for orig in self.objs: o = orig.copy() klass = type(o) if not 
self._allow_na_ops(o): continue if needs_i8_conversion(o): values = o.astype(object).values fill_value = values[0] values[0:2] = pd.NaT else: values = o.values.copy() fill_value = o.values[0] values[0:2] = null_obj expected = [fill_value] * 2 + list(values[2:]) expected = klass(expected) o = klass(values) # check values has the same dtype as the original assert o.dtype == orig.dtype result = o.fillna(fill_value) if isinstance(o, Index): tm.assert_index_equal(result, expected) else: tm.assert_series_equal(result, expected) # check shallow_copied assert o is not result @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") def test_memory_usage(self): for o in self.objs: res = o.memory_usage() res_deep = o.memory_usage(deep=True) if (is_object_dtype(o) or (isinstance(o, Series) and is_object_dtype(o.index))): # if there are objects, only deep will pick them up assert res_deep > res else: assert res == res_deep if isinstance(o, Series): assert ((o.memory_usage(index=False) + o.index.memory_usage()) == o.memory_usage(index=True)) # sys.getsizeof will call the .memory_usage with # deep=True, and add on some GC overhead diff = res_deep - sys.getsizeof(o) assert abs(diff) < 100 def test_searchsorted(self): # See gh-12238 for o in self.objs: index = np.searchsorted(o, max(o)) assert 0 <= index <= len(o) index = np.searchsorted(o, max(o), sorter=range(len(o))) assert 0 <= index <= len(o) def test_validate_bool_args(self): invalid_values = [1, "True", [1, 2, 3], 5.0] for value in invalid_values: with pytest.raises(ValueError): self.int_series.drop_duplicates(inplace=value) class TestTranspose(Ops): errmsg = "the 'axes' parameter is not supported" def test_transpose(self): for obj in self.objs: if isinstance(obj, Index): tm.assert_index_equal(obj.transpose(), obj) else: tm.assert_series_equal(obj.transpose(), obj) def test_transpose_non_default_axes(self): for obj in self.objs: tm.assert_raises_regex(ValueError, self.errmsg, obj.transpose, 1) tm.assert_raises_regex(ValueError, self.errmsg, obj.transpose, axes=1) def test_numpy_transpose(self): for obj in self.objs: if isinstance(obj, Index): tm.assert_index_equal(np.transpose(obj), obj) else: tm.assert_series_equal(np.transpose(obj), obj) tm.assert_raises_regex(ValueError, self.errmsg, np.transpose, obj, axes=1) class TestNoNewAttributesMixin(object): def test_mixin(self): class T(NoNewAttributesMixin): pass t = T() assert not hasattr(t, "__frozen") t.a = "test" assert t.a == "test" t._freeze() assert "__frozen" in dir(t) assert getattr(t, "__frozen") def f(): t.b = "test" pytest.raises(AttributeError, f) assert not hasattr(t, "b") class TestToIterable(object): # test that we convert an iterable to python types dtypes = [ ('int8', (int, long)), ('int16', (int, long)), ('int32', (int, long)), ('int64', (int, long)), ('uint8', (int, long)), ('uint16', (int, long)), ('uint32', (int, long)), ('uint64', (int, long)), ('float16', float), ('float32', float), ('float64', float), ('datetime64[ns]', Timestamp), ('datetime64[ns, US/Eastern]', Timestamp), ('timedelta64[ns]', Timedelta)] @pytest.mark.parametrize( 'dtype, rdtype', dtypes) @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable(self, typ, method, dtype, rdtype): # gh-10904 # gh-13258 # coerce iteration to underlying python / pandas types s = typ([1], dtype=dtype) result = method(s)[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 
'dtype, rdtype, obj', [ ('object', object, 'a'), ('object', (int, long), 1), ('category', object, 'a'), ('category', (int, long), 1)]) @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj): # gh-10904 # gh-13258 # coerce iteration to underlying python / pandas types s = typ([obj], dtype=dtype) result = method(s)[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 'dtype, rdtype', dtypes) def test_iterable_items(self, dtype, rdtype): # gh-13258 # test items / iteritems yields the correct boxed scalars # this only applies to series s = Series([1], dtype=dtype) _, result = list(s.items())[0] assert isinstance(result, rdtype) _, result = list(s.iteritems())[0] assert isinstance(result, rdtype) @pytest.mark.parametrize( 'dtype, rdtype', dtypes + [ ('object', (int, long)), ('category', (int, long))]) @pytest.mark.parametrize('typ', [Series, Index]) def test_iterable_map(self, typ, dtype, rdtype): # gh-13236 # coerce iteration to underlying python / pandas types s = typ([1], dtype=dtype) result = s.map(type)[0] if not isinstance(rdtype, tuple): rdtype = tuple([rdtype]) assert result in rdtype @pytest.mark.parametrize( 'method', [ lambda x: x.tolist(), lambda x: list(x), lambda x: list(x.__iter__()), ], ids=['tolist', 'list', 'iter']) def test_categorial_datetimelike(self, method): i = CategoricalIndex([Timestamp('1999-12-31'), Timestamp('2000-12-31')]) result = method(i)[0] assert isinstance(result, Timestamp) def test_iter_box(self): vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')] s = Series(vals) assert s.dtype == 'datetime64[ns]' for res, exp in zip(s, vals): assert isinstance(res, Timestamp) assert res.tz is None assert res == exp vals = [Timestamp('2011-01-01', tz='US/Eastern'), Timestamp('2011-01-02', tz='US/Eastern')] s = Series(vals) assert s.dtype == 'datetime64[ns, US/Eastern]' for res, exp in zip(s, vals): assert isinstance(res, Timestamp) assert res.tz == exp.tz assert res == exp # timedelta vals = [Timedelta('1 days'), Timedelta('2 days')] s = Series(vals) assert s.dtype == 'timedelta64[ns]' for res, exp in zip(s, vals): assert isinstance(res, Timedelta) assert res == exp # period (object dtype, not boxed) vals = [pd.Period('2011-01-01', freq='M'), pd.Period('2011-01-02', freq='M')] s = Series(vals) assert s.dtype == 'object' for res, exp in zip(s, vals): assert isinstance(res, pd.Period) assert res.freq == 'M' assert res == exp @pytest.mark.parametrize('array, expected_type, dtype', [ (np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'), (np.array(['a', 'b']), np.ndarray, 'object'), (pd.Categorical(['a', 'b']), pd.Categorical, 'category'), (pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'), (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex, 'datetime64[ns, US/Central]'), (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'), (pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'), (pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'), ]) def test_values_consistent(array, expected_type, dtype): l_values = pd.Series(array)._values r_values = pd.Index(array)._values assert type(l_values) is expected_type assert type(l_values) is type(r_values) if isinstance(l_values, np.ndarray): tm.assert_numpy_array_equal(l_values, r_values) elif isinstance(l_values, pd.Index): tm.assert_index_equal(l_values, 
r_values) elif pd.api.types.is_categorical(l_values): tm.assert_categorical_equal(l_values, r_values) else: raise TypeError("Unexpected type {}".format(type(l_values))) assert l_values.dtype == dtype assert r_values.dtype == dtype @pytest.mark.parametrize('array, expected', [ (np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)), (np.array(['0', '1']), np.array(['0', '1'], dtype=object)), (pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')), (pd.DatetimeIndex(['2017-01-01T00:00:00']), np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')), (pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"), np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')), (pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')), pytest.param( pd.PeriodIndex(['2017', '2018'], freq='D'), np.array([17167, 17532]), marks=pytest.mark.xfail(reason="PeriodArray Not implemented") ), ]) def test_ndarray_values(array, expected): l_values = pd.Series(array)._ndarray_values r_values = pd.Index(array)._ndarray_values tm.assert_numpy_array_equal(l_values, r_values) tm.assert_numpy_array_equal(l_values, expected)
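# A minimal, hypothetical sketch of the value_counts / unique / nunique
# behaviour exercised by the tests above, using only the public pandas API;
# the sample data below is an illustrative assumption, not a test fixture.
import numpy as np
import pandas as pd

s = pd.Series(['a', 'b', 'b', np.nan])
s.value_counts()                       # NaN excluded by default: b -> 2, a -> 1
s.value_counts(dropna=False)           # NaN counted when dropna=False
s.unique()                             # array(['a', 'b', nan], dtype=object)
s.nunique(), s.nunique(dropna=False)   # (2, 3)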
bsd-3-clause
rvraghav93/scikit-learn
sklearn/feature_extraction/tests/test_text.py
8
35969
from __future__ import unicode_literals import warnings from sklearn.feature_extraction.text import strip_tags from sklearn.feature_extraction.text import strip_accents_unicode from sklearn.feature_extraction.text import strip_accents_ascii from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.base import clone import numpy as np from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_raises from sklearn.utils.testing import (assert_equal, assert_false, assert_true, assert_not_equal, assert_almost_equal, assert_in, assert_less, assert_greater, assert_warns_message, assert_raise_message, clean_warning_registry, SkipTest) from collections import defaultdict, Mapping from functools import partial import pickle from io import StringIO JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) NOTJUNK_FOOD_DOCS = ( "the salad celeri copyright", "the salad salad sparkling water copyright", "the the celeri celeri copyright", "the tomato tomato salad water", "the tomato salad water copyright", ) ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS def uppercase(s): return strip_accents_unicode(s).upper() def strip_eacute(s): return s.replace('\xe9', 'e') def split_tokenize(s): return s.split() def lazy_analyze(s): return ['the_ultimate_feature'] def test_strip_accents(): # check some classical latin accentuated symbols a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb' expected = 'aaaaaaceeee' assert_equal(strip_accents_unicode(a), expected) a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd' expected = 'iiiinooooouuuuy' assert_equal(strip_accents_unicode(a), expected) # check some arabic a = '\u0625' # halef with a hamza below expected = '\u0627' # simple halef assert_equal(strip_accents_unicode(a), expected) # mix letters accentuated and not a = "this is \xe0 test" expected = 'this is a test' assert_equal(strip_accents_unicode(a), expected) def test_to_ascii(): # check some classical latin accentuated symbols a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb' expected = 'aaaaaaceeee' assert_equal(strip_accents_ascii(a), expected) a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd' expected = 'iiiinooooouuuuy' assert_equal(strip_accents_ascii(a), expected) # check some arabic a = '\u0625' # halef with a hamza below expected = '' # halef has no direct ascii match assert_equal(strip_accents_ascii(a), expected) # mix letters accentuated and not a = "this is \xe0 test" expected = 'this is a test' assert_equal(strip_accents_ascii(a), expected) def test_word_analyzer_unigrams(): for Vectorizer in (CountVectorizer, HashingVectorizer): wa = Vectorizer(strip_accents='ascii').build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " "c'\xe9tait pas tr\xeas bon.") expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi', 'etait', 'pas', 'tres', 'bon'] 
assert_equal(wa(text), expected) text = "This is a test, really.\n\n I met Harry yesterday." expected = ['this', 'is', 'test', 'really', 'met', 'harry', 'yesterday'] assert_equal(wa(text), expected) wa = Vectorizer(input='file').build_analyzer() text = StringIO("This is a test with a file-like object!") expected = ['this', 'is', 'test', 'with', 'file', 'like', 'object'] assert_equal(wa(text), expected) # with custom preprocessor wa = Vectorizer(preprocessor=uppercase).build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " " c'\xe9tait pas tr\xeas bon.") expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI', 'ETAIT', 'PAS', 'TRES', 'BON'] assert_equal(wa(text), expected) # with custom tokenizer wa = Vectorizer(tokenizer=split_tokenize, strip_accents='ascii').build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " "c'\xe9tait pas tr\xeas bon.") expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,', "c'etait", 'pas', 'tres', 'bon.'] assert_equal(wa(text), expected) def test_word_analyzer_unigrams_and_bigrams(): wa = CountVectorizer(analyzer="word", strip_accents='unicode', ngram_range=(1, 2)).build_analyzer() text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon." expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi', 'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du', 'du kangourou', 'kangourou ce', 'ce midi', 'midi etait', 'etait pas', 'pas tres', 'tres bon'] assert_equal(wa(text), expected) def test_unicode_decode_error(): # decode_error default to strict, so this should fail # First, encode (as bytes) a unicode string. text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon." text_bytes = text.encode('utf-8') # Then let the Analyzer try to decode it as ascii. It should fail, # because we have given it an incorrect encoding. 
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer() assert_raises(UnicodeDecodeError, wa, text_bytes) ca = CountVectorizer(analyzer='char', ngram_range=(3, 6), encoding='ascii').build_analyzer() assert_raises(UnicodeDecodeError, ca, text_bytes) def test_char_ngram_analyzer(): cnga = CountVectorizer(analyzer='char', strip_accents='unicode', ngram_range=(3, 6)).build_analyzer() text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon" expected = ["j'a", "'ai", 'ai ', 'i m', ' ma'] assert_equal(cnga(text)[:5], expected) expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon'] assert_equal(cnga(text)[-5:], expected) text = "This \n\tis a test, really.\n\n I met Harry yesterday" expected = ['thi', 'his', 'is ', 's i', ' is'] assert_equal(cnga(text)[:5], expected) expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday'] assert_equal(cnga(text)[-5:], expected) cnga = CountVectorizer(input='file', analyzer='char', ngram_range=(3, 6)).build_analyzer() text = StringIO("This is a test with a file-like object!") expected = ['thi', 'his', 'is ', 's i', ' is'] assert_equal(cnga(text)[:5], expected) def test_char_wb_ngram_analyzer(): cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode', ngram_range=(3, 6)).build_analyzer() text = "This \n\tis a test, really.\n\n I met Harry yesterday" expected = [' th', 'thi', 'his', 'is ', ' thi'] assert_equal(cnga(text)[:5], expected) expected = ['yester', 'esterd', 'sterda', 'terday', 'erday '] assert_equal(cnga(text)[-5:], expected) cnga = CountVectorizer(input='file', analyzer='char_wb', ngram_range=(3, 6)).build_analyzer() text = StringIO("A test with a file-like object!") expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes'] assert_equal(cnga(text)[:6], expected) def test_countvectorizer_custom_vocabulary(): vocab = {"pizza": 0, "beer": 1} terms = set(vocab.keys()) # Try a few of the supported types. 
for typ in [dict, list, iter, partial(defaultdict, int)]: v = typ(vocab) vect = CountVectorizer(vocabulary=v) vect.fit(JUNK_FOOD_DOCS) if isinstance(v, Mapping): assert_equal(vect.vocabulary_, vocab) else: assert_equal(set(vect.vocabulary_), terms) X = vect.transform(JUNK_FOOD_DOCS) assert_equal(X.shape[1], len(terms)) def test_countvectorizer_custom_vocabulary_pipeline(): what_we_like = ["pizza", "beer"] pipe = Pipeline([ ('count', CountVectorizer(vocabulary=what_we_like)), ('tfidf', TfidfTransformer())]) X = pipe.fit_transform(ALL_FOOD_DOCS) assert_equal(set(pipe.named_steps['count'].vocabulary_), set(what_we_like)) assert_equal(X.shape[1], len(what_we_like)) def test_countvectorizer_custom_vocabulary_repeated_indeces(): vocab = {"pizza": 0, "beer": 0} try: CountVectorizer(vocabulary=vocab) except ValueError as e: assert_in("vocabulary contains repeated indices", str(e).lower()) def test_countvectorizer_custom_vocabulary_gap_index(): vocab = {"pizza": 1, "beer": 2} try: CountVectorizer(vocabulary=vocab) except ValueError as e: assert_in("doesn't contain index", str(e).lower()) def test_countvectorizer_stop_words(): cv = CountVectorizer() cv.set_params(stop_words='english') assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS) cv.set_params(stop_words='_bad_str_stop_') assert_raises(ValueError, cv.get_stop_words) cv.set_params(stop_words='_bad_unicode_stop_') assert_raises(ValueError, cv.get_stop_words) stoplist = ['some', 'other', 'words'] cv.set_params(stop_words=stoplist) assert_equal(cv.get_stop_words(), set(stoplist)) def test_countvectorizer_empty_vocabulary(): try: vect = CountVectorizer(vocabulary=[]) vect.fit(["foo"]) assert False, "we shouldn't get here" except ValueError as e: assert_in("empty vocabulary", str(e).lower()) try: v = CountVectorizer(max_df=1.0, stop_words="english") # fit on stopwords only v.fit(["to be or not to be", "and me too", "and so do you"]) assert False, "we shouldn't get here" except ValueError as e: assert_in("empty vocabulary", str(e).lower()) def test_fit_countvectorizer_twice(): cv = CountVectorizer() X1 = cv.fit_transform(ALL_FOOD_DOCS[:5]) X2 = cv.fit_transform(ALL_FOOD_DOCS[5:]) assert_not_equal(X1.shape[1], X2.shape[1]) def test_tf_idf_smoothing(): X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] tr = TfidfTransformer(smooth_idf=True, norm='l2') tfidf = tr.fit_transform(X).toarray() assert_true((tfidf >= 0).all()) # check normalization assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.]) # this is robust to features with only zeros X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] tr = TfidfTransformer(smooth_idf=True, norm='l2') tfidf = tr.fit_transform(X).toarray() assert_true((tfidf >= 0).all()) def test_tfidf_no_smoothing(): X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]] tr = TfidfTransformer(smooth_idf=False, norm='l2') tfidf = tr.fit_transform(X).toarray() assert_true((tfidf >= 0).all()) # check normalization assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.]) # the lack of smoothing make IDF fragile in the presence of feature with # only zeros X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]] tr = TfidfTransformer(smooth_idf=False, norm='l2') clean_warning_registry() with warnings.catch_warnings(record=True) as w: 1. 
/ np.array([0.]) numpy_provides_div0_warning = len(w) == 1 in_warning_message = 'divide by zero' tfidf = assert_warns_message(RuntimeWarning, in_warning_message, tr.fit_transform, X).toarray() if not numpy_provides_div0_warning: raise SkipTest("Numpy does not provide div 0 warnings.") def test_sublinear_tf(): X = [[1], [2], [3]] tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None) tfidf = tr.fit_transform(X).toarray() assert_equal(tfidf[0], 1) assert_greater(tfidf[1], tfidf[0]) assert_greater(tfidf[2], tfidf[1]) assert_less(tfidf[1], 2) assert_less(tfidf[2], 3) def test_vectorizer(): # raw documents as an iterator train_data = iter(ALL_FOOD_DOCS[:-1]) test_data = [ALL_FOOD_DOCS[-1]] n_train = len(ALL_FOOD_DOCS) - 1 # test without vocabulary v1 = CountVectorizer(max_df=0.5) counts_train = v1.fit_transform(train_data) if hasattr(counts_train, 'tocsr'): counts_train = counts_train.tocsr() assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2) # build a vectorizer v1 with the same vocabulary as the one fitted by v1 v2 = CountVectorizer(vocabulary=v1.vocabulary_) # compare that the two vectorizer give the same output on the test sample for v in (v1, v2): counts_test = v.transform(test_data) if hasattr(counts_test, 'tocsr'): counts_test = counts_test.tocsr() vocabulary = v.vocabulary_ assert_equal(counts_test[0, vocabulary["salad"]], 1) assert_equal(counts_test[0, vocabulary["tomato"]], 1) assert_equal(counts_test[0, vocabulary["water"]], 1) # stop word from the fixed list assert_false("the" in vocabulary) # stop word found automatically by the vectorizer DF thresholding # words that are high frequent across the complete corpus are likely # to be not informative (either real stop words of extraction # artifacts) assert_false("copyright" in vocabulary) # not present in the sample assert_equal(counts_test[0, vocabulary["coke"]], 0) assert_equal(counts_test[0, vocabulary["burger"]], 0) assert_equal(counts_test[0, vocabulary["beer"]], 0) assert_equal(counts_test[0, vocabulary["pizza"]], 0) # test tf-idf t1 = TfidfTransformer(norm='l1') tfidf = t1.fit(counts_train).transform(counts_train).toarray() assert_equal(len(t1.idf_), len(v1.vocabulary_)) assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_))) # test tf-idf with new data tfidf_test = t1.transform(counts_test).toarray() assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_))) # test tf alone t2 = TfidfTransformer(norm='l1', use_idf=False) tf = t2.fit(counts_train).transform(counts_train).toarray() assert_false(hasattr(t2, "idf_")) # test idf transform with unlearned idf vector t3 = TfidfTransformer(use_idf=True) assert_raises(ValueError, t3.transform, counts_train) # test idf transform with incompatible n_features X = [[1, 1, 5], [1, 1, 0]] t3.fit(X) X_incompt = [[1, 3], [1, 3]] assert_raises(ValueError, t3.transform, X_incompt) # L1-normalized term frequencies sum to one assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train) # test the direct tfidf vectorizer # (equivalent to term count vectorizer + tfidf transformer) train_data = iter(ALL_FOOD_DOCS[:-1]) tv = TfidfVectorizer(norm='l1') tv.max_df = v1.max_df tfidf2 = tv.fit_transform(train_data).toarray() assert_false(tv.fixed_vocabulary_) assert_array_almost_equal(tfidf, tfidf2) # test the direct tfidf vectorizer with new data tfidf_test2 = tv.transform(test_data).toarray() assert_array_almost_equal(tfidf_test, tfidf_test2) # test transform on unfitted vectorizer with empty vocabulary v3 = CountVectorizer(vocabulary=None) assert_raises(ValueError, 
v3.transform, train_data) # ascii preprocessor? v3.set_params(strip_accents='ascii', lowercase=False) assert_equal(v3.build_preprocessor(), strip_accents_ascii) # error on bad strip_accents param v3.set_params(strip_accents='_gabbledegook_', preprocessor=None) assert_raises(ValueError, v3.build_preprocessor) # error with bad analyzer type v3.set_params = '_invalid_analyzer_type_' assert_raises(ValueError, v3.build_analyzer) def test_tfidf_vectorizer_setters(): tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False, sublinear_tf=False) tv.norm = 'l1' assert_equal(tv._tfidf.norm, 'l1') tv.use_idf = True assert_true(tv._tfidf.use_idf) tv.smooth_idf = True assert_true(tv._tfidf.smooth_idf) tv.sublinear_tf = True assert_true(tv._tfidf.sublinear_tf) def test_hashing_vectorizer(): v = HashingVectorizer() X = v.transform(ALL_FOOD_DOCS) token_nnz = X.nnz assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features)) assert_equal(X.dtype, v.dtype) # By default the hashed values receive a random sign and l2 normalization # makes the feature values bounded assert_true(np.min(X.data) > -1) assert_true(np.min(X.data) < 0) assert_true(np.max(X.data) > 0) assert_true(np.max(X.data) < 1) # Check that the rows are normalized for i in range(X.shape[0]): assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0) # Check vectorization with some non-default parameters v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1') X = v.transform(ALL_FOOD_DOCS) assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features)) assert_equal(X.dtype, v.dtype) # ngrams generate more non zeros ngrams_nnz = X.nnz assert_true(ngrams_nnz > token_nnz) assert_true(ngrams_nnz < 2 * token_nnz) # makes the feature values bounded assert_true(np.min(X.data) > 0) assert_true(np.max(X.data) < 1) # Check that the rows are normalized for i in range(X.shape[0]): assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0) def test_feature_names(): cv = CountVectorizer(max_df=0.5) # test for Value error on unfitted/empty vocabulary assert_raises(ValueError, cv.get_feature_names) X = cv.fit_transform(ALL_FOOD_DOCS) n_samples, n_features = X.shape assert_equal(len(cv.vocabulary_), n_features) feature_names = cv.get_feature_names() assert_equal(len(feature_names), n_features) assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water'], feature_names) for idx, name in enumerate(feature_names): assert_equal(idx, cv.vocabulary_.get(name)) def test_vectorizer_max_features(): vec_factories = ( CountVectorizer, TfidfVectorizer, ) expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza']) expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke', u'sparkling', u'water', u'the']) for vec_factory in vec_factories: # test bounded number of extracted features vectorizer = vec_factory(max_df=0.6, max_features=4) vectorizer.fit(ALL_FOOD_DOCS) assert_equal(set(vectorizer.vocabulary_), expected_vocabulary) assert_equal(vectorizer.stop_words_, expected_stop_words) def test_count_vectorizer_max_features(): # Regression test: max_features didn't work correctly in 0.14. 
cv_1 = CountVectorizer(max_features=1) cv_3 = CountVectorizer(max_features=3) cv_None = CountVectorizer(max_features=None) counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) features_1 = cv_1.get_feature_names() features_3 = cv_3.get_feature_names() features_None = cv_None.get_feature_names() # The most common feature is "the", with frequency 7. assert_equal(7, counts_1.max()) assert_equal(7, counts_3.max()) assert_equal(7, counts_None.max()) # The most common feature should be the same assert_equal("the", features_1[np.argmax(counts_1)]) assert_equal("the", features_3[np.argmax(counts_3)]) assert_equal("the", features_None[np.argmax(counts_None)]) def test_vectorizer_max_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', max_df=1.0) vect.fit(test_data) assert_true('a' in vect.vocabulary_.keys()) assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5 vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain assert_true('a' in vect.stop_words_) assert_equal(len(vect.stop_words_), 2) vect.max_df = 1 vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain assert_true('a' in vect.stop_words_) assert_equal(len(vect.stop_words_), 2) def test_vectorizer_min_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', min_df=1) vect.fit(test_data) assert_true('a' in vect.vocabulary_.keys()) assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) vect.min_df = 2 vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain assert_true('c' in vect.stop_words_) assert_equal(len(vect.stop_words_), 4) vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains assert_true('c' in vect.stop_words_) assert_equal(len(vect.stop_words_), 5) def test_count_binary_occurrences(): # by default multiple occurrences are counted as longs test_data = ['aaabc', 'abbde'] vect = CountVectorizer(analyzer='char', max_df=1.0) X = vect.fit_transform(test_data).toarray() assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names()) assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X) # using boolean features, we can fetch the binary occurrence info # instead. 
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True) X = vect.fit_transform(test_data).toarray() assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X) # check the ability to change the dtype vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True, dtype=np.float32) X_sparse = vect.fit_transform(test_data) assert_equal(X_sparse.dtype, np.float32) def test_hashed_binary_occurrences(): # by default multiple occurrences are counted as longs test_data = ['aaabc', 'abbde'] vect = HashingVectorizer(analyzer='char', non_negative=True, norm=None) X = vect.transform(test_data) assert_equal(np.max(X[0:1].data), 3) assert_equal(np.max(X[1:2].data), 2) assert_equal(X.dtype, np.float64) # using boolean features, we can fetch the binary occurrence info # instead. vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True, norm=None) X = vect.transform(test_data) assert_equal(np.max(X.data), 1) assert_equal(X.dtype, np.float64) # check the ability to change the dtype vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True, norm=None, dtype=np.float64) X = vect.transform(test_data) assert_equal(X.dtype, np.float64) def test_vectorizer_inverse_transform(): # raw documents data = ALL_FOOD_DOCS for vectorizer in (TfidfVectorizer(), CountVectorizer()): transformed_data = vectorizer.fit_transform(data) inversed_data = vectorizer.inverse_transform(transformed_data) analyze = vectorizer.build_analyzer() for doc, inversed_terms in zip(data, inversed_data): terms = np.sort(np.unique(analyze(doc))) inversed_terms = np.sort(np.unique(inversed_terms)) assert_array_equal(terms, inversed_terms) # Test that inverse_transform also works with numpy arrays transformed_data = transformed_data.toarray() inversed_data2 = vectorizer.inverse_transform(transformed_data) for terms, terms2 in zip(inversed_data, inversed_data2): assert_array_equal(np.sort(terms), np.sort(terms2)) def test_count_vectorizer_pipeline_grid_selection(): # raw documents data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS # label junk food as -1, the others as +1 target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) # split the dataset for model development and final evaluation train_data, test_data, target_train, target_test = train_test_split( data, target, test_size=.2, random_state=0) pipeline = Pipeline([('vect', CountVectorizer()), ('svc', LinearSVC())]) parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], 'svc__loss': ('hinge', 'squared_hinge') } # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) # Check that the best model found by grid search is 100% correct on the # held out evaluation set. 
pred = grid_search.fit(train_data, target_train).predict(test_data) assert_array_equal(pred, target_test) # on this toy dataset bigram representation which is used in the last of # the grid_search is considered the best estimator since they all converge # to 100% accuracy models assert_equal(grid_search.best_score_, 1.0) best_vectorizer = grid_search.best_estimator_.named_steps['vect'] assert_equal(best_vectorizer.ngram_range, (1, 1)) def test_vectorizer_pipeline_grid_selection(): # raw documents data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS # label junk food as -1, the others as +1 target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) # split the dataset for model development and final evaluation train_data, test_data, target_train, target_test = train_test_split( data, target, test_size=.1, random_state=0) pipeline = Pipeline([('vect', TfidfVectorizer()), ('svc', LinearSVC())]) parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], 'vect__norm': ('l1', 'l2'), 'svc__loss': ('hinge', 'squared_hinge'), } # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) # Check that the best model found by grid search is 100% correct on the # held out evaluation set. pred = grid_search.fit(train_data, target_train).predict(test_data) assert_array_equal(pred, target_test) # on this toy dataset bigram representation which is used in the last of # the grid_search is considered the best estimator since they all converge # to 100% accuracy models assert_equal(grid_search.best_score_, 1.0) best_vectorizer = grid_search.best_estimator_.named_steps['vect'] assert_equal(best_vectorizer.ngram_range, (1, 1)) assert_equal(best_vectorizer.norm, 'l2') assert_false(best_vectorizer.fixed_vocabulary_) def test_vectorizer_pipeline_cross_validation(): # raw documents data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS # label junk food as -1, the others as +1 target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) pipeline = Pipeline([('vect', TfidfVectorizer()), ('svc', LinearSVC())]) cv_scores = cross_val_score(pipeline, data, target, cv=3) assert_array_equal(cv_scores, [1., 1., 1.]) def test_vectorizer_unicode(): # tests that the count vectorizer works with cyrillic. 
document = ( "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0" "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0" "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd" "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7" "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81" "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3" "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0" "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87" "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82" "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80" "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3" "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81" "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 " "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1" "\x8f.") vect = CountVectorizer() X_counted = vect.fit_transform([document]) assert_equal(X_counted.shape, (1, 15)) vect = HashingVectorizer(norm=None, non_negative=True) X_hashed = vect.transform([document]) assert_equal(X_hashed.shape, (1, 2 ** 20)) # No collisions on such a small dataset assert_equal(X_counted.nnz, X_hashed.nnz) # When norm is None and non_negative, the tokens are counted up to # collisions assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data)) def test_tfidf_vectorizer_with_fixed_vocabulary(): # non regression smoke test for inheritance issues vocabulary = ['pizza', 'celeri'] vect = TfidfVectorizer(vocabulary=vocabulary) X_1 = vect.fit_transform(ALL_FOOD_DOCS) X_2 = vect.transform(ALL_FOOD_DOCS) assert_array_almost_equal(X_1.toarray(), X_2.toarray()) assert_true(vect.fixed_vocabulary_) def test_pickling_vectorizer(): instances = [ HashingVectorizer(), HashingVectorizer(norm='l1'), HashingVectorizer(binary=True), HashingVectorizer(ngram_range=(1, 2)), CountVectorizer(), CountVectorizer(preprocessor=strip_tags), CountVectorizer(analyzer=lazy_analyze), CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), TfidfVectorizer(), TfidfVectorizer(analyzer=lazy_analyze), TfidfVectorizer().fit(JUNK_FOOD_DOCS), ] for orig in instances: s = pickle.dumps(orig) copy = pickle.loads(s) assert_equal(type(copy), orig.__class__) assert_equal(copy.get_params(), orig.get_params()) assert_array_equal( copy.fit_transform(JUNK_FOOD_DOCS).toarray(), orig.fit_transform(JUNK_FOOD_DOCS).toarray()) def test_countvectorizer_vocab_sets_when_pickling(): # ensure that vocabulary of type set is coerced to a list to # preserve iteration ordering after deserialization rng = np.random.RandomState(0) vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water']) for x in range(0, 100): vocab_set = set(rng.choice(vocab_words, size=5, replace=False)) cv = CountVectorizer(vocabulary=vocab_set) unpickled_cv = pickle.loads(pickle.dumps(cv)) cv.fit(ALL_FOOD_DOCS) unpickled_cv.fit(ALL_FOOD_DOCS) assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names()) def test_countvectorizer_vocab_dicts_when_pickling(): rng = np.random.RandomState(0) vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water']) for x in range(0, 100): vocab_dict = dict() words = rng.choice(vocab_words, size=5, replace=False) for y in range(0, 5): vocab_dict[words[y]] = y cv = CountVectorizer(vocabulary=vocab_dict) 
unpickled_cv = pickle.loads(pickle.dumps(cv)) cv.fit(ALL_FOOD_DOCS) unpickled_cv.fit(ALL_FOOD_DOCS) assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names()) def test_stop_words_removal(): # Ensure that deleting the stop_words_ attribute doesn't affect transform fitted_vectorizers = ( TfidfVectorizer().fit(JUNK_FOOD_DOCS), CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS) ) for vect in fitted_vectorizers: vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray() vect.stop_words_ = None stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray() delattr(vect, 'stop_words_') stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray() assert_array_equal(stop_None_transform, vect_transform) assert_array_equal(stop_del_transform, vect_transform) def test_pickling_transformer(): X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) orig = TfidfTransformer().fit(X) s = pickle.dumps(orig) copy = pickle.loads(s) assert_equal(type(copy), orig.__class__) assert_array_equal( copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray()) def test_non_unique_vocab(): vocab = ['a', 'b', 'c', 'a', 'a'] vect = CountVectorizer(vocabulary=vocab) assert_raises(ValueError, vect.fit, []) def test_hashingvectorizer_nan_in_docs(): # np.nan can appear when using pandas to load text fields from a csv file # with missing values. message = "np.nan is an invalid document, expected byte or unicode string." exception = ValueError def func(): hv = HashingVectorizer() hv.fit_transform(['hello world', np.nan, 'hello hello']) assert_raise_message(exception, message, func) def test_tfidfvectorizer_binary(): # Non-regression test: TfidfVectorizer used to ignore its "binary" param. v = TfidfVectorizer(binary=True, use_idf=False, norm=None) assert_true(v.binary) X = v.fit_transform(['hello world', 'hello hello']).toarray() assert_array_equal(X.ravel(), [1, 1, 1, 0]) X2 = v.transform(['hello world', 'hello hello']).toarray() assert_array_equal(X2.ravel(), [1, 1, 1, 0]) def test_tfidfvectorizer_export_idf(): vect = TfidfVectorizer(use_idf=True) vect.fit(JUNK_FOOD_DOCS) assert_array_almost_equal(vect.idf_, vect._tfidf.idf_) def test_vectorizer_vocab_clone(): vect_vocab = TfidfVectorizer(vocabulary=["the"]) vect_vocab_clone = clone(vect_vocab) vect_vocab.fit(ALL_FOOD_DOCS) vect_vocab_clone.fit(ALL_FOOD_DOCS) assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_) def test_vectorizer_string_object_as_input(): message = ("Iterable over raw text documents expected, " "string object received.") for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]: assert_raise_message( ValueError, message, vec.fit_transform, "hello world!") assert_raise_message( ValueError, message, vec.fit, "hello world!") assert_raise_message( ValueError, message, vec.transform, "hello world!")
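# A minimal, hypothetical usage sketch of the CountVectorizer / TfidfTransformer
# workflow exercised by the tests above; the documents and commented outputs are
# illustrative assumptions, not fixtures from this test module.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

docs = ["the pizza pizza beer", "the salad celeri"]
vect = CountVectorizer()              # default word analyzer with lowercasing
counts = vect.fit_transform(docs)     # sparse matrix of raw term counts
sorted(vect.vocabulary_)              # ['beer', 'celeri', 'pizza', 'salad', 'the']

tfidf = TfidfTransformer(norm='l2', smooth_idf=True).fit_transform(counts)
tfidf.multiply(tfidf).sum(axis=1)     # rows are l2-normalized, so each sums to ~1.0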
bsd-3-clause
cgalleguillosm/accasim
accasim/utils/plot_factory.py
1
51706
""" MIT License Copyright (c) 2017 cgalleguillosm, AlessioNetti Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.backends.backend_pdf import PdfPages from math import floor from accasim.utils.reader_class import DefaultReader from accasim.utils.misc import load_config, from_isodatetime_2_timestamp as timestamp_func, str_resources from accasim.utils.file import path_leaf, load_jsonfile from accasim.base.resource_manager_class import Resources from accasim.experimentation.schedule_parser import define_result_parser from accasim.utils.misc import DEFAULT_SIMULATION from copy import deepcopy from os.path import splitext, join from scipy.signal import savgol_filter from os.path import isfile import numpy as np from matplotlib.pyplot import boxplot class PlotFactory: """ A class for plot production and schedule files pre-processing. In this class, some basic algorithms are implemented for pre-processing the schedule files produced through simulation, and for producing some common evaluation plots. """ SCHEDULE_CLASS = 'schedule' BENCHMARK_CLASS = 'benchmark' SLOWDOWN_PLOT = 'slowdown' QUEUE_SIZE_PLOT = 'queue_size' LOAD_RATIO_PLOT = 'load_ratio' EFFICIENCY_PLOT = 'efficiency' SCALABILITY_PLOT = 'scalability' SIMULATION_TIME_PLOT = 'sim_time' SIMULAION_MEMORY_PLOT = 'sim_memory' PLOT_TYPES = { SCHEDULE_CLASS: [SLOWDOWN_PLOT, QUEUE_SIZE_PLOT, LOAD_RATIO_PLOT, EFFICIENCY_PLOT], BENCHMARK_CLASS: [SCALABILITY_PLOT, SIMULATION_TIME_PLOT, SIMULAION_MEMORY_PLOT] } def __init__(self, plot_class, sim_params_fname=None, config=None, resource=None, workload_parser=None, debug=False): """ The constructor for the class. :param plot_class: the plot_class of files to be analyzed. Can be either 'schedule', if schedule files are going to be analyzed, or 'benchmark' if resource usage log files will be analyzed; :params sim_params_fname: :param config: The path to a system configuration file. Needed for the schedule meta-simulation; :param resource: a resource type in the system to be considered. If specified, all resource-related statistics will be computed in regards to this resource alone; :param workload_parser: :param debug: Debug flag. """ self._debug = debug if not (plot_class in self.PLOT_TYPES.keys()): if self._debug: print('Wrong Plot plot_class chosen. 
Selecting schedule plot_class by default...') plot_class = self.SCHEDULE_CLASS self._plot_class = plot_class self._sim_params_fname = sim_params_fname # if sim_params_fname is not None and isfile(sim_params_fname) else None self._config = config self._resource = resource self._workload_parser = workload_parser self._preprocessed = False self._filepaths = [] self._labels = [] self._slowdowns = [] self._queuesizes = [] self._loadratiosX = [] self._loadratiosY = [] self._efficiencies = [] self._simdata = [] self._schedtimes = [] self._mantimes = [] self._simmemory = [] self._scalabilitydataX = [] self._scalabilitydataY = [] self._resource_order = None if self._sim_params_fname is None: self._resource_order = DEFAULT_SIMULATION['RESOURCE_ORDER'] # Base resource availability per-node (never changes) self._base_res = {} # Current resource availability per-node self._sys_res = {} # Aggregated used resources for all nodes self._used_res_sum = {} # Aggregate base resource availability for used nodes only self._avl_res_sum = {} # Aggregated base resource availability for all nodes self._base_res_sum = {} # Amount of currently used nodes self._used_nodes = 0 # Number of total nodes in the system self._total_nodes = 0 def set_files(self, paths, labels): """ Set the paths and labels of the files to be analyzed. :param paths: A list of filepaths related to the files to be analyzed; :param labels: the labels associated to each single file, used in the plots; must have the same length as paths; """ self._preprocessed = False if not isinstance(paths, (list, tuple)): self._filepaths = [paths] self._labels = [labels] else: self._filepaths = paths self._labels = labels if len(self._filepaths) != len(self._labels): if self._debug: print("Filepaths and Labels lists must have the same lengths.") self._labels = [] self._filepaths = [] def pre_process(self, trimSlowdown=True, trimQueueSize=False): """ Performs pre-processing on all specified files, according to their type. If the files are of the schedule type, a meta-simulation is run for each of them, computing data like slowdown, queue size, load ratios and such. If the data is of the benchmark type, the files are simply parsed and their information stored. :param: trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True :param: trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. 
Default is False """ if not self._preprocessed: # Perform pre-processing for schedule files if self._plot_class == self.SCHEDULE_CLASS: self._slowdowns = [] self._queuesizes = [] self._loadratiosX = [] self._loadratiosY = [] self._efficiencies = [] self._preprocessed = True for f in self._filepaths: # If an error is encountered on one of the files, the process is aborted if not self._getScheduleData(f, self._config, self._resource, trimSlowdown, trimQueueSize): self._preprocessed = False break # Perform pre-processing for benchmark files elif self._plot_class == self.BENCHMARK_CLASS: self._simdata = [] self._schedtimes = [] self._mantimes = [] self._simmemory = [] self._scalabilitydataX = [] self._scalabilitydataY = [] self._preprocessed = True for f in self._filepaths: if not self._getBenchmarkData(f): self._preprocessed = False break if not self._preprocessed: print("Could not process files, please ensure they are in the correct path and format.") return self._preprocessed def produce_plot(self, type, title='', scale='linear', xlim=(None, None), ylim=(None, None), legend=True, figsize=(7, 5), meansonly=False, alpha=0.005, smooth=30, output='Output.pdf', groups=1, **kwargs): """ Produces a single plot on the pre-processed files. The user can produce plots among the available types. These are: - slowdown: a box-plot distribution plot for slowdown values across test instances - queue_size: a box-plot for queue size in the simulation across test instances - load_ratio: a distribution scatter plot for the load ratio in function of the number of used nodes, for test instances separately; - efficiency: a box-plot for resource allocation efficiency across test instances - scalability: a scalability plot for dispatching methods across test instances - sim_time: a bar plot for the simulation timings across test instances - sim_memory: a bar plot for memory usage across test instances :param type: the type of the plot, must be one of the above; :param title: the title of the plot; :param scale: the scale of the plot (see matplotlib documentation); :param xlim: the left-right bounds for axis scaling, is a tuple; :param ylim: the bottom-top bounds for axis scaling, is a tuple; :param legend: activates the legend, is a boolean; :param figsize: the size of the figure, is a tuple; :param meansonly: triggers the plot of mean values alone in box-plots, is a boolean; :param alpha: the alpha of certain features in plots, in particular for distribution scatter plots; :param smooth: smoothing factor used for the Savitzky-Golay filter in the scalabily plot. The lower the number, the higher the smoothing; :param output: path of the output PDF file; """ if not self._preprocessed: self.pre_process() print("Plot_factory: Files were not pre-processed yet. 
Calling the pre_process method.") if type == self.SLOWDOWN_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._slowdowns, title=title, ylabel='Slowdown', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.QUEUE_SIZE_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._queuesizes, title=title, ylabel='Queue size', scale=scale, xlim=xlim, ylim=(0, None), figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.LOAD_RATIO_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.distribution_scatter_plot(self._loadratiosX, self._loadratiosY, title=title, scale=scale, xlim=(-0.01, 1.01), ylim=(-0.01, 1.01), figsize=figsize, alpha=alpha, output=output, **kwargs) elif type == self.EFFICIENCY_PLOT and self._plot_class == self.SCHEDULE_CLASS: self.box_plot(self._efficiencies, title=title, ylabel='Resource efficiency', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs) elif type == self.SCALABILITY_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.scalability_plot(self._scalabilitydataX, self._scalabilitydataY, title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, smooth=smooth, output=output, **kwargs) elif type == self.SIMULATION_TIME_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.box_plot_times(self._mantimes, self._schedtimes, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs) elif type == self.SIMULAION_MEMORY_PLOT and self._plot_class == self.BENCHMARK_CLASS: self.box_plot_memory(self._simmemory, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs) else: raise Exception("Plot type specified is not valid. Review the documentation for valid plot types.") def _getBenchmarkData(self, filepath): """ Pre-processes a resource usage log file. 
:param filepath: the path to the log file; :return: True if successful, False otherwise; """ if self._debug: print("- Pre-processing file " + filepath + "...") # Tries to read from the file, aborts if an error is encountered try: f = open(filepath) mantimes = [] schedtimes = [] mems = [] simtime = 0 disptime = 0 maxqueuesize = 0 for line in f: # Each line is parsed and values are extracted from it attrs = line.split(';') mantimes.append(float(attrs[4])) schedtimes.append((int(attrs[1]), float(attrs[3]))) mems.append(float(attrs[5])) simtime += float(attrs[2]) disptime += float(attrs[3]) if int(attrs[1]) > maxqueuesize: maxqueuesize = int(attrs[1]) f.close() except Exception as e: raise Exception("Error encountered while pre-processing: " + str(e)) # Certain statistics are computed from the data data = {} data['avgman'] = np.average(np.array(mantimes)) data['avgsched'] = np.average(np.array([el[1] for el in schedtimes])) data['simtime'] = simtime / 1000.0 data['schedtime'] = disptime / 1000.0 data['mantime'] = data['simtime'] - data['schedtime'] data['avgmem'] = np.average(np.array(mems)) data['maxmem'] = np.max(np.array(mems)) # The scalability data is computed through binning: we want to obtain an X, Y set, where in X are the distinct # queue sizes, and in Y are the average times in ms to perform dispatching on such queue sizes binningfactor = 1 bins = int(floor(maxqueuesize / binningfactor)) queuevalues = np.linspace(0, maxqueuesize, bins) mappinglist = [] for i in range(bins): mappinglist.append([]) step = (maxqueuesize) / (bins - 1) for qsize, stime in schedtimes: index = int(floor(qsize / step)) mappinglist[index].append(stime) finallist = [] finalqueuevalues = [] for i in range(len(mappinglist)): l = mappinglist[i] if len(l) > 0: finallist.append(sum(l) / len(l)) finalqueuevalues.append(queuevalues[i]) self._mantimes.append(mantimes) self._schedtimes.append([el[1] for el in schedtimes]) self._simmemory.append(mems) self._simdata.append(data) self._scalabilitydataX.append(finalqueuevalues) self._scalabilitydataY.append(finallist) return True def _getScheduleData(self, filepath, config, resource=None, trimSlowdown=True, trimQueueSize=False): """ Performs pre-processing on a schedule file through a meta-simulation process. :param filepath: The path of the file to be analyzed; :param config: The path to the system configuration file; :param resource: A resource to be considered for resource-related metrics; if none is specified, all resource types are used; :param: trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True :param: trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False :return: True if successful, False otherwise; """ if self._debug: print("- Pre-processing file " + filepath + "...") # Generates the dictionary of system resources from the config file resobject, equiv = self._generateSystemConfig(config) self._base_res = resobject.availability() res_types = resobject._system_resource_types # Makes sure the resource type exists in the system if resource is not None and resource not in resobject._system_resource_types: if self._debug: print("Resource type " + resource + "is not valid. 
Using all available resources...") resource = None # Tries to read from the log file, aborts if an error is encountered try: _sim_params_path = None # If the simulator config path points to a file, it is considered as is if self._sim_params_fname is not None and isfile(self._sim_params_fname): _sim_params_path = self._sim_params_fname # If it is a plain string, it is used as a token for config files in the experimentation elif self._sim_params_fname is not None: _path, _filename = path_leaf(filepath) _sim_params_path = join(_path, self._sim_params_fname) # If it is none, the default_result_parser will use the DEFAULT_SIMULATION config if _sim_params_path is not None: _resource_order = load_jsonfile(_sim_params_path)['RESOURCE_ORDER'] else: _resource_order = self._resource_order if self._workload_parser is not None: reader = DefaultReader(filepath, parser=self._workload_parser, equivalence=equiv) else: reader = DefaultReader(filepath, parser=define_result_parser(_sim_params_path), equivalence=equiv) slowdowns = [] timePoints = set() jobs = {} rev_timePoints = {} if self._debug: print("Loading jobs...") while True: # Jobs are read and their slowdown values are stored job = reader._read() if job is not None: job['start_time'] = timestamp_func(job['start_time']) job['end_time'] = timestamp_func(job['end_time']) job['queue_time'] = timestamp_func(job['queue_time']) _start_time = job['start_time'] _end_time = job['end_time'] _queued_time = job['queue_time'] duration = _end_time - _start_time wait = _start_time - _queued_time slowdown = (wait + duration) / duration if duration != 0 else wait if wait != 0 else 1.0 if slowdown > 1.0 or not trimSlowdown: slowdowns.append(slowdown) job_id = job['job_id'] jobs[job_id] = job # Timepoints for use in the simulation are stored timePoints.add(_queued_time) self._addToDictAsList(rev_timePoints, _queued_time, job_id, 'queue') timePoints.add(_start_time) self._addToDictAsList(rev_timePoints, _start_time, job_id, 'start') if duration > 0: timePoints.add(_end_time) self._addToDictAsList(rev_timePoints, _end_time, job_id, 'end') else: break except Exception as e: raise Exception("Error encountered while pre-processing: " + str(e)) # It may happen that the slowdown list is empty if all jobs have a value equal to 1. In this case we add # a fake value, equal to 1 as well if trimSlowdown and len(slowdowns) == 0: slowdowns.append(1) if self._debug: print("Jobs loaded. Sorting...") # We compute the final set of distinct, ordered timepoints timePoints = sorted(timePoints) timePointsIDX = 0 self._sys_res = deepcopy(self._base_res) self._base_res_sum = {k: sum(self._base_res[n][k] for n in self._base_res) for k in res_types} self._used_res_sum = {k: 0 for k in res_types} self._avl_res_sum = {k: 0 for k in res_types} self._used_nodes = 0 self._total_nodes = len(self._base_res.values()) queue = set() running = set() # Pre-allocating the lists to store performance metrics, for efficiency queued = [0] * len(timePoints) # [] resources = [0] * len(timePoints) # [] run = [0] * len(timePoints) # [] efficiency = [0] * len(timePoints) # [] efficiencyperjob = [0] * len(jobs) # [] efficiencyIDX = 0 if self._debug: print("Sorting done. 
Starting simulation...") # Meta-simulation: goes on until there are no more timepoints to consider while timePointsIDX < len(timePoints): point = timePoints[timePointsIDX] timePointsIDX += 1 # Adds to the queue jobs that were submitted in this timepoint jobstoqueue = rev_timePoints[point]['queue'] # queue += len(jobstoqueue) queue.update(jobstoqueue) # Jobs that have terminated release their resources jobstoend = rev_timePoints[point]['end'] if len(jobstoend) > 0: for j_id in jobstoend: j = jobs[j_id] req, assignations = self._getRequestedResources(_resource_order, j['assignations']) self._deallocate_resources(req, assignations, resource) # running -= len(jobstoend) running = running - jobstoend # Jobs that have to start take their resources from the system jobstostart = rev_timePoints[point]['start'] if len(jobstostart) > 0: for j_id in jobstostart: j = jobs[j_id] if j['end_time'] - j['start_time'] > 0: req, assignations = self._getRequestedResources(_resource_order, j['assignations']) self._allocate_resources(req, assignations, resource) # running += 1 running.add(j_id) # queue -= len(jobstostart) queue = queue - jobstostart # Additionally, we store for every started job its resource allocation efficiency for j_id in jobstostart: j = jobs[j_id] if j['end_time'] - j['start_time'] > 0: req, assignations = self._getRequestedResources(_resource_order, j['assignations']) eff = self._getResourceEfficiency(req, assignations, self._sys_res, resource) efficiencyperjob[efficiencyIDX] = eff efficiencyIDX += 1 # System metrics are computed AFTER dispatching queued[timePointsIDX - 1] = len(queue) # queue run[timePointsIDX - 1] = len(running) # running resources[timePointsIDX - 1] = self._getLoadRatio(resource) efficiency[timePointsIDX - 1] = self._getLoadRatioSelective(resource) if self._debug: print("Simulation done!") if trimQueueSize: queued = [q for q in queued if q != 0] run = [r for r in run if r != 0] # The metrics values for this instance are added to the internal variables self._slowdowns.append(slowdowns) self._queuesizes.append(queued) self._efficiencies.append(efficiencyperjob) self._loadratiosX.append([el[0] for el in efficiency]) self._loadratiosY.append([el[1] for el in efficiency]) return True def _addToDictAsList(self, dict, key, el, type): """ Simple method that adds an element to a dictionary and creates sub-entries if needed. 
:param dict: The target dictionary :param key: The key of the element to add :param el: The element to add :param type: The type of the element to add, used in the sub-dictionary for the key entry :return: None """ if key not in dict: dict[key] = {'queue': set(), 'start': set(), 'end': set()} dict[key][type].add(el) def _allocate_resources(self, req, assignations, resource=None): """ Method that allocates the resources for a certain starting job and updates all data structures related to resource usage :param req: The resource request of the job :param assignations: The list of nodes assigned to the job :param resource: A resource type to be considered for performance metrics (optional) :return: None """ for node in assignations: # If the node goes from the unused to the used state, we update the number of used nodes and the amount # of available resources among the used nodes, for the efficiency plots if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()): self._used_nodes += 1 for k, v in self._base_res[node].items(): self._avl_res_sum[k] += v # If a specific resource type is considered, the same condition is triggered only if such resource is used elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0: self._used_nodes += 1 self._avl_res_sum[resource] += self._base_res[node][resource] # Updating the per-node currently available resources for k, val in req.items(): self._sys_res[node][k] -= val if self._sys_res[node][k] < 0: self._sys_res[node][k] = 0 if self._debug: print("Caution: resource " + k + " is going below zero.") # Updating the dictionary of per-type currently used resources for k, v in req.items(): self._used_res_sum[k] += v * len(assignations) if self._used_res_sum[k] > self._avl_res_sum[k]: self._used_res_sum[k] = self._avl_res_sum[k] def _deallocate_resources(self, req, assignations, resource): """ Method that de-allocates the resources for a certain starting job and updates all data structures related to resource usage :param req: The resource request of the job :param assignations: The list of nodes assigned to the job :param resource: A resource type to be considered for performance metrics (optional) :return: None """ for node in assignations: for k, val in req.items(): self._sys_res[node][k] += val if self._sys_res[node][k] > self._base_res[node][k]: self._sys_res[node][k] = self._base_res[node][k] if self._debug: print("Caution: resource " + k + " is going beyond its base capacity.") # In this case the check for used-unused nodes must be performed after the resources are de-allocated if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()): self._used_nodes -= 1 for k, v in self._base_res[node].items(): self._avl_res_sum[k] -= v elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0: self._used_nodes -= 1 self._avl_res_sum[resource] -= self._base_res[node][resource] # The method is specular to allocate_resources and works identically for k, v in req.items(): self._used_res_sum[k] -= v * len(assignations) if self._used_res_sum[k] < 0: self._used_res_sum[k] = 0 def _generateSystemConfig(self, config_path): """ Generates a Resources object from a system configuration file. 
:param config_path: the path to the config file; :return: the Resources object and the resource equivalence; """ try: config = load_config(config_path) equiv = config.pop('equivalence', {}) # PEP 448 - Additional Unpacking Generalizations # python 3.5 and newer if not('node_prefix' in config): config['node_prefix'] = '' resources = Resources(**config) return resources, equiv except Exception as e: if config_path != '': print("Could not load system config: " + str(e)) else: print("A system configuration file must be specified.") exit() return None, None def _getRequestedResources(self, _resource_order, assignations_str): """ TO BE IMPLEMENTED: returns the requested resources for the input job. :param job: the dictionary related to the current job; :return: the dictionary of resources needed by each job unit, and the list of node assignations; """ _assignations_list = assignations_str.split(str_resources.SEPARATOR)[0:-1] _nodes_list = [assign.split(';')[0] for assign in _assignations_list] _request = { k:int(v) for k, v in zip(_resource_order, _assignations_list[0].split(';')[1:])} return _request, _nodes_list def _getResourceEfficiency(self, reqres, nodes, sys_res, resource): """ Computes the resource allocation efficiency metric for a certain input job. This method computed the resource allocation efficiency AFTER dispatching is performed, not before. :param reqres: the dictionary of resources requested by each job unit; :param nodes: the list of node assignations; :param sys_res: the dictionary of system resources; :param resource: the resource type to be considered (if present); :return: the resource allocation efficiency; """ # Computing the amount of used resources by the job if resource is None: used = sum(r * len(nodes) for r in reqres.values()) else: used = reqres[resource] * len(nodes) avl = 0 # Computing the amount of available resources in nodes used by the job for node in set(nodes): if resource is None: avl += sum(r for r in sys_res[node].values()) else: avl += sys_res[node][resource] return used / (avl + used) def _getLoadRatio(self, resource): """ Returns the standard load ratio for the system. :param resource: the resource type to be considered (if present); :return: the load ratio; """ loadratio = 0 if resource is None: loadratio = sum(self._used_res_sum.values()) / sum(self._base_res_sum.values()) elif resource in self._base_res_sum: loadratio = self._used_res_sum[resource] / self._base_res_sum[resource] return loadratio def _getLoadRatioSelective(self, resource): """ Returns the per-step resource allocation efficiency. This is defined as a X,Y pair where X expresses the fraction of used nodes, and Y defines the fraction of used resources in such nodes. :param resource: the resource type to be considered (if present); :return: an X,Y pair expressing the per-step resource allocation efficiency; """ loadratio = 0 if self._used_nodes > 0: if resource is None: loadratio = sum(self._used_res_sum.values()) / sum(self._avl_res_sum.values()) elif resource in self._avl_res_sum: loadratio = self._used_res_sum[resource] / self._avl_res_sum[resource] return self._used_nodes / self._total_nodes, loadratio else: return 0, 0 def _getDistributionStats(self, data): """ Returns some useful distribution statistics for the input data. The mean, minimum, maximum, median, and quartiles for the data are computed. 
:param data: The iterable for the input data; :return: a dictionary of statistics for the data distribution; """ stats = {} stats['avg'] = np.average(data) stats['min'] = np.min(data) stats['max'] = np.max(data) stats['median'] = np.median(data) stats['quartiles'] = np.percentile(data, range(0, 100, 25)) return stats def box_plot(self, data, title='', ylabel='', scale='linear', figsize=(7, 5), meansonly=False, output='Output.pdf', groups=1, **kwargs): """ Produces a box-and-whiskers plot for the input data's distributions. :param data: the input data; must be a list, in which each element is again a list containing all of the data regarding a certain test instance; the ordering must be that of the labels; :param title: the title of the plot; :param ylabel: the Y-axis label; :param scale: the scale of the plot; :param figsize: the size of the figure, is a tuple; :param meansonly: if True only the mean values for each distribution are depicted; :param output: the path to the output file; :param **kwargs: - fig_format: { 'format': eps or pdf, 'dpi': Int number } - xlim: the left-right axis boundaries, is a tuple; - ylim: the bottom-top axis boundaries, is a tuple; """ color_cycler = ['b', 'r', 'y', 'g', 'c', 'm', 'k', 'w'] hatch_cycler = ['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'] ncycle = 2 fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(data) ylim = kwargs.pop('ylim', None) xlim = kwargs.pop('xlim', None) show_legend = kwargs.pop('show_legend', False) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 linecol = 'black' tricol = 'black' vertlinecol = 'gray' fig, ax = plt.subplots(figsize=figsize) c_group = 0 c = groups r_hatch = len(hatch_cycler) color_list = [] hatch_list = [] for i, d in enumerate(data): color_list.append(color_cycler[c_group]) hatch_list.append(hatch_cycler[len(hatch_cycler) - r_hatch] * ncycle) c -= 1 if c == 0: c_group += 1 c = groups r_hatch -= 1 if r_hatch == 0: ncycle += 1 r_hatch = len(hatch_cycler) bp = ax.boxplot(data, labels=self._labels, patch_artist=True, sym="", whis=[0, 100], showmeans=True, showfliers=False) for patch, color, hatch in zip(bp['boxes'], color_list, hatch_list): patch.set_facecolor(color) patch.set_alpha(0.75) patch.set_hatch(hatch) # add some text for labels, title and axes ticks ax.set_ylabel(ylabel, fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_yscale(scale) if show_legend: ax.legend(bp['boxes'], self._labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(self._labels) // 2, mode="expand", borderaxespad=0.) if ylim: ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) if xlim: ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.tight_layout() plt.grid(linestyle=':', color='gray', zorder=0) plt.show() fig_format = kwargs.pop('fig_format', {}) fig.savefig(output, **fig_format) def box_plot_times(self, dataman, datasched, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'): """ Produces a bar plot for the timings in the simulations, across test instances. The bars will depict the average time required to perform dispatching in each simulation step, and the time required to perform simulation-related tasks in the simulation. :param dataman: the data for the time required in each step to perform simulation-related tasks. 
Is a list, where each element is again a list containing the data for a certain test instance; :param datasched: the data for the time required in each step to perform dispatching. Is a list, where each element is again a list containing the data for a certain test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param output: the path to the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(dataman) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 fig, ax = plt.subplots(figsize=figsize) for i in range(N): avgman = np.average(np.array(dataman[i])) avgsched = np.average(np.array(datasched[i])) if i == 0: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) # , label='Simulation')) ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Dispatching decision')) else: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75)) ax.scatter(ind[i] + width / 2, avgman + avgsched, marker='_', s=markersize / 4, zorder=0, color='black') # add some text for labels, title and axes ticks ax.set_ylabel('Time [ms]', fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_xticks([i + width / 2 for i in ind]) if legend: ax.legend() ax.set_xticklabels(self._labels) ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def box_plot_memory(self, data, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'): """ Produces a bar plot for the memory usage in the simulations, across test instances. The bars depict average and maximum memory usage in the simulation. :param data: the data for memory usage in each simulation step. Is a list, where each element is again a list containing the data for a certain test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param output: the path to the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) N = len(data) spacing = 0.2 ind = [i * spacing for i in np.arange(N)] width = 0.1 markersize = 250 fig, ax = plt.subplots(figsize=figsize) for i in range(N): avgmem = np.average(np.array(data[i])) maxmem = np.max(np.array(data[i])) if i == 0: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75, label='Avg. 
Mem')) ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Max. Mem')) else: ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75)) ax.scatter(ind[i] + width / 2, maxmem, marker='_', s=markersize / 4, zorder=0, color='black') ax.set_ylabel('Average Memory Usage [MB]', fontsize=fontsize) ax.set_xlabel('Dispatching method', fontsize=fontsize) ax.set_title(title) ax.set_xticks([i + width / 2 for i in ind]) if legend: ax.legend() ax.set_xticklabels(self._labels) ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def scalability_plot(self, xdata, ydata, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, smooth=30, linestyles=None, markers=None, output='Output.pdf'): """ Creates a scalability plot for all test instances, where X represents the queue size, and Y the average time required by each dispatching method in the instances. :param xdata: the X data, containing the queue sizes for each test instance; is a list, where each element contains a list with the data for each test instance; :param ydata: the Y data, containing the average times required to perform dispatching in each test instance; is a list, where each element contains a list with the data for each test instance; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param legend: enables or disables visualization of the legend; :param smooth: smoothing factor for the Savitzky-Golay filter. 
The lower the number, the higher the smoothing; :param output: the path of the output file; """ fontsize = 12 plt.rc('xtick', labelsize=fontsize) plt.rc('ytick', labelsize=fontsize) if not linestyles: linestyles = ('-', '-', '--', '--', '-.', '-.', ':', ':') if not markers: markers = (None, 'o', None, '^', None, 's', None, 'p') numstyles = len(linestyles) fig, ax = plt.subplots(figsize=figsize) divideFactor = smooth for i in range(len(xdata)): markeroffset = floor(max(xdata[i]) / 20 + i * 2) if divideFactor > 1 and len(ydata[i]) >= divideFactor: win_len = floor(len(ydata[i]) / divideFactor) win_len += (win_len + 1) % 2 if win_len < 5: win_len = 5 yfiltered = savgol_filter(ydata[i], win_len, 3) else: yfiltered = ydata[i] ax.plot(xdata[i], yfiltered, label=self._labels[i], linestyle=linestyles[i % numstyles], marker=markers[i % numstyles], markevery=markeroffset, zorder=2 if markers[i % numstyles] is None else 0) ax.set_ylabel('Time [ms]', fontsize=fontsize) ax.set_xlabel('Queue size', fontsize=fontsize) ax.set_title(title) if legend: ax.legend() ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) plt.grid(linestyle=':', color='gray', zorder=0) plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize) plt.show() ff = PdfPages(output) ff.savefig(fig) ff.close() def distribution_scatter_plot(self, xdata, ydata, title='', scale='linear', xlim=(0, 1.05), ylim=(0, 1.05), figsize=(7, 5), alpha=0.005, output='Output.pdf'): """ Creates a distribution scatter plot for the system's resource efficiency. The X values represent the amount of used nodes in a certain time step, while the Y values represent the fraction of used resources in such nodes. Darker areas of the plot represent values with higher frequency. The method creates one plot per test instance, automatically. :param xdata: :param ydata: :param alpha: the alpha to be used for each dot in the plot; :param title: the title of the plot; :param scale: the scale of the plot; :param xlim: the left-right boundaries for the plot, is a tuple; :param ylim: the bottom-top boundaries for the plot, is a tuple; :param figsize: the size of the figure, is a tuple; :param output: the path to the output files: the label for each test instance will be automatically added for each file; """ for i in range(len(xdata)): fig, ax = plt.subplots(figsize=figsize) ax.scatter(xdata[i], ydata[i], color='black', alpha=alpha, s=5) ax.set_title(title) ax.set_xlabel('Used Nodes') ax.set_ylabel('Used Resources') ax.set_yscale(scale) ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False) ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False) ax.grid(True) plt.show() splitoutput = splitext(output) ff = PdfPages(splitoutput[0] + '-' + self._labels[i] + '.pdf') ff.savefig(fig) ff.close() def get_preprocessed_benchmark_data(self): """ Returns all of the pre-processed benchmark-related data. A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed. Also, each element of the tuple is a list, with as many entries as the files that were processed, in the same order. Each element of these lists contains then the data related to a specific metric, for a specific test instance. All data is stored in standard Python lists. :return: a tuple in which every element is a list containing, in each element, a specific kind of data regarding one of the test instances. 
The tuple contains, in this order: - the resource usage statistics' dictionaries; - the lists of dispatching times for each time step; - the lists of management times for each time step; - the lists of memory usage values for each time step; - the X scalability data containing the queue size for each test instance; - the Y scalability data containing the average dispatching times for each test instance; """ if not self._preprocessed or self._plot_class != self.BENCHMARK_CLASS: return None, None, None, None, None, None else: return self._simdata, self._schedtimes, self._mantimes, self._simmemory, self._scalabilitydataX, self._scalabilitydataY def get_preprocessed_schedule_data(self): """ Returns all of the pre-processed schedule-related data. A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed. Also, each element of the tuple is a list, with as many entries as the files that were processed, in the same order. Each element of these lists contains then the data related to a specific metric, for a specific test instance. All data is stored in standard Python lists. :return: a tuple in which every element is a list containing, in each element, the data regarding one of the test instances. The tuple contains, in this order: - the slowdown values for jobs; - the queue sizes for all time steps; - the resource allocation efficiencies for all jobs; - the X data regarding the load ratios (fraction of used nodes) for all time steps; - the Y data regarding the load ratios (fraction of used resources) for all time steps; """ if not self._preprocessed or self._plot_class != self.SCHEDULE_CLASS: return None, None, None, None, None else: return self._slowdowns, self._queuesizes, self._efficiencies, self._loadratiosX, self._loadratiosY if __name__ == '__main__': # This is an example. It should not be executed here, but in a script in the project's root, where also # basic_example.py is, so that all imports can be resolved correctly. resultpath = ['Path/to/benchmark/file', 'Path/to/benchmark/file2'] resultlabel = ['Label', 'Label2'] plots = PlotFactory('benchmark') plots.set_files(resultpath, resultlabel) plots.pre_process() plots.produce_plot(type='scalability', title='My Scalability Plot')
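A hedged companion sketch to the example above, for the 'schedule' plot class (not part of the module; all paths and labels are placeholders). A system configuration file is required here so that the meta-simulation in _getScheduleData can recompute the slowdown, queue-size, load-ratio and efficiency metrics.

# Illustrative only: analogous workflow for schedule files.
schedulepaths = ['Path/to/schedule/file', 'Path/to/schedule/file2']
schedulelabels = ['Label', 'Label2']

sched_plots = PlotFactory('schedule', config='Path/to/system/config.json')
sched_plots.set_files(schedulepaths, schedulelabels)
sched_plots.pre_process(trimSlowdown=True)

# Any schedule-class plot type can then be produced, e.g. the slowdown
# box-plot on a logarithmic scale.
sched_plots.produce_plot(type='slowdown', title='My Slowdown Plot', scale='log')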
mit
rs2/pandas
pandas/io/sql.py
1
62655
""" Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. """ from contextlib import contextmanager from datetime import date, datetime, time from functools import partial import re from typing import Iterator, Optional, Union, overload import warnings import numpy as np import pandas._libs.lib as lib from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.core.tools.datetimes import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass # ----------------------------------------------------------------------------- # -- Helper functions _SQLALCHEMY_INSTALLED = None def _is_sqlalchemy_connectable(con): global _SQLALCHEMY_INSTALLED if _SQLALCHEMY_INSTALLED is None: try: import sqlalchemy _SQLALCHEMY_INSTALLED = True except ImportError: _SQLALCHEMY_INSTALLED = False if _SQLALCHEMY_INSTALLED: import sqlalchemy # noqa: F811 return isinstance(con, sqlalchemy.engine.Connectable) else: return False def _convert_params(sql, params): """Convert SQL and params args to DBAPI2.0 compliant format.""" args = [sql] if params is not None: if hasattr(params, "keys"): # test if params is a mapping args += [params] else: args += [list(params)] return args def _process_parse_dates_argument(parse_dates): """Process parse_dates argument for read_sql functions""" # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] elif not hasattr(parse_dates, "__iter__"): parse_dates = [parse_dates] return parse_dates def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors="ignore", **format) else: # Allow passing of formatting string for integers # GH17855 if format is None and ( issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer) ): format = "s" if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: return to_datetime(col, errors="coerce", unit=format, utc=utc) elif is_datetime64tz_dtype(col.dtype): # coerce to UTC timezone # GH11216 return to_datetime(col, utc=True) else: return to_datetime(col, errors="coerce", format=format, utc=utc) def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns. """ parse_dates = _process_parse_dates_argument(parse_dates) # we want to coerce datetime64_tz dtypes for now to UTC # we could in theory do a 'nice' conversion from a FixedOffset tz # GH11216 for col_name, df_col in data_frame.items(): if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None): """Wrap result set of query in a DataFrame.""" frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float) frame = _parse_date_columns(frame, parse_dates) if index_col is not None: frame.set_index(index_col, inplace=True) return frame def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. 
Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by the library. If a DBAPI2 object, only sqlite3 is supported. cur : deprecated, cursor is obtained from connection, default: None params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable """ if cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) # ----------------------------------------------------------------------------- # -- Read and write to DataFrames @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP """ con = _engine_builder(con) if not _is_sqlalchemy_connectable(con): raise NotImplementedError( "read_sql_table only supported for SQLAlchemy connectable." 
) import sqlalchemy from sqlalchemy.schema import MetaData meta = MetaData(con, schema=schema) try: meta.reflect(only=[table_name], views=True) except sqlalchemy.exc.InvalidRequestError as err: raise ValueError(f"Table {table_name} not found") from err pandas_sql = SQLDatabase(con, meta=meta) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) if table is not None: return table else: raise ValueError(f"Table {table_name} not found", con) @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC. """ pandas_sql = pandasSQL_builder(con) return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) @overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... 
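# Illustrative usage sketch, not part of this module: read_sql_query through a
# plain sqlite3 DBAPI connection, i.e. the fallback path that does not require
# SQLAlchemy. The table, columns and values are invented for the example;
# sqlite3 uses the qmark paramstyle, and parse_dates coerces the text column
# to datetime64.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE users (id INTEGER, name TEXT, created TEXT)")
con.execute("INSERT INTO users VALUES (1, 'alice', '2020-01-01')")

df = pd.read_sql_query(
    "SELECT * FROM users WHERE id = ?",
    con,
    params=(1,),
    parse_dates=["created"],
    index_col="id",
)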
@overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query or database table into a DataFrame. This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (for backward compatibility). It will delegate to the specific function depending on the provided input. A SQL query will be routed to ``read_sql_query``, while a database table name will be routed to ``read_sql_table``. Note that the delegated function might have more specific notes about their functionality not listed here. Parameters ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable; str connections are closed automatically. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table (only used when reading a table). chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql_query : Read SQL query into a DataFrame. 
""" pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, SQLiteDatabase): return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) try: _is_table_name = pandas_sql.has_table(sql) except Exception: # using generic exception to catch errors from sql drivers (GH24988) _is_table_name = False if _is_table_name: pandas_sql.meta.reflect(only=[sql]) return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) else: return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) def to_sql( frame, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None, method=None, ) -> None: """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame, Series name : str Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 fallback mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). - 'multi': Pass multiple values in a single ``INSERT`` clause. - callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") pandas_sql = pandasSQL_builder(con, schema=schema) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError( "'frame' argument should be either a Series or a DataFrame" ) pandas_sql.to_sql( frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, chunksize=chunksize, dtype=dtype, method=method, ) def has_table(table_name, con, schema=None): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. 
If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name) table_exists = has_table def _engine_builder(con): """ Returns a SQLAlchemy engine from a URI (if con is a string) else it just return con without modifying it. """ global _SQLALCHEMY_INSTALLED if isinstance(con, str): try: import sqlalchemy except ImportError: _SQLALCHEMY_INSTALLED = False else: con = sqlalchemy.create_engine(con) return con return con def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. con = _engine_builder(con) if _is_sqlalchemy_connectable(con): return SQLDatabase(con, schema=schema, meta=meta) elif isinstance(con, str): raise ImportError("Using URI string without sqlalchemy installed.") else: return SQLiteDatabase(con, is_cursor=is_cursor) class SQLTable(PandasObject): """ For mapping Pandas tables to SQL tables. Uses fact that table is reflected by SQLAlchemy to do better type conversions. Also holds various flags needed to avoid having to pass them between functions all the time. """ # TODO: support for multiIndex def __init__( self, name, pandas_sql_engine, frame=None, index=True, if_exists="fail", prefix="pandas", index_label=None, schema=None, keys=None, dtype=None, ): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label) self.schema = schema self.if_exists = if_exists self.keys = keys self.dtype = dtype if frame is not None: # We want to initialize based on a dataframe self.table = self._create_table_setup() else: # no data provided, read-only mode self.table = self.pd_sql.get_table(self.name, self.schema) if self.table is None: raise ValueError(f"Could not init table '{name}'") def exists(self): return self.pd_sql.has_table(self.name, self.schema) def sql_schema(self): from sqlalchemy.schema import CreateTable return str(CreateTable(self.table).compile(self.pd_sql.connectable)) def _execute_create(self): # Inserting table into database, add to MetaData object self.table = self.table.tometadata(self.pd_sql.meta) self.table.create() def create(self): if self.exists(): if self.if_exists == "fail": raise ValueError(f"Table '{self.name}' already exists.") elif self.if_exists == "replace": self.pd_sql.drop_table(self.name, self.schema) self._execute_create() elif self.if_exists == "append": pass else: raise ValueError(f"'{self.if_exists}' is not valid for if_exists") else: self._execute_create() def _execute_insert(self, conn, keys, data_iter): """ Execute SQL statement inserting data Parameters ---------- conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection keys : list of str Column names data_iter : generator of list Each item contains a list of values to be inserted """ data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(), data) def _execute_insert_multi(self, conn, keys, data_iter): """ Alternative to _execute_insert for DBs support multivalue INSERT. Note: multi-value insert is usually faster for analytics DBs and tables containing a few columns but performance degrades quickly with increase of columns. 
""" data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(data)) def insert_data(self): if self.index is not None: temp = self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError(f"duplicate name in index/columns: {err}") from err else: temp = self.frame column_names = list(map(str, temp.columns)) ncols = len(column_names) data_list = [None] * ncols for i, (_, ser) in enumerate(temp.items()): vals = ser._values if vals.dtype.kind == "M": d = vals.to_pydatetime() elif vals.dtype.kind == "m": # store as integers, see GH#6921, GH#7076 d = vals.view("i8").astype(object) else: d = vals.astype(object) assert isinstance(d, np.ndarray), type(d) if ser._can_hold_na: # Note: this will miss timedeltas since they are converted to int mask = isna(d) d[mask] = None data_list[i] = d return column_names, data_list def insert(self, chunksize=None, method=None): # set insert method if method is None: exec_insert = self._execute_insert elif method == "multi": exec_insert = self._execute_insert_multi elif callable(method): exec_insert = partial(method, self) else: raise ValueError(f"Invalid parameter `method`: {method}") keys, data_list = self.insert_data() nrows = len(self.frame) if nrows == 0: return if chunksize is None: chunksize = nrows elif chunksize == 0: raise ValueError("chunksize argument should be non-zero") chunks = int(nrows / chunksize) + 1 with self.pd_sql.run_transaction() as conn: for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list]) exec_insert(conn, keys, chunk_iter) def _query_iterator( self, result, chunksize, columns, coerce_float=True, parse_dates=None ): """Return generator through chunked result set.""" while True: data = result.fetchmany(chunksize) if not data: break else: self.frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) yield self.frame def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None): if columns is not None and len(columns) > 0: from sqlalchemy import select cols = [self.table.c[n] for n in columns] if self.index is not None: for idx in self.index[::-1]: cols.insert(0, self.table.c[idx]) sql_select = select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) column_names = result.keys() if chunksize is not None: return self._query_iterator( result, chunksize, column_names, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = result.fetchall() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): # for writing: index=True to include index in sql table if index is True: nlevels = self.frame.index.nlevels # if index_label is specified, set this as index name(s) if index_label is not None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise ValueError( "Length of 'index_label' should match number of " f"levels, which is {nlevels}" ) else: return index_label # return the used column labels for the index columns if ( nlevels == 1 and "index" not in 
self.frame.columns and self.frame.index.name is None ): return ["index"] else: return [ l if l is not None else f"level_{i}" for i, l in enumerate(self.frame.index.names) ] # for reading: index=(list of) string to specify column to set as index elif isinstance(index, str): return [index] elif isinstance(index, list): return index else: return None def _get_column_names_and_types(self, dtype_mapper): column_names_and_types = [] if self.index is not None: for i, idx_label in enumerate(self.index): idx_type = dtype_mapper(self.frame.index._get_level_values(i)) column_names_and_types.append((str(idx_label), idx_type, True)) column_names_and_types += [ (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) for i in range(len(self.frame.columns)) ] return column_names_and_types def _create_table_setup(self): from sqlalchemy import Column, PrimaryKeyConstraint, Table column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) columns = [ Column(name, typ, index=is_index) for name, typ, is_index in column_names_and_types ] if self.keys is not None: if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk") columns.append(pkc) schema = self.schema or self.pd_sql.meta.schema # At this point, attach to new metadata, only attach to self.meta # once table is created. from sqlalchemy.schema import MetaData meta = MetaData(self.pd_sql, schema=schema) return Table(self.name, meta, *columns, schema=schema) def _harmonize_columns(self, parse_dates=None): """ Make the DataFrame's column types align with the SQL table column types. Need to work around limited NA value support. Floats are always fine, ints must always be floats if there are Null values. Booleans are hard because converting bool column with None replaces all Nones with false. Therefore only convert bool if there are no NA values. Datetimes should already be converted to np.datetime64 if supported, but here we also force conversion if required. """ parse_dates = _process_parse_dates_argument(parse_dates) for sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] # Handle date parsing upfront; don't try to convert columns # twice if col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column(df_col, format=fmt) continue # the type the dataframe column should have col_type = self._get_dtype(sql_col.type) if ( col_type is datetime or col_type is date or col_type is DatetimeTZDtype ): # Convert tz-aware Datetime SQL columns to UTC utc = col_type is DatetimeTZDtype self.frame[col_name] = _handle_date_column(df_col, utc=utc) elif col_type is float: # floats support NA, can always convert! self.frame[col_name] = df_col.astype(col_type, copy=False) elif len(df_col) == df_col.count(): # No NA values, can convert ints and bools if col_type is np.dtype("int64") or col_type is bool: self.frame[col_name] = df_col.astype(col_type, copy=False) except KeyError: pass # this column not in results def _sqlalchemy_type(self, col): dtype = self.dtype or {} if col.name in dtype: return self.dtype[col.name] # Infer type of column, while ignoring missing values. # Needed for inserting typed data containing NULLs, GH 8778. 
col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import ( TIMESTAMP, BigInteger, Boolean, Date, DateTime, Float, Integer, Text, Time, ) if col_type == "datetime64" or col_type == "datetime": # GH 9086: TIMESTAMP is the suggested type if the column contains # timezone information try: if col.dt.tz is not None: return TIMESTAMP(timezone=True) except AttributeError: # The column is actually a DatetimeIndex # GH 26761 or an Index with date-like data e.g. 9999-01-01 if getattr(col, "tz", None) is not None: return TIMESTAMP(timezone=True) return DateTime if col_type == "timedelta64": warnings.warn( "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, stacklevel=8, ) return BigInteger elif col_type == "floating": if col.dtype == "float32": return Float(precision=23) else: return Float(precision=53) elif col_type == "integer": if col.dtype == "int32": return Integer else: return BigInteger elif col_type == "boolean": return Boolean elif col_type == "date": return Date elif col_type == "time": return Time elif col_type == "complex": raise ValueError("Complex datatypes not supported") return Text def _get_dtype(self, sqltype): from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer if isinstance(sqltype, Float): return float elif isinstance(sqltype, Integer): # TODO: Refine integer size. return np.dtype("int64") elif isinstance(sqltype, TIMESTAMP): # we have a timezone capable type if not sqltype.timezone: return datetime return DatetimeTZDtype elif isinstance(sqltype, DateTime): # Caution: np.datetime64 is also a subclass of np.number. return datetime elif isinstance(sqltype, Date): return date elif isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject): """ Subclasses Should define read_sql and to_sql. """ def read_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) def to_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) class SQLDatabase(PandasSQL): """ This class enables conversion between DataFrame and SQL databases using SQLAlchemy to handle DataBase abstraction. Parameters ---------- engine : SQLAlchemy connectable Connectable to connect with the database. Using SQLAlchemy makes it possible to use any DB supported by that library. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). meta : SQLAlchemy MetaData object, default None If provided, this MetaData object is used instead of a newly created. This allows to specify database flavor specific arguments in the MetaData object. """ def __init__(self, engine, schema=None, meta=None): self.connectable = engine if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.connectable, schema=schema) self.meta = meta @contextmanager def run_transaction(self): with self.connectable.begin() as tx: if hasattr(tx, "execute"): yield tx else: yield self.connectable def execute(self, *args, **kwargs): """Simple passthrough to SQLAlchemy connectable""" return self.connectable.execution_options(no_parameters=True).execute( *args, **kwargs ) def read_table( self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None, schema=None, chunksize=None, ): """ Read SQL database table into a DataFrame. 
Parameters ---------- table_name : string Name of SQL table in database. index_col : string, optional, default: None Column to set as index. coerce_float : boolean, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg}``, where the arg corresponds to the keyword arguments of :func:`pandas.to_datetime`. Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table. schema : string, default None Name of SQL schema in database to query (if database flavor supports this). If specified, this overwrites the default schema of the SQL database object. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query """ table = SQLTable(table_name, self, index=index_col, schema=schema) return table.read( coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) @staticmethod def _query_iterator( result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = result.fetchmany(chunksize) if not data: break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None, chunksize=None, ): """ Read SQL query into a DataFrame. Parameters ---------- sql : string SQL query to be executed. index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- read_sql_table : Read SQL database table into a DataFrame. 
read_sql """ args = _convert_params(sql, params) result = self.execute(*args) columns = result.keys() if chunksize is not None: return self._query_iterator( result, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = result.fetchall() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) return frame read_sql = read_query def to_sql( self, frame, name, if_exists="fail", index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None, ): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If specified, this overwrites the default schema of the SQLDatabase object. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type. If all columns are of the same type, one single value can be used. method : {None', 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: from sqlalchemy.types import TypeEngine, to_instance for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): raise ValueError(f"The type of {col} is not a SQLAlchemy type") table = SQLTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype, ) table.create() from sqlalchemy import exc try: table.insert(chunksize, method=method) except exc.SQLAlchemyError as err: # GH34431 msg = "(1054, \"Unknown column 'inf' in 'field list'\")" err_text = str(err.orig) if re.search(msg, err_text): raise ValueError("inf cannot be used with MySQL") from err else: raise err if not name.isdigit() and not name.islower(): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case engine = self.connectable.engine with self.connectable.connect() as conn: table_names = engine.table_names( schema=schema or self.meta.schema, connection=conn ) if name not in table_names: msg = ( f"The provided table name '{name}' is not found exactly as " "such in the database after writing the table, possibly " "due to case sensitivity issues. Consider using lower " "case table names." 
) warnings.warn(msg, UserWarning) @property def tables(self): return self.meta.tables def has_table(self, name, schema=None): return self.connectable.run_callable( self.connectable.dialect.has_table, name, schema or self.meta.schema ) def get_table(self, table_name, schema=None): schema = schema or self.meta.schema if schema: tbl = self.meta.tables.get(".".join([schema, table_name])) else: tbl = self.meta.tables.get(table_name) # Avoid casting double-precision floats into decimals from sqlalchemy import Numeric for column in tbl.columns: if isinstance(column.type, Numeric): column.type.asdecimal = False return tbl def drop_table(self, table_name, schema=None): schema = schema or self.meta.schema if self.has_table(table_name, schema): self.meta.reflect(only=[table_name], schema=schema) self.get_table(table_name, schema).drop() self.meta.clear() def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): table = SQLTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype ) return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- # sqlite-specific sql strings and handler class # dictionary used for readability purposes _SQL_TYPES = { "string": "TEXT", "floating": "REAL", "integer": "INTEGER", "datetime": "TIMESTAMP", "date": "DATE", "time": "TIME", "boolean": "INTEGER", } def _get_unicode_name(name): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError as err: raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname def _get_valid_sqlite_name(name): # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ # -for-sqlite-table-column-names-in-python # Ensure the string can be encoded as UTF-8. # Ensure the string does not include any NUL characters. # Replace all " with "". # Wrap the entire thing in double quotes. uname = _get_unicode_name(name) if not len(uname): raise ValueError("Empty table or column name specified") nul_index = uname.find("\x00") if nul_index >= 0: raise ValueError("SQLite identifier cannot contain NULs") return '"' + uname.replace('"', '""') + '"' _SAFE_NAMES_WARNING = ( "The spaces in these column names will not be changed. " "In pandas versions < 0.14, spaces were converted to underscores." ) class SQLiteTable(SQLTable): """ Patch the SQLTable for fallback support. Instead of a table variable just use the Create Table statement. """ def __init__(self, *args, **kwargs): # GH 8341 # register an adapter callable for datetime.time object import sqlite3 # this will transform time(12,34,56,789) into '12:34:56.000789' # (this is what sqlalchemy does) sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f")) super().__init__(*args, **kwargs) def sql_schema(self): return str(";\n".join(self.table)) def _execute_create(self): with self.pd_sql.run_transaction() as conn: for stmt in self.table: conn.execute(stmt) def insert_statement(self, *, num_rows): names = list(map(str, self.frame.columns)) wld = "?" 
# wildcard char escape = _get_valid_sqlite_name if self.index is not None: for idx in self.index[::-1]: names.insert(0, idx) bracketed_names = [escape(column) for column in names] col_names = ",".join(bracketed_names) row_wildcards = ",".join([wld] * len(names)) wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows)) insert_statement = ( f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}" ) return insert_statement def _execute_insert(self, conn, keys, data_iter): data_list = list(data_iter) conn.executemany(self.insert_statement(num_rows=1), data_list) def _execute_insert_multi(self, conn, keys, data_iter): data_list = list(data_iter) flattened_data = [x for row in data_list for x in row] conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data) def _create_table_setup(self): """ Return a list of SQL statements that creates a table reflecting the structure of a DataFrame. The first entry will be a CREATE TABLE statement while the rest will be CREATE INDEX statements. """ column_names_and_types = self._get_column_names_and_types(self._sql_type_name) pat = re.compile(r"\s+") column_names = [col_name for col_name, _, _ in column_names_and_types] if any(map(pat.search, column_names)): warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) escape = _get_valid_sqlite_name create_tbl_stmts = [ escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types ] if self.keys is not None and len(self.keys): if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys cnames_br = ", ".join(escape(c) for c in keys) create_tbl_stmts.append( f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})" ) create_stmts = [ "CREATE TABLE " + escape(self.name) + " (\n" + ",\n ".join(create_tbl_stmts) + "\n)" ] ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index] if len(ix_cols): cnames = "_".join(ix_cols) cnames_br = ",".join(escape(c) for c in ix_cols) create_stmts.append( "CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) + "ON " + escape(self.name) + " (" + cnames_br + ")" ) return create_stmts def _sql_type_name(self, col): dtype = self.dtype or {} if col.name in dtype: return dtype[col.name] # Infer type of column, while ignoring missing values. # Needed for inserting typed data containing NULLs, GH 8778. col_type = lib.infer_dtype(col, skipna=True) if col_type == "timedelta64": warnings.warn( "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, stacklevel=8, ) col_type = "integer" elif col_type == "datetime64": col_type = "datetime" elif col_type == "empty": col_type = "string" elif col_type == "complex": raise ValueError("Complex datatypes not supported") if col_type not in _SQL_TYPES: col_type = "string" return _SQL_TYPES[col_type] class SQLiteDatabase(PandasSQL): """ Version of SQLDatabase to support SQLite connections (fallback without SQLAlchemy). This should only be used internally. 
Parameters ---------- con : sqlite connection object """ def __init__(self, con, is_cursor=False): self.is_cursor = is_cursor self.con = con @contextmanager def run_transaction(self): cur = self.con.cursor() try: yield cur self.con.commit() except Exception: self.con.rollback() raise finally: cur.close() def execute(self, *args, **kwargs): if self.is_cursor: cur = self.con else: cur = self.con.cursor() try: cur.execute(*args, **kwargs) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: # pragma: no cover ex = DatabaseError( f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback" ) raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}") raise ex from exc @staticmethod def _query_iterator( cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize=None, ): args = _convert_params(sql, params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc in cursor.description] if chunksize is not None: return self._query_iterator( cursor, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = self._fetchall_as_list(cursor) cursor.close() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) return frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql( self, frame, name, if_exists="fail", index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None, ): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: string Name of SQL table. if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. index : boolean, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Ignored parameter included for compatibility with SQLAlchemy version of ``to_sql``. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a string. If all columns are of the same type, one single value can be used. method : {None, 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. 
versionadded:: 0.24.0 """ if dtype and not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} if dtype is not None: for col, my_type in dtype.items(): if not isinstance(my_type, str): raise ValueError(f"{col} ({my_type}) not a string") table = SQLiteTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, dtype=dtype, ) table.create() table.insert(chunksize, method) def has_table(self, name, schema=None): # TODO(wesm): unused? # escape = _get_valid_sqlite_name # esc_name = escape(name) wld = "?" query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};" return len(self.execute(query, [name]).fetchall()) > 0 def get_table(self, table_name, schema=None): return None # not supported in fallback mode def drop_table(self, name, schema=None): drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" self.execute(drop_sql) def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): table = SQLiteTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype ) return str(table.sql_schema()) def get_schema(frame, name, keys=None, con=None, dtype=None): """ Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : string name of SQL table keys : string or sequence, default: None columns to use a primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. """ pandas_sql = pandasSQL_builder(con=con) return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
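# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal round trip through the
# module-level helpers defined above, assuming an in-memory sqlite3
# connection so that only the SQLite fallback path is exercised (no
# SQLAlchemy needed).  The table name 'demo' and the frame contents are
# assumptions made for the example, not anything mandated by the library.
if __name__ == "__main__":
    import sqlite3

    conn = sqlite3.connect(":memory:")
    frame = DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

    # Show the CREATE TABLE statement that would be generated for `frame`.
    print(get_schema(frame, "demo", con=conn))

    # Write the frame, confirm the table exists, then read it back.
    to_sql(frame, "demo", conn, index=False, if_exists="replace")
    assert has_table("demo", conn)
    print(read_sql("SELECT a, b FROM demo", conn))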
bsd-3-clause
kenshay/ImageScripter
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py
7
82769
""" Module contains tools for processing Stata files into DataFrames The StataReader below was originally written by Joe Presbrey as part of PyDTA. It has been extended and improved by Skipper Seabold from the Statsmodels project who also developed the StataWriter and was finally added to pandas in a once again improved version. You can find more information on http://presbrey.mit.edu/PyDTA and http://www.statsmodels.org/devel/ """ import numpy as np import sys import struct from dateutil.relativedelta import relativedelta from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype, _ensure_object) from pandas.core.base import StringMixin from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame from pandas.core.series import Series import datetime from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \ zip, BytesIO from pandas.util.decorators import Appender import pandas as pd from pandas.io.common import get_filepath_or_buffer, BaseIterator from pandas.lib import max_len_string_array, infer_dtype from pandas.tslib import NaT, Timestamp _version_error = ("Version of given Stata file is not 104, 105, 108, " "111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), " "115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)") _statafile_processing_params1 = """\ convert_dates : boolean, defaults to True Convert date variables to DataFrame time values convert_categoricals : boolean, defaults to True Read value labels and convert columns to Categorical/Factor variables""" _encoding_params = """\ encoding : string, None or encoding Encoding used to parse the files. None defaults to iso-8859-1.""" _statafile_processing_params2 = """\ index : identifier of index column identifier of column that should be used as index of the DataFrame convert_missing : boolean, defaults to False Flag indicating whether to convert missing values to their Stata representations. If False, missing values are replaced with nans. If True, columns containing missing values are returned with object data types and missing values are represented by StataMissingValue objects. preserve_dtypes : boolean, defaults to True Preserve Stata datatypes. If False, numeric data are upcast to pandas default types for foreign data (float64 or int64) columns : list or None Columns to retain. Columns will be returned in the given order. None returns all columns order_categoricals : boolean, defaults to True Flag indicating whether converted categorical data are ordered.""" _chunksize_params = """\ chunksize : int, default None Return StataReader object for iterations, returns chunks with given number of lines""" _iterator_params = """\ iterator : boolean, default False Return StataReader object""" _read_stata_doc = """Read Stata file into DataFrame Parameters ---------- filepath_or_buffer : string or file-like object Path to .dta file or object implementing a binary read() functions %s %s %s %s %s Returns ------- DataFrame or StataReader Examples -------- Read a Stata dta file: >>> df = pandas.read_stata('filename.dta') Read a Stata dta file in 10,000 line chunks: >>> itr = pandas.read_stata('filename.dta', chunksize=10000) >>> for chunk in itr: >>> do_something(chunk) """ % (_statafile_processing_params1, _encoding_params, _statafile_processing_params2, _chunksize_params, _iterator_params) _data_method_doc = """Reads observations from Stata file, converting them into a dataframe This is a legacy method. 
Use `read` in new code. Parameters ---------- %s %s Returns ------- DataFrame """ % (_statafile_processing_params1, _statafile_processing_params2) _read_method_doc = """\ Reads observations from Stata file, converting them into a dataframe Parameters ---------- nrows : int Number of lines to read from data file, if None read whole file. %s %s Returns ------- DataFrame """ % (_statafile_processing_params1, _statafile_processing_params2) _stata_reader_doc = """\ Class for reading Stata dta files. Parameters ---------- path_or_buf : string or file-like object Path to .dta file or object implementing a binary read() functions %s %s %s %s """ % (_statafile_processing_params1, _statafile_processing_params2, _encoding_params, _chunksize_params) @Appender(_read_stata_doc) def read_stata(filepath_or_buffer, convert_dates=True, convert_categoricals=True, encoding=None, index=None, convert_missing=False, preserve_dtypes=True, columns=None, order_categoricals=True, chunksize=None, iterator=False): reader = StataReader(filepath_or_buffer, convert_dates=convert_dates, convert_categoricals=convert_categoricals, index=index, convert_missing=convert_missing, preserve_dtypes=preserve_dtypes, columns=columns, order_categoricals=order_categoricals, chunksize=chunksize, encoding=encoding) if iterator or chunksize: data = reader else: data = reader.read() reader.close() return data _date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"] stata_epoch = datetime.datetime(1960, 1, 1) def _stata_elapsed_date_to_datetime_vec(dates, fmt): """ Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series The Stata Internal Format date to convert to datetime according to fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty Returns Returns ------- converted : Series The converted dates Examples -------- >>> import pandas as pd >>> dates = pd.Series([52]) >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") 0 1961-01-01 dtype: datetime64[ns] Notes ----- datetime/c - tc milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day datetime/C - tC - NOT IMPLEMENTED milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds date - td days since 01jan1960 (01jan1960 = 0) weekly date - tw weeks since 1960w1 This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. The datetime value is the start of the week in terms of days in the year, not ISO calendar weeks. monthly date - tm months since 1960m1 quarterly date - tq quarters since 1960q1 half-yearly date - th half-years since 1960h1 yearly date - ty years since 0000 If you don't have pandas with datetime support, then you can't do milliseconds accurately. """ MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000 MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000 def convert_year_month_safe(year, month): """ Convert year and month to datetimes, using pandas vectorized versions when the date range falls within the range supported by pandas. Other wise it falls back to a slower but more robust method using datetime. 
""" if year.max() < MAX_YEAR and year.min() > MIN_YEAR: return to_datetime(100 * year + month, format='%Y%m') else: index = getattr(year, 'index', None) return Series( [datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index) def convert_year_days_safe(year, days): """ Converts year (e.g. 1999) and days since the start of the year to a datetime or datetime64 Series """ if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR: return (to_datetime(year, format='%Y') + to_timedelta(days, unit='d')) else: index = getattr(year, 'index', None) value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for y, d in zip(year, days)] return Series(value, index=index) def convert_delta_safe(base, deltas, unit): """ Convert base dates and deltas to datetimes, using pandas vectorized versions if the deltas satisfy restrictions required to be expressed as dates in pandas. """ index = getattr(deltas, 'index', None) if unit == 'd': if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA: values = [base + relativedelta(days=int(d)) for d in deltas] return Series(values, index=index) elif unit == 'ms': if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA: values = [base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas] return Series(values, index=index) else: raise ValueError('format not understood') base = to_datetime(base) deltas = to_timedelta(deltas, unit=unit) return base + deltas # TODO: If/when pandas supports more than datetime64[ns], this should be # improved to use correct range, e.g. datetime[Y] for yearly bad_locs = np.isnan(dates) has_bad_values = False if bad_locs.any(): has_bad_values = True data_col = Series(dates) data_col[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) if fmt in ["%tc", "tc"]: # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') elif fmt in ["%tC", "tC"]: from warnings import warn warn("Encountered %tC format. Leaving in Stata Internal Format.") conv_dates = Series(dates, dtype=np.object) if has_bad_values: conv_dates[bad_locs] = pd.NaT return conv_dates elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, 'd') elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) elif fmt in ["%tm", "tm"]: # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) elif fmt in ["%tq", "tq"]: # Delta quarters relative to base year = stata_epoch.year + dates // 4 month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, month) elif fmt in ["%th", "th"]: # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) elif fmt in ["%ty", "ty"]: # Years -- not delta year = dates month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) else: raise ValueError("Date fmt %s not understood" % fmt) if has_bad_values: # Restore NaT for bad values conv_dates[bad_locs] = NaT return conv_dates def _datetime_to_stata_elapsed_vec(dates, fmt): """ Convert from datetime to SIF. 
http://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty """ index = dates.index NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 US_PER_DAY = NS_PER_DAY / 1000 def parse_dates_safe(dates, delta=False, year=False, days=False): d = {} if is_datetime64_dtype(dates.values): if delta: delta = dates - stata_epoch d['delta'] = delta.values.astype( np.int64) // 1000 # microseconds if days or year: dates = DatetimeIndex(dates) d['year'], d['month'] = dates.year, dates.month if days: days = (dates.astype(np.int64) - to_datetime(d['year'], format='%Y').astype(np.int64)) d['days'] = days // NS_PER_DAY elif infer_dtype(dates) == 'datetime': if delta: delta = dates.values - stata_epoch f = lambda x: \ US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds v = np.vectorize(f) d['delta'] = v(delta) if year: year_month = dates.apply(lambda x: 100 * x.year + x.month) d['year'] = year_month.values // 100 d['month'] = (year_month.values - d['year'] * 100) if days: f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days v = np.vectorize(f) d['days'] = v(dates) else: raise ValueError('Columns containing dates must contain either ' 'datetime64, datetime.datetime or null values.') return DataFrame(d, index=index) bad_loc = isnull(dates) index = dates.index if bad_loc.any(): dates = Series(dates) if is_datetime64_dtype(dates): dates[bad_loc] = to_datetime(stata_epoch) else: dates[bad_loc] = stata_epoch if fmt in ["%tc", "tc"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: from warnings import warn warn("Stata Internal Format tC not supported.") conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta // US_PER_DAY elif fmt in ["%tw", "tw"]: d = parse_dates_safe(dates, year=True, days=True) conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7) elif fmt in ["%tm", "tm"]: d = parse_dates_safe(dates, year=True) conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1) elif fmt in ["%tq", "tq"]: d = parse_dates_safe(dates, year=True) conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) conv_dates = 2 * (d.year - stata_epoch.year) + \ (d.month > 6).astype(np.int) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year else: raise ValueError("Format %s is not a known Stata date format" % fmt) conv_dates = Series(conv_dates, dtype=np.float64) missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0] conv_dates[bad_loc] = missing_value return Series(conv_dates, index=index) excessive_string_length_error = """ Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters. Column '%s' does not satisfy this restriction. """ class PossiblePrecisionLoss(Warning): pass precision_loss_doc = """ Column converted from %s to %s, and some data are outside of the lossless conversion range. This may result in a loss of precision in the saved data. """ class ValueLabelTypeMismatch(Warning): pass value_label_mismatch_doc = """ Stata value labels (pandas categories) must be strings. Column {0} contains non-string labels which will be converted to strings. Please check that the Stata data file created has not lost information due to duplicate labels. 
""" class InvalidColumnName(Warning): pass invalid_name_doc = """ Not all pandas column names were valid Stata variable names. The following replacements have been made: {0} If this is not what you expect, please make sure you have Stata-compliant column names in your DataFrame (strings only, max 32 characters, only alphanumerics and underscores, no Stata reserved words) """ def _cast_to_stata_types(data): """Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and converts if necessary. Parameters ---------- data : DataFrame The DataFrame to check and convert Notes ----- Numeric columns in Stata must be one of int8, int16, int32, float32 or float64, with some additional value restrictions. int8 and int16 columns are checked for violations of the value restrictions and upcast if needed. int64 data is not usable in Stata, and so it is downcast to int32 whenever the value are in the int32 range, and sidecast to float64 when larger than this range. If the int64 values are outside of the range of those perfectly representable as float64 values, a warning is raised. bool columns are cast to int8. uint colums are converted to int of the same size if there is no loss in precision, other wise are upcast to a larger type. uint64 is currently not supported since it is concerted to object in a DataFrame. """ ws = '' # original, if small, if large conversion_data = ((np.bool, np.int8, np.int8), (np.uint8, np.int8, np.int16), (np.uint16, np.int16, np.int32), (np.uint32, np.int32, np.int64)) float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0] float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0] for col in data: dtype = data[col].dtype # Cast from unsupported types to supported types for c_data in conversion_data: if dtype == c_data[0]: if data[col].max() <= np.iinfo(c_data[1]).max: dtype = c_data[1] else: dtype = c_data[2] if c_data[2] == np.float64: # Warn if necessary if data[col].max() >= 2 ** 53: ws = precision_loss_doc % ('uint64', 'float64') data[col] = data[col].astype(dtype) # Check values and upcast if necessary if dtype == np.int8: if data[col].max() > 100 or data[col].min() < -127: data[col] = data[col].astype(np.int16) elif dtype == np.int16: if data[col].max() > 32740 or data[col].min() < -32767: data[col] = data[col].astype(np.int32) elif dtype == np.int64: if (data[col].max() <= 2147483620 and data[col].min() >= -2147483647): data[col] = data[col].astype(np.int32) else: data[col] = data[col].astype(np.float64) if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53: ws = precision_loss_doc % ('int64', 'float64') elif dtype in (np.float32, np.float64): value = data[col].max() if np.isinf(value): msg = 'Column {0} has a maximum value of infinity which is ' \ 'outside the range supported by Stata.' 
raise ValueError(msg.format(col)) if dtype == np.float32 and value > float32_max: data[col] = data[col].astype(np.float64) elif dtype == np.float64: if value > float64_max: msg = 'Column {0} has a maximum value ({1}) outside the ' \ 'range supported by Stata ({1})' raise ValueError(msg.format(col, value, float64_max)) if ws: import warnings warnings.warn(ws, PossiblePrecisionLoss) return data class StataValueLabel(object): """ Parse a categorical column and prepare formatted output Parameters ----------- value : int8, int16, int32, float32 or float64 The Stata missing value code Attributes ---------- string : string String representation of the Stata missing value value : int8, int16, int32, float32 or float64 The original encoded missing value Methods ------- generate_value_label """ def __init__(self, catarray): self.labname = catarray.name categories = catarray.cat.categories self.value_labels = list(zip(np.arange(len(categories)), categories)) self.value_labels.sort(key=lambda x: x[0]) self.text_len = np.int32(0) self.off = [] self.val = [] self.txt = [] self.n = 0 # Compute lengths and setup lists of offsets and labels for vl in self.value_labels: category = vl[1] if not isinstance(category, string_types): category = str(category) import warnings warnings.warn(value_label_mismatch_doc.format(catarray.name), ValueLabelTypeMismatch) self.off.append(self.text_len) self.text_len += len(category) + 1 # +1 for the padding self.val.append(vl[0]) self.txt.append(category) self.n += 1 if self.text_len > 32000: raise ValueError('Stata value labels for a single variable must ' 'have a combined length less than 32,000 ' 'characters.') # Ensure int32 self.off = np.array(self.off, dtype=np.int32) self.val = np.array(self.val, dtype=np.int32) # Total length self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len def _encode(self, s): """ Python 3 compatability shim """ if compat.PY3: return s.encode(self._encoding) else: return s def generate_value_label(self, byteorder, encoding): """ Parameters ---------- byteorder : str Byte order of the output encoding : str File encoding Returns ------- value_label : bytes Bytes containing the formatted value label """ self._encoding = encoding bio = BytesIO() null_string = '\x00' null_byte = b'\x00' # len bio.write(struct.pack(byteorder + 'i', self.len)) # labname labname = self._encode(_pad_bytes(self.labname[:32], 33)) bio.write(labname) # padding - 3 bytes for i in range(3): bio.write(struct.pack('c', null_byte)) # value_label_table # n - int32 bio.write(struct.pack(byteorder + 'i', self.n)) # textlen - int32 bio.write(struct.pack(byteorder + 'i', self.text_len)) # off - int32 array (n elements) for offset in self.off: bio.write(struct.pack(byteorder + 'i', offset)) # val - int32 array (n elements) for value in self.val: bio.write(struct.pack(byteorder + 'i', value)) # txt - Text labels, null terminated for text in self.txt: bio.write(self._encode(text + null_string)) bio.seek(0) return bio.read() class StataMissingValue(StringMixin): """ An observation's missing value. Parameters ----------- value : int8, int16, int32, float32 or float64 The Stata missing value code Attributes ---------- string : string String representation of the Stata missing value value : int8, int16, int32, float32 or float64 The original encoded missing value Notes ----- More information: <http://www.stata.com/help.cgi?missing> Integer missing values make the code '.', '.a', ..., '.z' to the ranges 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... 
2147483647 (for int32). Missing values for floating point data types are more complex but the pattern is simple to discern from the following table. np.float32 missing values (float in Stata) 0000007f . 0008007f .a 0010007f .b ... 00c0007f .x 00c8007f .y 00d0007f .z np.float64 missing values (double in Stata) 000000000000e07f . 000000000001e07f .a 000000000002e07f .b ... 000000000018e07f .x 000000000019e07f .y 00000000001ae07f .z """ # Construct a dictionary of missing values MISSING_VALUES = {} bases = (101, 32741, 2147483621) for b in bases: # Conversion to long to avoid hash issues on 32 bit platforms #8968 MISSING_VALUES[compat.long(b)] = '.' for i in range(1, 27): MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i) float32_base = b'\x00\x00\x00\x7f' increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0] for i in range(27): value = struct.unpack('<f', float32_base)[0] MISSING_VALUES[value] = '.' if i > 0: MISSING_VALUES[value] += chr(96 + i) int_value = struct.unpack('<i', struct.pack('<f', value))[ 0] + increment float32_base = struct.pack('<i', int_value) float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f' increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0] for i in range(27): value = struct.unpack('<d', float64_base)[0] MISSING_VALUES[value] = '.' if i > 0: MISSING_VALUES[value] += chr(96 + i) int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment float64_base = struct.pack('q', int_value) BASE_MISSING_VALUES = {'int8': 101, 'int16': 32741, 'int32': 2147483621, 'float32': struct.unpack('<f', float32_base)[0], 'float64': struct.unpack('<d', float64_base)[0]} def __init__(self, value): self._value = value # Conversion to long to avoid hash issues on 32 bit platforms #8968 value = compat.long(value) if value < 2147483648 else float(value) self._str = self.MISSING_VALUES[value] string = property(lambda self: self._str, doc="The Stata representation of the missing value: " "'.', '.a'..'.z'") value = property(lambda self: self._value, doc='The binary representation of the missing value.') def __unicode__(self): return self.string def __repr__(self): # not perfect :-/ return "%s(%s)" % (self.__class__, self) def __eq__(self, other): return (isinstance(other, self.__class__) and self.string == other.string and self.value == other.value) @classmethod def get_base_missing_value(cls, dtype): if dtype == np.int8: value = cls.BASE_MISSING_VALUES['int8'] elif dtype == np.int16: value = cls.BASE_MISSING_VALUES['int16'] elif dtype == np.int32: value = cls.BASE_MISSING_VALUES['int32'] elif dtype == np.float32: value = cls.BASE_MISSING_VALUES['float32'] elif dtype == np.float64: value = cls.BASE_MISSING_VALUES['float64'] else: raise ValueError('Unsupported dtype') return value class StataParser(object): _default_encoding = 'iso-8859-1' def __init__(self, encoding): self._encoding = encoding # type code. # -------------------- # str1 1 = 0x01 # str2 2 = 0x02 # ... 
# str244 244 = 0xf4 # byte 251 = 0xfb (sic) # int 252 = 0xfc # long 253 = 0xfd # float 254 = 0xfe # double 255 = 0xff # -------------------- # NOTE: the byte type seems to be reserved for categorical variables # with a label, but the underlying variable is -127 to 100 # we're going to drop the label and cast to int self.DTYPE_MAP = \ dict( lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) + [ (251, np.int8), (252, np.int16), (253, np.int32), (254, np.float32), (255, np.float64) ] ) self.DTYPE_MAP_XML = \ dict( [ (32768, np.uint8), # Keys to GSO (65526, np.float64), (65527, np.float32), (65528, np.int32), (65529, np.int16), (65530, np.int8) ] ) self.TYPE_MAP = lrange(251) + list('bhlfd') self.TYPE_MAP_XML = \ dict( [ # Not really a Q, unclear how to handle byteswap (32768, 'Q'), (65526, 'd'), (65527, 'f'), (65528, 'l'), (65529, 'h'), (65530, 'b') ] ) # NOTE: technically, some of these are wrong. there are more numbers # that can be represented. it's the 27 ABOVE and BELOW the max listed # numeric data type in [U] 12.2.2 of the 11.2 manual float32_min = b'\xff\xff\xff\xfe' float32_max = b'\xff\xff\xff\x7e' float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff' float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f' self.VALID_RANGE = { 'b': (-127, 100), 'h': (-32767, 32740), 'l': (-2147483647, 2147483620), 'f': (np.float32(struct.unpack('<f', float32_min)[0]), np.float32(struct.unpack('<f', float32_max)[0])), 'd': (np.float64(struct.unpack('<d', float64_min)[0]), np.float64(struct.unpack('<d', float64_max)[0])) } self.OLD_TYPE_MAPPING = { 98: 251, # byte 105: 252, # int 108: 253, # long 102: 254 # float # don't know old code for double } # These missing values are the generic '.' in Stata, and are used # to replace nans self.MISSING_VALUES = { 'b': 101, 'h': 32741, 'l': 2147483621, 'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]), 'd': np.float64( struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]) } self.NUMPY_TYPE_MAP = { 'b': 'i1', 'h': 'i2', 'l': 'i4', 'f': 'f4', 'd': 'f8', 'Q': 'u8' } # Reserved words cannot be used as variable names self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break', 'byte', 'case', 'catch', 'class', 'colvector', 'complex', 'const', 'continue', 'default', 'delegate', 'delete', 'do', 'double', 'else', 'eltypedef', 'end', 'enum', 'explicit', 'export', 'external', 'float', 'for', 'friend', 'function', 'global', 'goto', 'if', 'inline', 'int', 'local', 'long', 'NULL', 'pragma', 'protected', 'quad', 'rowvector', 'short', 'typedef', 'typename', 'virtual') class StataReader(StataParser, BaseIterator): __doc__ = _stata_reader_doc def __init__(self, path_or_buf, convert_dates=True, convert_categoricals=True, index=None, convert_missing=False, preserve_dtypes=True, columns=None, order_categoricals=True, encoding='iso-8859-1', chunksize=None): super(StataReader, self).__init__(encoding) self.col_sizes = () # Arguments to the reader (can be temporarily overridden in # calls to read). 
self._convert_dates = convert_dates self._convert_categoricals = convert_categoricals self._index = index self._convert_missing = convert_missing self._preserve_dtypes = preserve_dtypes self._columns = columns self._order_categoricals = order_categoricals self._encoding = encoding self._chunksize = chunksize # State variables for the file self._has_string_data = False self._missing_values = False self._can_read_value_labels = False self._column_selector_set = False self._value_labels_read = False self._data_read = False self._dtype = None self._lines_read = 0 self._native_byteorder = _set_endianness(sys.byteorder) if isinstance(path_or_buf, str): path_or_buf, encoding, _ = get_filepath_or_buffer( path_or_buf, encoding=self._default_encoding ) if isinstance(path_or_buf, (str, compat.text_type, bytes)): self.path_or_buf = open(path_or_buf, 'rb') else: # Copy to BytesIO, and ensure no encoding contents = path_or_buf.read() try: contents = contents.encode(self._default_encoding) except: pass self.path_or_buf = BytesIO(contents) self._read_header() def __enter__(self): """ enter context manager """ return self def __exit__(self, exc_type, exc_value, traceback): """ exit context manager """ self.close() def close(self): """ close the handle if its open """ try: self.path_or_buf.close() except IOError: pass def _read_header(self): first_char = self.path_or_buf.read(1) if struct.unpack('c', first_char)[0] == b'<': self._read_new_header(first_char) else: self._read_old_header(first_char) self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0 # calculate size of a data record self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist) # remove format details from %td self.fmtlist = ["%td" if x.startswith("%td") else x for x in self.fmtlist] def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. 
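        # Formats 117/118 lay the header out as a sequence of XML-like tags
        # (<release>, <byteorder>, <K>, <N>, ...).  Each fixed-width read()
        # below skips the literal tag text noted in its trailing comment;
        # the values in between are then decoded directly or via the
        # _get_* helper methods.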
self.path_or_buf.read(27) # stata_dta><header><release> self.format_version = int(self.path_or_buf.read(3)) if self.format_version not in [117, 118]: raise ValueError(_version_error) self.path_or_buf.read(21) # </release><byteorder> self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<' self.path_or_buf.read(15) # </byteorder><K> self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] self.path_or_buf.read(7) # </K><N> self.nobs = self._get_nobs() self.path_or_buf.read(11) # </N><label> self.data_label = self._get_data_label() self.path_or_buf.read(19) # </label><timestamp> self.time_stamp = self._get_time_stamp() self.path_or_buf.read(26) # </timestamp></header><map> self.path_or_buf.read(8) # 0x0000000000000000 self.path_or_buf.read(8) # position of <map> self._seek_vartypes = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16 self._seek_varnames = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10 self._seek_sortlist = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10 self._seek_formats = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9 self._seek_value_label_names = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19 # Requires version-specific treatment self._seek_variable_labels = self._get_seek_variable_labels() self.path_or_buf.read(8) # <characteristics> self.data_location = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6 self.seek_strls = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7 self.seek_value_labels = struct.unpack( self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14 self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes) self.path_or_buf.seek(self._seek_varnames) self.varlist = self._get_varlist() self.path_or_buf.seek(self._seek_sortlist) self.srtlist = struct.unpack( self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)) )[:-1] self.path_or_buf.seek(self._seek_formats) self.fmtlist = self._get_fmtlist() self.path_or_buf.seek(self._seek_value_label_names) self.lbllist = self._get_lbllist() self.path_or_buf.seek(self._seek_variable_labels) self._variable_labels = self._get_variable_labels() # Get data type information, works for versions 117-118. def _get_dtypes(self, seek_vartypes): self.path_or_buf.seek(seek_vartypes) raw_typlist = [struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] for i in range(self.nvar)] def f(typ): if typ <= 2045: return typ try: return self.TYPE_MAP_XML[typ] except KeyError: raise ValueError("cannot convert stata types [{0}]". 
format(typ)) typlist = [f(x) for x in raw_typlist] def f(typ): if typ <= 2045: return str(typ) try: return self.DTYPE_MAP_XML[typ] except KeyError: raise ValueError("cannot convert stata dtype [{0}]" .format(typ)) dtyplist = [f(x) for x in raw_typlist] return typlist, dtyplist def _get_varlist(self): if self.format_version == 117: b = 33 elif self.format_version == 118: b = 129 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] # Returns the format list def _get_fmtlist(self): if self.format_version == 118: b = 57 elif self.format_version > 113: b = 49 elif self.format_version > 104: b = 12 else: b = 7 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] # Returns the label list def _get_lbllist(self): if self.format_version >= 118: b = 129 elif self.format_version > 108: b = 33 else: b = 9 return [self._null_terminate(self.path_or_buf.read(b)) for i in range(self.nvar)] def _get_variable_labels(self): if self.format_version == 118: vlblist = [self._decode(self.path_or_buf.read(321)) for i in range(self.nvar)] elif self.format_version > 105: vlblist = [self._null_terminate(self.path_or_buf.read(81)) for i in range(self.nvar)] else: vlblist = [self._null_terminate(self.path_or_buf.read(32)) for i in range(self.nvar)] return vlblist def _get_nobs(self): if self.format_version == 118: return struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0] else: return struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] def _get_data_label(self): if self.format_version == 118: strlen = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] return self._decode(self.path_or_buf.read(strlen)) elif self.format_version == 117: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self._null_terminate(self.path_or_buf.read(strlen)) elif self.format_version > 105: return self._null_terminate(self.path_or_buf.read(81)) else: return self._null_terminate(self.path_or_buf.read(32)) def _get_time_stamp(self): if self.format_version == 118: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self.path_or_buf.read(strlen).decode("utf-8") elif self.format_version == 117: strlen = struct.unpack('b', self.path_or_buf.read(1))[0] return self._null_terminate(self.path_or_buf.read(strlen)) elif self.format_version > 104: return self._null_terminate(self.path_or_buf.read(18)) else: raise ValueError() def _get_seek_variable_labels(self): if self.format_version == 117: self.path_or_buf.read(8) # <variable_lables>, throw away # Stata 117 data files do not follow the described format. 
This is # a work around that uses the previous label, 33 bytes for each # variable, 20 for the closing tag and 17 for the opening tag return self._seek_value_label_names + (33 * self.nvar) + 20 + 17 elif self.format_version == 118: return struct.unpack(self.byteorder + 'q', self.path_or_buf.read(8))[0] + 17 else: raise ValueError() def _read_old_header(self, first_char): self.format_version = struct.unpack('b', first_char)[0] if self.format_version not in [104, 105, 108, 111, 113, 114, 115]: raise ValueError(_version_error) self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[ 0] == 0x1 and '>' or '<' self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0] self.path_or_buf.read(1) # unused self.nvar = struct.unpack(self.byteorder + 'H', self.path_or_buf.read(2))[0] self.nobs = self._get_nobs() self.data_label = self._get_data_label() self.time_stamp = self._get_time_stamp() # descriptors if self.format_version > 108: typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)] else: buf = self.path_or_buf.read(self.nvar) typlistb = np.frombuffer(buf, dtype=np.uint8) typlist = [] for tp in typlistb: if tp in self.OLD_TYPE_MAPPING: typlist.append(self.OLD_TYPE_MAPPING[tp]) else: typlist.append(tp - 127) # py2 string, py3 bytes try: self.typlist = [self.TYPE_MAP[typ] for typ in typlist] except: raise ValueError("cannot convert stata types [{0}]" .format(','.join(str(x) for x in typlist))) try: self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] except: raise ValueError("cannot convert stata dtypes [{0}]" .format(','.join(str(x) for x in typlist))) if self.format_version > 108: self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)] else: self.varlist = [self._null_terminate(self.path_or_buf.read(9)) for i in range(self.nvar)] self.srtlist = struct.unpack( self.byteorder + ('h' * (self.nvar + 1)), self.path_or_buf.read(2 * (self.nvar + 1)) )[:-1] self.fmtlist = self._get_fmtlist() self.lbllist = self._get_lbllist() self._variable_labels = self._get_variable_labels() # ignore expansion fields (Format 105 and later) # When reading, read five bytes; the last four bytes now tell you # the size of the next read, which you discard. You then continue # like this until you read 5 bytes of zeros. if self.format_version > 104: while True: data_type = struct.unpack(self.byteorder + 'b', self.path_or_buf.read(1))[0] if self.format_version > 108: data_len = struct.unpack(self.byteorder + 'i', self.path_or_buf.read(4))[0] else: data_len = struct.unpack(self.byteorder + 'h', self.path_or_buf.read(2))[0] if data_type == 0: break self.path_or_buf.read(data_len) # necessary data to continue parsing self.data_location = self.path_or_buf.tell() def _calcsize(self, fmt): return (type(fmt) is int and fmt or struct.calcsize(self.byteorder + fmt)) def _decode(self, s): s = s.partition(b"\0")[0] return s.decode('utf-8') def _null_terminate(self, s): if compat.PY3 or self._encoding is not None: # have bytes not strings, so must decode s = s.partition(b"\0")[0] return s.decode(self._encoding or self._default_encoding) else: null_byte = "\0" try: return s.lstrip(null_byte)[:s.index(null_byte)] except: return s def _read_value_labels(self): if self.format_version <= 108: # Value labels are not supported in version 108 and earlier. 
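# Standalone sketch of the null-termination handling implemented by _decode and
# _null_terminate above: Stata stores fixed-width, NUL-padded byte fields, so
# the raw bytes are cut at the first b"\0" before decoding.
raw = b"price\x00\x00\x00\x00"
text = raw.partition(b"\0")[0].decode("latin-1")
assert text == "price"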
return if self._value_labels_read: # Don't read twice return if self.format_version >= 117: self.path_or_buf.seek(self.seek_value_labels) else: offset = self.nobs * self._dtype.itemsize self.path_or_buf.seek(self.data_location + offset) self._value_labels_read = True self.value_label_dict = dict() while True: if self.format_version >= 117: if self.path_or_buf.read(5) == b'</val': # <lbl> break # end of value label table slength = self.path_or_buf.read(4) if not slength: break # end of value label table (format < 117) if self.format_version <= 117: labname = self._null_terminate(self.path_or_buf.read(33)) else: labname = self._decode(self.path_or_buf.read(129)) self.path_or_buf.read(3) # padding n = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] txtlen = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] off = np.frombuffer(self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n) val = np.frombuffer(self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n) ii = np.argsort(off) off = off[ii] val = val[ii] txt = self.path_or_buf.read(txtlen) self.value_label_dict[labname] = dict() for i in range(n): end = off[i + 1] if i < n - 1 else txtlen if self.format_version <= 117: self.value_label_dict[labname][val[i]] = ( self._null_terminate(txt[off[i]:end])) else: self.value_label_dict[labname][val[i]] = ( self._decode(txt[off[i]:end])) if self.format_version >= 117: self.path_or_buf.read(6) # </lbl> self._value_labels_read = True def _read_strls(self): self.path_or_buf.seek(self.seek_strls) self.GSO = {0: ''} while True: if self.path_or_buf.read(3) != b'GSO': break if self.format_version == 117: v_o = struct.unpack(self.byteorder + 'Q', self.path_or_buf.read(8))[0] else: buf = self.path_or_buf.read(12) # Only tested on little endian file on little endian machine. if self.byteorder == '<': buf = buf[0:2] + buf[4:10] else: buf = buf[0:2] + buf[6:] v_o = struct.unpack('Q', buf)[0] typ = struct.unpack('B', self.path_or_buf.read(1))[0] length = struct.unpack(self.byteorder + 'I', self.path_or_buf.read(4))[0] va = self.path_or_buf.read(length) if typ == 130: encoding = 'utf-8' if self.format_version == 117: encoding = self._encoding or self._default_encoding va = va[0:-1].decode(encoding) self.GSO[v_o] = va # legacy @Appender('DEPRECATED: ' + _data_method_doc) def data(self, **kwargs): import warnings warnings.warn("'data' is deprecated, use 'read' instead") if self._data_read: raise Exception("Data has already been read.") self._data_read = True return self.read(None, **kwargs) def __next__(self): return self.read(nrows=self._chunksize or 1) def get_chunk(self, size=None): """ Reads lines from Stata file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size) @Appender(_read_method_doc) def read(self, nrows=None, convert_dates=None, convert_categoricals=None, index=None, convert_missing=None, preserve_dtypes=None, columns=None, order_categoricals=None): # Handle empty file or chunk. If reading incrementally raise # StopIteration. If reading the whole thing return an empty # data frame. 
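# Hedged sketch of the mapping produced by _read_value_labels above: a dict
# keyed by label-set name that maps integer codes to their text labels (the
# names and codes here are invented purely for illustration).
value_label_dict = {'sexlbl': {1: 'male', 2: 'female'}}
codes = [1, 2, 2, 1]
labels = [value_label_dict['sexlbl'][code] for code in codes]
assert labels == ['male', 'female', 'female', 'male']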
if (self.nobs == 0) and (nrows is None): self._can_read_value_labels = True self._data_read = True self.close() return DataFrame(columns=self.varlist) # Handle options if convert_dates is None: convert_dates = self._convert_dates if convert_categoricals is None: convert_categoricals = self._convert_categoricals if convert_missing is None: convert_missing = self._convert_missing if preserve_dtypes is None: preserve_dtypes = self._preserve_dtypes if columns is None: columns = self._columns if order_categoricals is None: order_categoricals = self._order_categoricals if nrows is None: nrows = self.nobs if (self.format_version >= 117) and (self._dtype is None): self._can_read_value_labels = True self._read_strls() # Setup the dtype. if self._dtype is None: dtype = [] # Convert struct data types to numpy data type for i, typ in enumerate(self.typlist): if typ in self.NUMPY_TYPE_MAP: dtype.append(('s' + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ])) else: dtype.append(('s' + str(i), 'S' + str(typ))) dtype = np.dtype(dtype) self._dtype = dtype # Read data dtype = self._dtype max_read_len = (self.nobs - self._lines_read) * dtype.itemsize read_len = nrows * dtype.itemsize read_len = min(read_len, max_read_len) if read_len <= 0: # Iterator has finished, should never be here unless # we are reading the file incrementally if convert_categoricals: self._read_value_labels() self.close() raise StopIteration offset = self._lines_read * dtype.itemsize self.path_or_buf.seek(self.data_location + offset) read_lines = min(nrows, self.nobs - self._lines_read) data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype, count=read_lines) self._lines_read += read_lines if self._lines_read == self.nobs: self._can_read_value_labels = True self._data_read = True # if necessary, swap the byte order to native here if self.byteorder != self._native_byteorder: data = data.byteswap().newbyteorder() if convert_categoricals: self._read_value_labels() if len(data) == 0: data = DataFrame(columns=self.varlist, index=index) else: data = DataFrame.from_records(data, index=index) data.columns = self.varlist # If index is not specified, use actual row number rather than # restarting at 0 for each chunk. 
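# Standalone numpy sketch of the record read performed in read() above: the
# column layout is described by a structured dtype and the raw bytes are
# reinterpreted row-wise with np.frombuffer (the dtype codes here are illustrative).
import numpy as np

dtype = np.dtype([('s0', '<i4'), ('s1', '<f8')])           # an int32 and a float64 column
buf = np.array([(1, 2.5), (2, 3.5)], dtype=dtype).tobytes()
rows = np.frombuffer(buf, dtype=dtype, count=2)
assert rows['s0'].tolist() == [1, 2]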
if index is None: ix = np.arange(self._lines_read - read_lines, self._lines_read) data = data.set_index(ix) if columns is not None: try: data = self._do_select_columns(data, columns) except ValueError: self.close() raise # Decode strings for col, typ in zip(data, self.typlist): if type(typ) is int: data[col] = data[col].apply( self._null_terminate, convert_dtype=True) data = self._insert_strls(data) cols_ = np.where(self.dtyplist)[0] # Convert columns (if needed) to match input type index = data.index requires_type_conversion = False data_formatted = [] for i in cols_: if self.dtyplist[i] is not None: col = data.columns[i] dtype = data[col].dtype if dtype != np.dtype(object) and dtype != self.dtyplist[i]: requires_type_conversion = True data_formatted.append( (col, Series(data[col], index, self.dtyplist[i]))) else: data_formatted.append((col, data[col])) if requires_type_conversion: data = DataFrame.from_items(data_formatted) del data_formatted self._do_convert_missing(data, convert_missing) if convert_dates: cols = np.where(lmap(lambda x: x in _date_formats, self.fmtlist))[0] for i in cols: col = data.columns[i] try: data[col] = _stata_elapsed_date_to_datetime_vec( data[col], self.fmtlist[i]) except ValueError: self.close() raise if convert_categoricals and self.format_version > 108: data = self._do_convert_categoricals(data, self.value_label_dict, self.lbllist, order_categoricals) if not preserve_dtypes: retyped_data = [] convert = False for col in data: dtype = data[col].dtype if dtype in (np.float16, np.float32): dtype = np.float64 convert = True elif dtype in (np.int8, np.int16, np.int32): dtype = np.int64 convert = True retyped_data.append((col, data[col].astype(dtype))) if convert: data = DataFrame.from_items(retyped_data) return data def _do_convert_missing(self, data, convert_missing): # Check for missing values, and replace if found for i, colname in enumerate(data): fmt = self.typlist[i] if fmt not in self.VALID_RANGE: continue nmin, nmax = self.VALID_RANGE[fmt] series = data[colname] missing = np.logical_or(series < nmin, series > nmax) if not missing.any(): continue if convert_missing: # Replacement follows Stata notation missing_loc = np.argwhere(missing) umissing, umissing_loc = np.unique(series[missing], return_inverse=True) replacement = Series(series, dtype=np.object) for j, um in enumerate(umissing): missing_value = StataMissingValue(um) loc = missing_loc[umissing_loc == j] replacement.iloc[loc] = missing_value else: # All replacements are identical dtype = series.dtype if dtype not in (np.float32, np.float64): dtype = np.float64 replacement = Series(series, dtype=dtype) replacement[missing] = np.nan data[colname] = replacement def _insert_strls(self, data): if not hasattr(self, 'GSO') or len(self.GSO) == 0: return data for i, typ in enumerate(self.typlist): if typ != 'Q': continue data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]] return data def _do_select_columns(self, data, columns): if not self._column_selector_set: column_set = set(columns) if len(column_set) != len(columns): raise ValueError('columns contains duplicate entries') unmatched = column_set.difference(data.columns) if unmatched: raise ValueError('The following columns were not found in the ' 'Stata data set: ' + ', '.join(list(unmatched))) # Copy information for retained columns for later processing dtyplist = [] typlist = [] fmtlist = [] lbllist = [] for col in columns: i = data.columns.get_loc(col) dtyplist.append(self.dtyplist[i]) typlist.append(self.typlist[i]) fmtlist.append(self.fmtlist[i]) 
lbllist.append(self.lbllist[i]) self.dtyplist = dtyplist self.typlist = typlist self.fmtlist = fmtlist self.lbllist = lbllist self._column_selector_set = True return data[columns] def _do_convert_categoricals(self, data, value_label_dict, lbllist, order_categoricals): """ Converts categorical columns to Categorical type. """ value_labels = list(compat.iterkeys(value_label_dict)) cat_converted_data = [] for col, label in zip(data, lbllist): if label in value_labels: # Explicit call with ordered=True cat_data = Categorical(data[col], ordered=order_categoricals) categories = [] for category in cat_data.categories: if category in value_label_dict[label]: categories.append(value_label_dict[label][category]) else: categories.append(category) # Partially labeled try: cat_data.categories = categories except ValueError: vc = Series(categories).value_counts() repeats = list(vc.index[vc > 1]) repeats = '\n' + '-' * 80 + '\n'.join(repeats) msg = 'Value labels for column {0} are not unique. The ' \ 'repeated labels are:\n{1}'.format(col, repeats) raise ValueError(msg) # TODO: is the next line needed above in the data(...) method? cat_data = Series(cat_data, index=data.index) cat_converted_data.append((col, cat_data)) else: cat_converted_data.append((col, data[col])) data = DataFrame.from_items(cat_converted_data) return data def data_label(self): """Returns data label of Stata file""" return self.data_label def variable_labels(self): """Returns variable labels as a dict, associating each variable name with corresponding label """ return dict(zip(self.varlist, self._variable_labels)) def value_labels(self): """Returns a dict, associating each variable name a dict, associating each value its corresponding label """ if not self._value_labels_read: self._read_value_labels() return self.value_label_dict def _open_file_binary_write(fname, encoding): if hasattr(fname, 'write'): # if 'b' not in fname.mode: return fname return open(fname, "wb") def _set_endianness(endianness): if endianness.lower() in ["<", "little"]: return "<" elif endianness.lower() in [">", "big"]: return ">" else: # pragma : no cover raise ValueError("Endianness %s not understood" % endianness) def _pad_bytes(name, length): """ Takes a char string and pads it with null bytes until it's length chars """ return name + "\x00" * (length - len(name)) def _convert_datetime_to_stata_type(fmt): """ Converts from one of the stata date formats to a type in TYPE_MAP """ if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", "%tq", "th", "%th", "ty", "%ty"]: return np.float64 # Stata expects doubles for SIFs else: raise NotImplementedError("Format %s not implemented" % fmt) def _maybe_convert_to_int_keys(convert_dates, varlist): new_dict = {} for key in convert_dates: if not convert_dates[key].startswith("%"): # make sure proper fmts convert_dates[key] = "%" + convert_dates[key] if key in varlist: new_dict.update({varlist.index(key): convert_dates[key]}) else: if not isinstance(key, int): raise ValueError("convert_dates key must be a " "column or an integer") new_dict.update({key: convert_dates[key]}) return new_dict def _dtype_to_stata_type(dtype, column): """ Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 
1 - 244 are strings of this length Pandas Stata 251 - chr(251) - for int8 byte 252 - chr(252) - for int16 int 253 - chr(253) - for int32 long 254 - chr(254) - for float32 float 255 - chr(255) - for double double If there are dates to convert, then dtype will already have the correct type inserted. """ # TODO: expand to handle datetime to integer conversion if dtype.type == np.string_: return chr(dtype.itemsize) elif dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(_ensure_object(column.values)) return chr(max(itemsize, 1)) elif dtype == np.float64: return chr(255) elif dtype == np.float32: return chr(254) elif dtype == np.int32: return chr(253) elif dtype == np.int16: return chr(252) elif dtype == np.int8: return chr(251) else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype) def _dtype_to_default_stata_fmt(dtype, column): """ Maps numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. If not a string, raise ValueError float64 -> "%10.0g" float32 -> "%9.0g" int64 -> "%9.0g" int32 -> "%12.0g" int16 -> "%8.0g" int8 -> "%8.0g" """ # TODO: Refactor to combine type with format # TODO: expand this to handle a default datetime format? if dtype.type == np.object_: inferred_dtype = infer_dtype(column.dropna()) if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Writing general object arrays is not supported') itemsize = max_len_string_array(_ensure_object(column.values)) if itemsize > 244: raise ValueError(excessive_string_length_error % column.name) return "%" + str(max(itemsize, 1)) + "s" elif dtype == np.float64: return "%10.0g" elif dtype == np.float32: return "%9.0g" elif dtype == np.int32: return "%12.0g" elif dtype == np.int8 or dtype == np.int16: return "%8.0g" else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype) class StataWriter(StataParser): """ A class for writing Stata binary dta files Parameters ---------- fname : str or buffer String path of file-like object data : DataFrame Input to save convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when wirting the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder` time_stamp : datetime A datetime to use as file creation date. Default is the current time dataset_label : str A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 Returns ------- writer : StataWriter instance The StataWriter instance has a write_file method, which will write the file to the given `fname`. 
Raises ------ NotImplementedError * If datetimes contain timezone information ValueError * Columns listed in convert_dates are noth either datetime64[ns] or datetime.datetime * Column dtype is not representable in Stata * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters Examples -------- >>> import pandas as pd >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b']) >>> writer = StataWriter('./data_file.dta', data) >>> writer.write_file() Or with dates >>> from datetime import datetime >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date']) >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'}) >>> writer.write_file() """ def __init__(self, fname, data, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None): super(StataWriter, self).__init__(encoding) self._convert_dates = {} if convert_dates is None else convert_dates self._write_index = write_index self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) if byteorder is None: byteorder = sys.byteorder self._byteorder = _set_endianness(byteorder) self._fname = fname self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} def _write(self, to_write): """ Helper to call encode before writing to file for Python 3 compat. """ if compat.PY3: self._file.write(to_write.encode(self._encoding or self._default_encoding)) else: self._file.write(to_write) def _prepare_categoricals(self, data): """Check for categorical columns, retain categorical information for Stata file and convert categorical data to int""" is_cat = [is_categorical_dtype(data[col]) for col in data] self._is_col_cat = is_cat self._value_labels = [] if not any(is_cat): return data get_base_missing_value = StataMissingValue.get_base_missing_value index = data.index data_formatted = [] for col, col_is_cat in zip(data, is_cat): if col_is_cat: self._value_labels.append(StataValueLabel(data[col])) dtype = data[col].cat.codes.dtype if dtype == np.int64: raise ValueError('It is not possible to export ' 'int64-based categorical data to Stata.') values = data[col].cat.codes.values.copy() # Upcast if needed so that correct missing values can be set if values.max() >= get_base_missing_value(dtype): if dtype == np.int8: dtype = np.int16 elif dtype == np.int16: dtype = np.int32 else: dtype = np.float64 values = np.array(values, dtype=dtype) # Replace missing values with Stata missing value for type values[values == -1] = get_base_missing_value(dtype) data_formatted.append((col, values, index)) else: data_formatted.append((col, data[col])) return DataFrame.from_items(data_formatted) def _replace_nans(self, data): # return data """Checks floating point data columns for nans, and replaces these with the generic Stata for missing value (.)""" for c in data: dtype = data[c].dtype if dtype in (np.float32, np.float64): if dtype == np.float32: replacement = self.MISSING_VALUES['f'] else: replacement = self.MISSING_VALUES['d'] data[c] = data[c].fillna(replacement) return data def _check_column_names(self, data): """ Checks column names to ensure that they are valid Stata column names. 
This includes checks for: * Non-string names * Stata keywords * Variables that start with numbers * Variables with names that are too long When an illegal variable name is detected, it is converted, and if dates are exported, the variable name is propagated to the date conversion dictionary """ converted_names = [] columns = list(data.columns) original_columns = columns[:] duplicate_var_id = 0 for j, name in enumerate(columns): orig_name = name if not isinstance(name, string_types): name = text_type(name) for c in name: if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \ (c < '0' or c > '9') and c != '_': name = name.replace(c, '_') # Variable name must not be a reserved word if name in self.RESERVED_WORDS: name = '_' + name # Variable name may not start with a number if name[0] >= '0' and name[0] <= '9': name = '_' + name name = name[:min(len(name), 32)] if not name == orig_name: # check for duplicates while columns.count(name) > 0: # prepend ascending number to avoid duplicates name = '_' + str(duplicate_var_id) + name name = name[:min(len(name), 32)] duplicate_var_id += 1 # need to possibly encode the orig name if its unicode try: orig_name = orig_name.encode('utf-8') except: pass converted_names.append( '{0} -> {1}'.format(orig_name, name)) columns[j] = name data.columns = columns # Check date conversion, and fix key if needed if self._convert_dates: for c, o in zip(columns, original_columns): if c != o: self._convert_dates[c] = self._convert_dates[o] del self._convert_dates[o] if converted_names: import warnings ws = invalid_name_doc.format('\n '.join(converted_names)) warnings.warn(ws, InvalidColumnName) return data def _prepare_pandas(self, data): # NOTE: we might need a different API / class for pandas objects so # we can set different semantics - handle this with a PR to pandas.io data = data.copy() if self._write_index: data = data.reset_index() # Ensure column names are strings data = self._check_column_names(data) # Check columns for compatibility with stata, upcast if necessary # Raise if outside the supported range data = _cast_to_stata_types(data) # Replace NaNs with Stata missing values data = self._replace_nans(data) # Convert categoricals to int data, and strip labels data = self._prepare_categoricals(data) self.nobs, self.nvar = data.shape self.data = data self.varlist = data.columns.tolist() dtypes = data.dtypes # Ensure all date columns are converted for col in data: if col in self._convert_dates: continue if is_datetime64_dtype(data[col]): self._convert_dates[col] = 'tc' self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates, self.varlist) for key in self._convert_dates: new_type = _convert_datetime_to_stata_type( self._convert_dates[key] ) dtypes[key] = np.dtype(new_type) self.typlist = [] self.fmtlist = [] for col, dtype in dtypes.iteritems(): self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col])) self.typlist.append(_dtype_to_stata_type(dtype, data[col])) # set the given format for the datetime cols if self._convert_dates is not None: for key in self._convert_dates: self.fmtlist[key] = self._convert_dates[key] def write_file(self): self._file = _open_file_binary_write( self._fname, self._encoding or self._default_encoding ) try: self._write_header(time_stamp=self._time_stamp, data_label=self._data_label) self._write_descriptors() self._write_variable_labels() # write 5 zeros for expansion fields self._write(_pad_bytes("", 5)) self._prepare_data() self._write_data() self._write_value_labels() finally: self._file.close() def 
_write_value_labels(self): for vl in self._value_labels: self._file.write(vl.generate_value_label(self._byteorder, self._encoding)) def _write_header(self, data_label=None, time_stamp=None): byteorder = self._byteorder # ds_format - just use 114 self._file.write(struct.pack("b", 114)) # byteorder self._write(byteorder == ">" and "\x01" or "\x02") # filetype self._write("\x01") # unused self._write("\x00") # number of vars, 2 bytes self._file.write(struct.pack(byteorder + "h", self.nvar)[:2]) # number of obs, 4 bytes self._file.write(struct.pack(byteorder + "i", self.nobs)[:4]) # data label 81 bytes, char, null terminated if data_label is None: self._file.write(self._null_terminate(_pad_bytes("", 80))) else: self._file.write( self._null_terminate(_pad_bytes(data_label[:80], 80)) ) # time stamp, 18 bytes, char, null terminated # format dd Mon yyyy hh:mm if time_stamp is None: time_stamp = datetime.datetime.now() elif not isinstance(time_stamp, datetime.datetime): raise ValueError("time_stamp should be datetime type") self._file.write( self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M")) ) def _write_descriptors(self, typlist=None, varlist=None, srtlist=None, fmtlist=None, lbllist=None): nvar = self.nvar # typlist, length nvar, format byte array for typ in self.typlist: self._write(typ) # varlist names are checked by _check_column_names # varlist, requires null terminated for name in self.varlist: name = self._null_terminate(name, True) name = _pad_bytes(name[:32], 33) self._write(name) # srtlist, 2*(nvar+1), int array, encoded by byteorder srtlist = _pad_bytes("", 2 * (nvar + 1)) self._write(srtlist) # fmtlist, 49*nvar, char array for fmt in self.fmtlist: self._write(_pad_bytes(fmt, 49)) # lbllist, 33*nvar, char array for i in range(nvar): # Use variable name when categorical if self._is_col_cat[i]: name = self.varlist[i] name = self._null_terminate(name, True) name = _pad_bytes(name[:32], 33) self._write(name) else: # Default is empty label self._write(_pad_bytes("", 33)) def _write_variable_labels(self): # Missing labels are 80 blank characters plus null termination blank = _pad_bytes('', 81) if self._variable_labels is None: for i in range(self.nvar): self._write(blank) return for col in self.data: if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: raise ValueError('Variable labels must be 80 characters ' 'or fewer') is_latin1 = all(ord(c) < 256 for c in label) if not is_latin1: raise ValueError('Variable labels must contain only ' 'characters that can be encoded in ' 'Latin-1') self._write(_pad_bytes(label, 81)) else: self._write(blank) def _prepare_data(self): data = self.data typlist = self.typlist convert_dates = self._convert_dates # 1. Convert dates if self._convert_dates is not None: for i, col in enumerate(data): if i in convert_dates: data[col] = _datetime_to_stata_elapsed_vec(data[col], self.fmtlist[i]) # 2. 
Convert bad string data to '' and pad to correct length dtype = [] data_cols = [] has_strings = False for i, col in enumerate(data): typ = ord(typlist[i]) if typ <= 244: has_strings = True data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,)) stype = 'S%d' % typ dtype.append(('c' + str(i), stype)) string = data[col].str.encode(self._encoding) data_cols.append(string.values.astype(stype)) else: dtype.append(('c' + str(i), data[col].dtype)) data_cols.append(data[col].values) dtype = np.dtype(dtype) if has_strings: self.data = np.fromiter(zip(*data_cols), dtype=dtype) else: self.data = data.to_records(index=False) def _write_data(self): data = self.data data.tofile(self._file) def _null_terminate(self, s, as_string=False): null_byte = '\x00' if compat.PY3 and not as_string: s += null_byte return s.encode(self._encoding) else: s += null_byte return s
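# Hedged end-to-end sketch tying the reader and writer above together: write a
# small frame with StataWriter, then read it back in one-row chunks through the
# StataReader iterator interface (the file name and import path are illustrative).
import pandas as pd
from pandas.io.stata import StataReader, StataWriter

df = pd.DataFrame({'a': [1.0, 2.0], 'b': [3, 4]})
StataWriter('./example.dta', df, write_index=False).write_file()

with StataReader('./example.dta', chunksize=1) as reader:
    chunk = reader.get_chunk()    # first row only; repeated calls walk the file
    print(chunk)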
gpl-3.0
JPFrancoia/scikit-learn
sklearn/preprocessing/data.py
13
70436
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils import deprecated from ..utils.extmath import row_norms from ..utils.extmath import _incremental_mean_and_var from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.fixes import bincount from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, incr_mean_variance_axis, min_max_axis) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] DEPRECATION_MSG_1D = ( "Passing 1d arrays as data is deprecated in 0.17 and will " "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample." ) def _handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. See also -------- StandardScaler: Performs scaling to unit variance using the``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* instead of deprecated *data_min*. data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* instead of deprecated *data_max*. data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* instead of deprecated *data_range*. 
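# Illustrative numpy sketch (hypothetical data) of the transformation documented
# above for the default feature_range=(0, 1): rescale each column by its observed
# minimum and maximum.
import numpy as np

X = np.array([[1.0], [2.0], [4.0]])
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# X_std is [[0.0], [1/3], [1.0]]; MinMaxScaler().fit_transform(X) is expected
# to produce the same values for the default feature_range.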
See also -------- minmax_scale: Equivalent function without the object oriented API. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy @property @deprecated("Attribute data_range will be removed in " "0.19. Use ``data_range_`` instead") def data_range(self): return self.data_range_ @property @deprecated("Attribute data_min will be removed in " "0.19. Use ``data_min_`` instead") def data_min(self): return self.data_min_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. " "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. 
""" check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MinMaxScaler: Performs scaling to a given range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. 
Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* is recommended instead of deprecated *std_*. mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- scale: Equivalent function without the object oriented API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. """ # noqa def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy @property @deprecated("Attribute ``std_`` will be removed in 0.19. " "Use ``scale_`` instead") def std_(self): return self.scale_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. 
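# Standalone sketch (not sklearn code) of the batch-merging idea behind the
# incremental update referenced in partial_fit above: an existing running mean
# is combined with a new batch using the respective sample counts.
import numpy as np

old_mean, old_n = 1.0, 10
new_batch = np.array([2.0, 4.0])
new_n = old_n + new_batch.size
updated_mean = (old_mean * old_n + new_batch.sum()) / new_n
assert np.isclose(updated_mean, 16.0 / 12.0)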
""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. .. versionadded:: 0.17 Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. max_abs_ : ndarray, shape (n_features,) Per feature maximum absolute value. n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- maxabs_scale: Equivalent function without the object oriented API. """ def __init__(self, copy=True): self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.max_abs_ def fit(self, X, y=None): """Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = np.abs(X).max(axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next passes else: max_abs = np.maximum(self.max_abs_, max_abs) self.n_samples_seen_ += X.shape[0] self.max_abs_ = max_abs self.scale_ = _handle_zeros_in_scale(max_abs) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : {array-like, sparse matrix} The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : {array-like, sparse matrix} The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. 
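# Illustrative numpy sketch (hypothetical data) of the max-abs scaling described
# above: divide each column by its maximum absolute value so every column ends up
# in the [-1, 1] range without being shifted or densified.
import numpy as np

X = np.array([[-2.0, 1.0], [4.0, -0.5]])
X_scaled = X / np.abs(X).max(axis=0)
# the column maxima of |X| are [4.0, 1.0], so X_scaled is [[-0.5, 1.0], [1.0, -0.5]]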
This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. .. versionadded:: 0.17 Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- robust_scale: Equivalent function without the object oriented API. 
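# Illustrative numpy sketch (hypothetical data) of the robust scaling described
# above: subtract the per-column median and divide by the interquartile range,
# so a single outlier barely influences the centering and scaling statistics.
import numpy as np

X = np.array([[1.0], [2.0], [3.0], [100.0]])
center = np.median(X, axis=0)
q75, q25 = np.percentile(X, [75.0, 25.0], axis=0)
X_scaled = (X - center) / (q75 - q25)
# RobustScaler with the default quantile_range=(25.0, 75.0) is expected to compute
# the same center_ and scale_ for this data.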
:class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. https://en.wikipedia.org/wiki/Median_(statistics) https://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.quantile_range = quantile_range self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q_min, q_max = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) q = np.percentile(X, self.quantile_range, axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. 
with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- RobustScaler: Performs centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, quantile_range=quantile_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0., 0., 1.], [ 1., 2., 3., 4., 6., 9.], [ 1., 4., 5., 16., 20., 25.]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0.], [ 1., 2., 3., 6.], [ 1., 4., 5., 20.]]) Attributes ---------- powers_ : array, shape (n_output_features, n_input_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. 
See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(bincount(c, minlength=self.n_input_features_) for c in combinations) def get_feature_names(self, input_features=None): """ Return feature names for output features Parameters ---------- input_features : list of string, length n_features, optional String names for input features if available. By default, "x0", "x1", ... "xn_features" is used. Returns ------- output_feature_names : list of string, length n_output_features """ powers = self.powers_ if input_features is None: input_features = ['x%d' % i for i in range(powers.shape[1])] feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join("%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds])) else: name = "1" feature_names.append(name) return feature_names def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. """ check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. 
copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : boolean, default False whether to return the computed norms See also -------- Normalizer: Performs normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- normalize: Equivalent function without the object oriented API. """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. 
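        Examples
        --------
        A minimal usage sketch (illustrative values; the estimator is
        stateless, so ``fit`` only validates the input):

        >>> from sklearn.preprocessing import Normalizer
        >>> X = [[3.0, 4.0]]
        >>> Normalizer(norm='l2').fit(X).transform(X)
        array([[ 0.6,  0.8]])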
""" copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- Binarizer: Performs binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- binarize: Equivalent function without the object oriented API. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. 
""" copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K, dtype=FLOAT_DTYPES) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FLOAT_DTYPES) K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K @property def _pairwise(self): return True def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : {array, sparse matrix}, shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. 
data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES) if isinstance(selected, six.string_types) and selected == "all": return transform(X) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Note: a one-hot encoding of y labels should use a LabelBinarizer instead. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : number of categorical values per feature. Each feature value should be in ``range(n_values)`` - array : ``n_values[i]`` is the number of categorical values in ``X[:, i]``. Each feature value should be in ``range(n_values[i])`` categorical_features : "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. 
Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all fashion. sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of iterables and a multilabel format, e.g. a (samples x classes) binary matrix indicating the presence of a class label. sklearn.preprocessing.LabelEncoder : encodes labels with values between 0 and n_classes-1. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape [n_samples, n_feature] Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. 
Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those categorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X.ravel()[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if (isinstance(self.n_values, six.string_types) and self.n_values == 'auto'): out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape [n_samples, n_features] Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
bsd-3-clause
nsalomonis/AltAnalyze
AltAnalyzeViewer.py
1
282646
import os.path, sys, shutil import os import string, re import subprocess import numpy as np import unique import traceback import wx import wx.lib.scrolledpanel import wx.grid as gridlib try: import warnings with warnings.catch_warnings(): warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings import matplotlib #try: matplotlib.use('TkAgg') #except Exception: pass #import matplotlib.pyplot as plt ### Backend conflict issue when called prior to the actual Wx window appearing #matplotlib.use('WXAgg') from matplotlib.backends.backend_wx import NavigationToolbar2Wx from matplotlib.figure import Figure from numpy import arange, sin, pi except Exception: pass if os.name == 'nt': bheight=20 else: bheight=10 rootDirectory = unique.filepath(str(os.getcwd())) currentDirectory = unique.filepath(str(os.getcwd())) + "/" + "Config/" ### NS-91615 alternative to __file__ currentDirectory = string.replace(currentDirectory,'AltAnalyzeViewer.app/Contents/Resources','') os.chdir(currentDirectory) parentDirectory = str(os.getcwd()) ### NS-91615 gives the parent AltAnalyze directory sys.path.insert(1,parentDirectory) ### NS-91615 adds the AltAnalyze modules to the system path to from visualization_scripts import clustering and others import UI #These classes set up the "tab" feature in the program, allowing you to switch the viewer to different modes. class PageTwo(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) self.SetBackgroundColour("white") myGrid = "" class PageThree(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) self.SetBackgroundColour("white") class PageFour(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) class PageFive(wx.Panel): def __init__(self, parent): wx.Panel.__init__(self, parent) class Main(wx.Frame): def __init__(self,parent,id): wx.Frame.__init__(self, parent, id,'AltAnalyze Results Viewer', size=(900,610)) self.Show() self.Maximize(True) #### This allows the frame to resize to the host machine's max size self.heatmap_translation = {} self.heatmap_run = {} self.species = 'Hs' self.platform = 'RNASeq' self.geneset_type = 'WikiPathways' self.supported_genesets = [] self.runPCA = False self.SetBackgroundColour((230, 230, 230)) self.species='' #PANELS & WIDGETS #self.panel is one of the TOP PANELS. These are used for title display, the open project button, and sort & filter buttons. self.panel = wx.Panel(self, id=2, pos=(200,0), size=(600,45), style=wx.RAISED_BORDER) self.panel.SetBackgroundColour((110, 150, 250)) #Panel 2 is the main view panel. self.panel2 = wx.Panel(self, id=3, pos=(200,50), size=(1400,605), style=wx.RAISED_BORDER) self.panel2.SetBackgroundColour((218, 218, 218)) #Panel 3 contains the pseudo-directory tree. self.panel3 = wx.Panel(self, id=4, pos=(0,50), size=(200,625), style=wx.RAISED_BORDER) self.panel3.SetBackgroundColour("white") self.panel4 = wx.Panel(self, id=5, pos=(200,650), size=(1400,150), style=wx.RAISED_BORDER) self.panel4.SetBackgroundColour("black") #These are the other top panels. 
self.panel_left = wx.Panel(self, id=12, pos=(0,0), size=(200,45), style=wx.RAISED_BORDER) self.panel_left.SetBackgroundColour((218, 218, 218)) self.panel_right = wx.Panel(self, id=11, pos=(1100,0), size=(200,45), style=wx.RAISED_BORDER) self.panel_right.SetBackgroundColour((218, 218, 218)) self.panel_right2 = wx.Panel(self, id=13, pos=(1300,0), size=(300,45), style=wx.RAISED_BORDER) self.panel_right2.SetBackgroundColour((218, 218, 218)) self.panel_right2.SetMaxSize([300, 45]) #Lines 81-93 set up the user input box for the "sort" function (used on the table). self.sortbox = wx.TextCtrl(self.panel_right2, id=7, pos=(55,10), size=(40,25)) wx.Button(self.panel_right2, id=8, label="Sort", pos=(5, 12), size=(40, bheight)) self.Bind(wx.EVT_BUTTON, self.SortTablefromButton, id=8) self.AscendingRadio = wx.RadioButton(self.panel_right2, id=17, label="Sort", pos=(100, 3), size=(12, 12)) self.DescendingRadio = wx.RadioButton(self.panel_right2, id=18, label="Sort", pos=(100, 23), size=(12, 12)) font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD) self.AscendingOpt = wx.StaticText(self.panel_right2, label="Ascending", pos=(115, 1)) self.AscendingOpt.SetFont(font) self.DescendingOpt = wx.StaticText(self.panel_right2, label="Descending", pos=(115, 21)) self.DescendingOpt.SetFont(font) #Lines 96-98 set up the user input box for the "filter" function (used on the table). self.filterbox = wx.TextCtrl(self.panel_right, id=9, pos=(60,10), size=(125,25)) wx.Button(self.panel_right, id=10, label="Filter", pos=(0, 12), size=(50, bheight)) self.Bind(wx.EVT_BUTTON, self.FilterTablefromButton, id=10) #Lines 101-103 set up the in-program log. self.control = wx.TextCtrl(self.panel4, id=6, pos=(1,1), size=(1400,150), style=wx.TE_MULTILINE) self.control.write("Welcome to AltAnalyze Results Viewer!" + "\n") self.Show(True) self.main_results_directory = "" #self.browser is the "directory tree" where groups of files are instantiated in self.browser2. self.browser = wx.TreeCtrl(self.panel3, id=2000, pos=(0,0), size=(200,325)) #self.browser2 is the "file group" where groups of files are accessed, respective to the directory selected in self.browser. self.browser2 = wx.TreeCtrl(self.panel3, id=2001, pos=(0,325), size=(200,325)) self.tree = self.browser #self.sortdict groups the table headers to integers---this works with sort function. self.sortdict = {"A" : 0, "B" : 1, "C" : 2, "D" : 3, "E" : 4, "F" : 5, "G" : 6, "H" : 7, "I" : 8, "J" : 9, "K" : 10, "L" : 11, "M" : 12, "N" : 13, "O" : 14, "P" : 15, "Q" : 16, "R" : 17, "S" : 18, "T" : 19, "U" : 20, "V" : 21, "W" : 22, "X" : 23, "Y" : 24, "Z" : 25, "AA" : 26, "AB" : 27, "AC" : 28, "AD" : 29, "AE" : 30, "AF" : 31, "AG" : 32, "AH" : 33, "AI" : 34, "AJ" : 35, "AK" : 36, "AL" : 37, "AM" : 38, "AN" : 39, "AO" : 40, "AP" : 41, "AQ" : 42, "AR" : 43, "AS" : 44, "AT" : 45, "AU" : 46, "AV" : 47, "AW" : 48, "AX" : 49, "AY" : 50, "AZ" : 51} #SIZER--main sizer for the program. 
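        # Layout sketch (what the nested BoxSizers below produce, top to bottom):
        #   hpan1: panel_left (Open Project) | panel (title bar) | panel_right (filter) | panel_right2 (sort)
        #   hpan2: panel3 (directory/file trees) | panel2 (tabbed notebook view)
        #   hpan3: panel4 (log console)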
ver = wx.BoxSizer(wx.VERTICAL) verpan2 = wx.BoxSizer(wx.VERTICAL) hpan1 = wx.BoxSizer(wx.HORIZONTAL) hpan2 = wx.BoxSizer(wx.HORIZONTAL) hpan3 = wx.BoxSizer(wx.HORIZONTAL) verpan2.Add(self.panel2, 8, wx.ALL|wx.EXPAND, 2) hpan1.Add(self.panel_left, 5, wx.ALL|wx.EXPAND, 2) hpan1.Add(self.panel, 24, wx.ALL|wx.EXPAND, 2) hpan1.Add(self.panel_right, 3, wx.ALL|wx.EXPAND, 2) hpan1.Add(self.panel_right2, 3, wx.ALL|wx.EXPAND, 2) hpan2.Add(self.panel3, 1, wx.ALL|wx.EXPAND, 2) hpan2.Add(verpan2, 7, wx.ALL|wx.EXPAND, 2) hpan3.Add(self.panel4, 1, wx.ALL|wx.EXPAND, 2) ver.Add(hpan1, 1, wx.EXPAND) ver.Add(hpan2, 18, wx.EXPAND) ver.Add(hpan3, 4, wx.EXPAND) self.browser.SetSize(self.panel3.GetSize()) self.SetSizer(ver) #TABS: lines 137-159 instantiate the tabs for the main viewing panel. self.nb = wx.Notebook(self.panel2, id=7829, style = wx.NB_BOTTOM) self.page1 = wx.ScrolledWindow(self.nb, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL ) self.page1.SetScrollRate( 5, 5 ) self.page2 = PageTwo(self.nb) self.page3 = PageThree(self.nb) self.page4 = PageFour(self.nb) self.nb.AddPage(self.page2, "Table") self.nb.AddPage(self.page1, "PNG") self.nb.AddPage(self.page3, "Interactive") self.page3.SetBackgroundColour((218, 218, 218)) sizer = wx.BoxSizer() sizer.Add(self.nb, 1, wx.EXPAND) self.panel2.SetSizer(sizer) self.page1.SetBackgroundColour("white") self.myGrid = gridlib.Grid(self.page2, id=1002) #self.myGrid.CreateGrid(100, self.dataset_file_length) ### Sets this at 400 columns rather than 100 - Excel like self.Bind(gridlib.EVT_GRID_CELL_RIGHT_CLICK, self.GridRightClick, id=1002) self.Bind(gridlib.EVT_GRID_CELL_LEFT_DCLICK, self.GridRowColor, id=1002) self.HighlightedCells = [] gridsizer = wx.BoxSizer(wx.VERTICAL) gridsizer.Add(self.myGrid) self.page2.SetSizer(gridsizer) self.page2.Layout() #In the event that the interactive tab is chosen, a function must immediately run. 
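        # Note: EVT_NOTEBOOK_PAGE_CHANGED fires on every tab switch; InteractiveTabChoose
        # below only acts when the "Interactive" tab (index 2) is selected and the
        # currently opened file looks like a PCA or hierarchical-clustering result.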
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.InteractiveTabChoose, id=7829) #INTERACTIVE PANEL LAYOUT: lines 167-212 #Pca Setup self.RunButton1 = wx.Button(self.page3, id=43, label="Run", pos=(275, 150), size=(120, bheight)) self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=43) self.Divider1 = self.ln = wx.StaticLine(self.page3, pos=(5,100)) self.ln.SetSize((415,10)) IntTitleFont = wx.Font(15, wx.SWISS, wx.NORMAL, wx.BOLD) self.InteractiveTitle = wx.StaticText(self.page3, label="Main Dataset Parameters", pos=(10, 15)) self.InteractiveDefaultMessage = wx.StaticText(self.page3, label="No interactive options available.", pos=(10, 45)) self.InteractiveTitle.SetFont(IntTitleFont) self.IntFileTxt = wx.TextCtrl(self.page3, id=43, pos=(105,45), size=(375,20)) self.InteractiveFileLabel = wx.StaticText(self.page3, label="Selected File:", pos=(10, 45)) self.Yes1Label = wx.StaticText(self.page3, label="Yes", pos=(305, 80)) self.No1Label = wx.StaticText(self.page3, label="No", pos=(375, 80)) self.D_3DLabel = wx.StaticText(self.page3, label="3D", pos=(305, 120)) self.D_2DLabel = wx.StaticText(self.page3, label="2D", pos=(375, 120)) self.IncludeLabelsRadio = wx.RadioButton(self.page3, id=40, pos=(285, 83), size=(12, 12), style=wx.RB_GROUP) self.No1Radio = wx.RadioButton(self.page3, id=41, pos=(355, 83), size=(12, 12)) self.IncludeLabelsRadio.SetValue(True) #self.EnterPCAGenes = wx.TextCtrl(self.page3, id=48, pos=(105,45), size=(375,20)) self.D_3DRadio = wx.RadioButton(self.page3, id=46, pos=(285, 123), size=(12, 12), style=wx.RB_GROUP) self.D_2DRadio = wx.RadioButton(self.page3, id=47, pos=(355, 123), size=(12, 12)) self.D_3DRadio.SetValue(True) self.Opt1Desc = wx.StaticText(self.page3, label="Display sample labels next to each object", pos=(10, 80)) self.Opt2Desc = wx.StaticText(self.page3, label="Dimensions to display", pos=(10, 120)) self.IntFileTxt.Hide() self.InteractiveFileLabel.Hide() self.Yes1Label.Hide() self.No1Label.Hide() self.D_3DLabel.Hide() self.D_2DLabel.Hide() self.IncludeLabelsRadio.Hide() self.No1Radio.Hide() self.D_3DRadio.Hide() self.D_2DRadio.Hide() self.Opt1Desc.Hide() self.Opt2Desc.Hide() self.RunButton1.Hide() self.Divider1.Hide() #TERMINAL SETUP TxtBox = wx.BoxSizer(wx.VERTICAL) TxtBox.Add(self.control, 1, wx.EXPAND) self.panel4.SetSizer(TxtBox) self.panel4.Layout() #SELECTION LIST self.TopSelectList = [] self.SearchArray = [] self.SearchArrayFiltered = [] self.TopID = "" self.ColoredCellList = [] #LOGO self.png = wx.Image("logo.gif", wx.BITMAP_TYPE_ANY).ConvertToBitmap() self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER) imgsizer_v = wx.BoxSizer(wx.VERTICAL) imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL) self.page1.SetSizer(imgsizer_v) self.page1.Layout() self.nb.SetSelection(1) browspan = wx.BoxSizer(wx.VERTICAL) browspan.Add(self.browser, 1, wx.EXPAND) browspan.Add(self.browser2, 1, wx.EXPAND) self.panel3.SetSizer(browspan) self.PanelTitle = wx.StaticText(self.panel, label="", pos=(210, 15)) #Open Button ButtonMan = wx.Button(self.panel_left, id=1001, label="Open Project", pos=(0,0), size=(100,100)) self.Bind(wx.EVT_BUTTON, self.OnOpen, id=1001) OpenSizer = wx.BoxSizer(wx.HORIZONTAL) OpenSizer.Add(ButtonMan, 1, wx.EXPAND) self.panel_left.SetSizer(OpenSizer) #STATUS BAR CREATE --- not all of these are currently functional. The "edit" menu still needs to be implemented. 
status = self.CreateStatusBar() menubar = wx.MenuBar() file = wx.Menu() edit = wx.Menu() view = wx.Menu() search = wx.Menu() filter_table = wx.Menu() help_menu = wx.Menu() open_menu = wx.Menu() open_menu.Append(120, 'Project') open_menu.Append(121, 'File') file.AppendMenu(101, '&Open\tCtrl+O', open_menu) file.Append(102, '&Save\tCtrl+S', 'Save the document') file.AppendSeparator() file.Append(103, 'Options', '') file.AppendSeparator() quit = wx.MenuItem(file, 105, '&Quit\tCtrl+Q', 'Quit the Application') file.AppendItem(quit) edit.Append(109, 'Undo', '') edit.Append(110, 'Redo', '') edit.AppendSeparator() edit.Append(106, '&Cut\tCtrl+X', '') edit.Append(107, '&Copy\tCtrl+C', '') edit.Append(108, '&Paste\tCtrl+V', '') edit.AppendSeparator() edit.Append(111, '&Select All\tCtrl+A', '') view.Append(112, '&Clear Panel\tCtrl+.', '') search.Append(113, 'Tree', '') search.Append(114, 'Table', '') filter_table.Append(116, 'Filter', '') filter_table.Append(117, 'Sort', '') help_menu.AppendSeparator() help_menu.Append(139, 'Help', '') help_menu.Append(140, 'About', '') menubar.Append(file, "File") menubar.Append(edit, "Edit") menubar.Append(view, "View") menubar.Append(search, "Search") menubar.Append(filter_table, "Table") menubar.Append(help_menu, "Help") self.SetMenuBar(menubar) #STATUS BAR BINDINGS self.Bind(wx.EVT_MENU, self.OnOpen, id=120) self.Bind(wx.EVT_MENU, self.OnOpenSingleFile, id=121) self.Bind(wx.EVT_MENU, self.OnQuit, id=105) self.Bind(wx.EVT_MENU, self.ClearVisualPanel, id=112) self.Bind(wx.EVT_MENU, self.TreeSearch, id=113) self.Bind(wx.EVT_MENU, self.GridSearch, id=114) self.Bind(wx.EVT_MENU, self.FilterTable, id=116) self.Bind(wx.EVT_MENU, self.SortTable, id=117) self.Bind(wx.EVT_MENU, self.OnAbout, id=140) self.Bind(wx.EVT_MENU, self.OnHelp, id=139) self.Layout() def OnQuit(self, event): popup = wx.MessageDialog(None, "Are you sure you want to quit?", "Warning", wx.YES_NO) popup_answer = popup.ShowModal() #print popup_answer if(popup_answer == 5103): self.Close() else: return def GridRowColor(self, event): #This colors any row that has been selected and resets it accordingly: may be removed in future versions. if len(self.HighlightedCells) > 0: for i in self.HighlightedCells: self.myGrid.SetCellBackgroundColour(i[0], i[1], (255, 255, 255)) self.HighlightedCells = [] self.GridRowEvent = event.GetRow() for i in range(50): self.myGrid.SetCellBackgroundColour(self.GridRowEvent, i, (235, 255, 255)) self.HighlightedCells.append((self.GridRowEvent, i)) def GridRightClick(self, event): #Pop-up menu instantiation for a right click on the table. 
self.GridRowEvent = event.GetRow() # only do this part the first time so the events are only bound once if not hasattr(self, "popupID3"): self.popupID1 = wx.NewId() self.popupID2 = wx.NewId() if self.analyzeSplicing: self.popupID3 = wx.NewId() self.popupID4 = wx.NewId() self.popupID5 = wx.NewId() self.Bind(wx.EVT_MENU, self.GeneExpressionSummaryPlot, id=self.popupID1) self.Bind(wx.EVT_MENU, self.PrintGraphVariables, id=self.popupID2) if self.analyzeSplicing: self.Bind(wx.EVT_MENU, self.AltExonViewInitiate, id=self.popupID3) self.Bind(wx.EVT_MENU, self.IsoformViewInitiate, id=self.popupID4) self.Bind(wx.EVT_MENU, self.SashimiPlotInitiate, id=self.popupID5) # build the menu menu = wx.Menu() itemOne = menu.Append(self.popupID1, "Gene Plot") #itemTwo = menu.Append(self.popupID2, "Print Variables") if self.analyzeSplicing: itemThree = menu.Append(self.popupID3, "Exon Plot") itemFour = menu.Append(self.popupID4, "Isoform Plot") itemFive = menu.Append(self.popupID5, "SashimiPlot") # show the popup menu self.PopupMenu(menu) menu.Destroy() def AltExonViewInitiate(self, event): ### Temporary option for exon visualization until the main tool is complete and database can be bundled with the program i=0; values=[] while i<1000: try: val = str(self.myGrid.GetCellValue(self.GridRowEvent, i)) values.append(val) if ('G000' in val) and '->' not in val: geneID_temp = string.split(val,":")[0] if ('G000' in geneID_temp) and '->' not in geneID_temp: geneID = geneID_temp if ' ' in geneID: geneID = string.split(geneID,' ')[0] else: geneID_temp = string.split(val,":")[1] if ('G000' in geneID_temp): geneID = geneID_temp if ' ' in geneID: geneID = string.split(geneID,' ')[0] i+=1 except Exception: break datasetDir = self.main_results_directory #print datasetDir self.control.write("Plotting... " + geneID + "\n") data_type = 'raw expression' show_introns = 'no' analysisType = 'graph-plot' exp_dir = unique.filepath(datasetDir+'/ExpressionInput') #print exp_dir exp_file = UI.getValidExpFile(exp_dir) #print print exp_file UI.altExonViewer(self.species,self.platform,exp_file,geneID,show_introns,analysisType,'') def IsoformViewInitiate(self, event): #print os.getcwd() #This function is a part of the pop-up menu for the table: it plots a gene and protein level view. os.chdir(parentDirectory) t = os.getcwd() #self.control.write(str(os.listdir(t)) + "\n") gene = self.myGrid.GetCellValue(self.GridRowEvent, 0) i=0; values=[]; spliced_junctions=[] while i<1000: try: val = str(self.myGrid.GetCellValue(self.GridRowEvent, i)) values.append(val) if ('G000' in val) and 'ENSP' not in val and 'ENST' not in val and '->' not in val: geneID_temp = string.split(val,":")[0] if ('G000' in geneID_temp) and '->' not in geneID_temp: geneID = geneID_temp if ' ' in geneID: geneID = string.split(geneID,' ')[0] elif '->' in geneID_temp: pass else: geneID_temp = string.split(val,":")[1] if ('G000' in geneID_temp): geneID = geneID_temp if ' ' in geneID: geneID = string.split(geneID,' ')[0] i+=1 except Exception: break #print [geneID] self.control.write("Plotting... 
" + geneID + "\n") from visualization_scripts import ExPlot reload(ExPlot) ExPlot.remoteGene(geneID,self.species,self.main_results_directory,self.CurrentFile) #Q = subprocess.Popen(['python', 'ExPlot13.py', str(R)]) #os.chdir(currentDirectory) def SashimiPlotInitiate(self, event): #This function is a part of the pop-up menu for the table: it plots a SashimiPlot datasetDir = str(self.main_results_directory) geneID = None #self.control.write(str(os.listdir(t)) + "\n") i=0; values=[]; spliced_junctions=[] while i<1000: try: val = str(self.myGrid.GetCellValue(self.GridRowEvent, i)) values.append(val) if ('G000' in val) and ':E' in val: #if 'ASPIRE' in self.DirFileTxt: if ':ENS' in val: val = 'ENS'+string.split(val,':ENS')[1] val = string.replace(val,'|', ' ') #Can also refer to MarkerFinder files if ' ' in val: if '.' not in string.split(val,' ')[1]: val = string.split(val,' ')[0] ### get the gene if 'Combined-junction' in self.DirFileTxt: if '-' in val and '|' in val: junctions = string.split(val,'|')[0] val = 'ENS'+string.split(junctions,'-ENS')[-1] spliced_junctions.append(val) ### exclusion junction if 'index' in self.DirFileTxt: ### Splicing-index analysis spliced_junctions.append(val) elif '-' in val: spliced_junctions.append(val) ### junction-level if ('G000' in val) and geneID == None and '->' not in val: geneID = string.split(val,":")[0] if ' ' in geneID: geneID = string.split(geneID,' ')[0] i+=1 except Exception: break if len(spliced_junctions)>0: spliced_junctions = [spliced_junctions[-1]] ### Select the exclusion junction else: spliced_junctions = [geneID] if 'DATASET' in self.DirFileTxt: spliced_junctions = [geneID] from visualization_scripts import SashimiPlot reload(SashimiPlot) self.control.write("Attempting to build SashimiPlots for " + str(spliced_junctions[0]) + "\n") SashimiPlot.remoteSashimiPlot(self.species,datasetDir,datasetDir,None,events=spliced_junctions,show=True) ### assuming the bam files are in the root-dir def GeneExpressionSummaryPlot(self, event): #This function is a part of the pop-up menu for the table: it plots expression levels. Wikipathway_Flag = 0 Protein_Flag = 0 VarGridSet = [] try: for i in range(3000): try: p = self.myGrid.GetCellValue(0, i) VarGridSet.append(p) except Exception: pass for i in VarGridSet: y = re.findall("WikiPathways", i) if len(y) > 0: Wikipathway_Flag = 1 break if Wikipathway_Flag == 0: for i in VarGridSet: y = re.findall("Select Protein Classes", i) if len(y) > 0: Protein_Flag = 1 break if Protein_Flag == 1: VariableBox = [] for i in range(len(VarGridSet)): y = re.findall("avg", VarGridSet[i]) if(len(y) > 0): VariableBox.append(i) if Wikipathway_Flag == 1: VariableBox = [] for i in range(len(VarGridSet)): y = re.findall("avg", VarGridSet[i]) if(len(y) > 0): VariableBox.append(i) q_barrel = [] for i in VariableBox: q_box = [] q = i for p in range(500): if(q < 0): break q = q - 1 #Regular expression is needed to find the appropriate columns to match from. 
FLAG_log_fold = re.findall("log_fold",VarGridSet[q]) FLAG_adjp = re.findall("adjp",VarGridSet[q]) FLAG_rawp = re.findall("rawp",VarGridSet[q]) FLAG_wiki = re.findall("Wiki",VarGridSet[q]) FLAG_pc = re.findall("Protein Classes",VarGridSet[q]) FLAG_avg = re.findall("avg",VarGridSet[q]) if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0): break q_box.append(q) q_barrel.append((q_box)) Values_List = [] HeaderList = [] TitleList = self.myGrid.GetCellValue(self.GridRowEvent, 0) for i in VariableBox: HeaderList.append(self.myGrid.GetCellValue(0, i)) for box in q_barrel: output_box = [] for value in box: output_var = self.myGrid.GetCellValue(self.GridRowEvent, value) output_box.append(float(output_var)) Values_List.append((output_box)) self.control.write("Plotting values from: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n") Output_Values_List = [] Output_std_err = [] for box in Values_List: T = 0 for item in box: T = T + item output_item = T / float(len(box)) Output_Values_List.append(output_item) for box in Values_List: box_std = np.std(box) box_power = np.power((len(box)), 0.5) std_err = box_std / float(box_power) Output_std_err.append(std_err) n_groups = len(Output_Values_List) #PLOTTING STARTS -- means_men = Output_Values_List import matplotlib.pyplot as plt fig, ax = plt.subplots() index = np.arange(n_groups) bar_width = 0.35 pos = bar_width / float(2) opacity = 0.4 error_config = {'ecolor': '0.3'} with warnings.catch_warnings(): rects1 = plt.bar((index + pos), Output_Values_List, bar_width, alpha=opacity, color='b', yerr=Output_std_err, label="") #plt.title(self.myGrid.GetCellValue(self.GridRowEvent, 2)) plt.title(TitleList) plt.xticks(index + bar_width, HeaderList) plt.legend() plt.tight_layout() plt.show() #-- PLOTTING STOPS except Exception: self.control.write("Plot failed to output... only applicable for files with the DATASET prefix") def PrintGraphVariables(self, event): #This function is a part of the pop-up menu for the table: it prints the variables for the expression levels. Used for testing mainly.
Wikipathway_Flag = 0 Protein_Flag = 0 VarGridSet = [] for i in range(100): p = self.myGrid.GetCellValue(0, i) VarGridSet.append(p) for i in VarGridSet: y = re.findall("WikiPathways", i) if len(y) > 0: Wikipathway_Flag = 1 break if Wikipathway_Flag == 0: for i in VarGridSet: y = re.findall("Select Protein Classes", i) if len(y) > 0: Protein_Flag = 1 break if Protein_Flag == 1: VariableBox = [] for i in range(len(VarGridSet)): y = re.findall("avg", VarGridSet[i]) if(len(y) > 0): VariableBox.append(i) if Wikipathway_Flag == 1: VariableBox = [] for i in range(len(VarGridSet)): y = re.findall("avg", VarGridSet[i]) if(len(y) > 0): VariableBox.append(i) q_barrel = [] for i in VariableBox: q_box = [] q = i for p in range(500): if(q < 0): break q = q - 1 FLAG_log_fold = re.findall("log_fold",VarGridSet[q]) FLAG_adjp = re.findall("adjp",VarGridSet[q]) FLAG_rawp = re.findall("rawp",VarGridSet[q]) FLAG_wiki = re.findall("Wiki",VarGridSet[q]) FLAG_pc = re.findall("Protein Classes",VarGridSet[q]) FLAG_avg = re.findall("avg",VarGridSet[q]) if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0): break q_box.append(q) q_barrel.append((q_box)) self.control.write("Selected Row: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n") self.control.write("Selected Columns: " + str(q_barrel) + "\n") Values_List = [] HeaderList = [] for i in VariableBox: HeaderList.append(self.myGrid.GetCellValue(0, i)) for box in q_barrel: output_box = [] for value in box: output_var = self.myGrid.GetCellValue(self.GridRowEvent, value) output_box.append(float(output_var)) Values_List.append((output_box)) self.control.write("Selected Values: " + str(Values_List) + "\n") def InteractiveTabChoose(self, event): #If the interactive tab is chosen, a plot will immediately appear with the default variables. try: #The PCA and Heatmap flags are set; a different UI will appear for each of them. 
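            # (Illustrative: a selected results file whose name contains "PCA" or
            # "hierarchical" -- e.g. a PCA scores plot or a hierarchical-clustering
            # heatmap output -- enables the interactive controls on this tab.)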
PCA_RegEx = re.findall("PCA", self.DirFile) Heatmap_RegEx = re.findall("hierarchical", self.DirFile) if(self.nb.GetSelection() == 2): if(len(PCA_RegEx) > 0 or len(Heatmap_RegEx) > 0): self.InteractiveRun(event) except: pass def getDatasetVariables(self): for file in os.listdir(self.main_results_directory): if 'AltAnalyze_report' in file and '.log' in file: log_file = unique.filepath(self.main_results_directory+'/'+file) log_contents = open(log_file, "rU") species = ' species: ' platform = ' method: ' for line in log_contents: line = line.rstrip() if species in line: self.species = string.split(line,species)[1] if platform in line: self.platform = string.split(line,platform)[1] try: self.supported_genesets = UI.listAllGeneSetCategories(self.species,'WikiPathways','gene-mapp') self.geneset_type = 'WikiPathways' except Exception: try: self.supported_genesets = UI.listAllGeneSetCategories(self.species,'GeneOntology','gene-mapp') self.geneset_type = 'GeneOntology' except Exception: self.supported_genesets = [] self.geneset_type = 'None Selected' #print 'Using',self.geneset_type, len(self.supported_genesets),'pathways' break try: for file in os.listdir(self.main_results_directory+'/ExpressionOutput'): if 'DATASET' in file: dataset_file = unique.filepath(self.main_results_directory+'/ExpressionOutput/'+file) for line in open(dataset_file,'rU').xreadlines(): self.dataset_file_length = len(string.split(line,'\t')) break except Exception: pass try: if self.dataset_file_length<50: self.dataset_file_length=50 except Exception: self.dataset_file_length=50 self.myGrid.CreateGrid(100, self.dataset_file_length) ### Re-set the grid width based on the DATASET- file width def OnOpen(self, event): #Bound to the open tab from the menu and the "Open Project" button. openFileDialog = wx.DirDialog(None, "Choose project", "", wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST) if openFileDialog.ShowModal() == wx.ID_CANCEL: return #self.input stream is the path of our project's main directory. self.main_results_directory = openFileDialog.GetPath() if (len(self.main_results_directory) > 0): if self.species == '': self.getDatasetVariables() self.SearchArray = [] self.SearchArrayFiltered = [] self.control.write("Working..." 
+ "\n") #FLAG COLLECT root = 'Data' for (dirpath, dirnames, filenames) in os.walk(root): for dirname in dirnames: #fullpath = os.path.join(dirpath, dirname) fullpath = currentDirectory+'/'+dirpath+'/'+dirname for filename in sorted(filenames): if filename == "location.txt": #file_fullpath = unique.filepath(os.path.join(dirpath, filename)) file_fullpath = currentDirectory+'/'+dirpath+'/'+filename file_location = open(file_fullpath, "r") fl_array = [] for line in file_location: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\r") if len(line) > 1: fl_array.append(line[0]) fl_array.append(line[1]) else: fl_array.append(line[0]) file_location.close() #if dirname == 'ExonGraph': print fl_array if(len(fl_array) == 3): fl_array.append(dirpath) self.SearchArray.append(fl_array) self.control.write("Opening project at: " + self.main_results_directory + "\n") self.browser2.DeleteAllItems() #SEARCH USING FLAGS count = 0 for FLAG in self.SearchArray: if((FLAG[0][-1] != "/") and (FLAG[0][-1] != "\\")): SearchingFlag = FLAG[0] + "/" SearchingFlag = FLAG[0] SearchingFlagPath = self.main_results_directory + "/" + SearchingFlag try: SFP_Contents = os.listdir(SearchingFlagPath) for filename in SFP_Contents: Search_base = FLAG[1] Search_base = Search_base.split(":") Search_base = Search_base[1] Split_Extension = str(FLAG[2]) Split_Extension = Split_Extension.split(":") S_E = str(Split_Extension[1]).split(",") GOOD_FLAG = 0 if(Search_base != "*"): for i in S_E: if(filename[-4:] == i): GOOD_FLAG = 1 if(Search_base != "*"): candidate = re.findall(Search_base, filename) if(Search_base == "*"): candidate = "True" GOOD_FLAG = 1 if (len(Search_base) == 0 or GOOD_FLAG == 0): continue if len(candidate) > 0: self.SearchArrayFiltered.append(FLAG) except: continue count = count + 1 #AVAILABLE DATA SET try: shutil.rmtree("AvailableData") except: pass for i in self.SearchArrayFiltered: AvailablePath = "Available" + i[3] if '\\' in AvailablePath: ### Windows AvailablePath = string.replace(AvailablePath,'/','\\') if '/' in AvailablePath: Path_List = AvailablePath.split("/") else: Path_List = AvailablePath.split("\\") Created_Directory = "" for directorynum in range(len(Path_List)): if directorynum == 0: Created_Directory = Created_Directory + Path_List[directorynum] try: os.mkdir(Created_Directory) except: continue else: Created_Directory = Created_Directory + "/" + Path_List[directorynum] try: os.mkdir(Created_Directory) except: continue #TOP BROWSER SET root = 'AvailableData' color_root = [253, 253, 253] self.tree.DeleteAllItems() self.ids = {root : self.tree.AddRoot(root)} self.analyzeSplicing=False for (dirpath, dirnames, filenames) in os.walk(root): #print 'x',[dirpath, dirnames, filenames]#;sys.exit() for dirname in dirnames: #print dirpath, dirname if 'Splicing' in dirpath: self.analyzeSplicing=True fullpath = os.path.join(dirpath, dirname) #print currentDirectory+'/'+dirpath self.ids[fullpath] = self.tree.AppendItem(self.ids[dirpath], dirname) DisplayColor = [255, 255, 255] DisplayColor[0] = color_root[0] - len(dirpath) DisplayColor[1] = color_root[1] - len(dirpath) DisplayColor[2] = color_root[2] - len(dirpath) self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor) for i in self.SearchArrayFiltered: SearchRoot = "Available" + i[3] if(SearchRoot == fullpath): SearchSplit = i[1].split(":") SearchSplit = SearchSplit[1] SearchSplit = SearchSplit + ";" + i[0] SearchSplit = SearchSplit + ";" + i[2] DisplayColor = [130, 170, 250] 
self.tree.SetItemData(self.ids[fullpath],wx.TreeItemData(SearchSplit)) self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor) self.tree.SetItemBackgroundColour(self.ids[root], [100, 140, 240]) self.tree.Expand(self.ids[root]) try: self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedTopTreeID, self.tree) except Exception: pass #OPENING DISPLAY try: self.LOGO.Destroy() except: pass self.png = wx.Image(rootDirectory+"/Config/no-image-available.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap() self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER) imgsizer_v = wx.BoxSizer(wx.VERTICAL) imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL) self.page1.SetSizer(imgsizer_v) self.page1.Layout() self.control.write("Resetting grid..." + "\n") self.control.write("Currently displaying: " + "SUMMARY" + "\n") self.myGrid.ClearGrid() if 'ExpressionInput' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'ExpressionInput')[0] if 'AltResults' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'AltResults')[0] if 'ExpressionOutput' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'ExpressionOutput')[0] if 'GO-Elite' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'GO-Elite')[0] if 'ICGS' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'ICGS')[0] if 'DataPlots' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'DataPlots')[0] if 'AltExpression' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'AltExpression')[0] if 'AltDatabase' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'AltDatabase')[0] if 'ExonPlots' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'ExonPlots')[0] if 'SashimiPlots' in self.main_results_directory: self.main_results_directory = string.split(self.main_results_directory,'SashimiPlots')[0] opening_display_folder = self.main_results_directory + "/ExpressionOutput" try: list_contents = os.listdir(opening_display_folder) target_file = "" for file in list_contents: candidate = re.findall("SUMMARY", file) if len(candidate) > 0: target_file = file break except Exception: opening_display_folder = self.main_results_directory list_contents = os.listdir(opening_display_folder) for file in list_contents: candidate = re.findall(".log", file) if len(candidate) > 0: target_file = file ### get the last log file target_file = unique.filepath(opening_display_folder + "/" + target_file) opened_target_file = open(target_file, "r") opened_target_file_contents = [] for line in opened_target_file: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") if len(line)==1: line += ['']*5 opened_target_file_contents.append((line)) self.table_length = len(opened_target_file_contents) for cell in self.ColoredCellList: try: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) except Exception: pass self.ColoredCellList = [] x_count = 0 for item_list in opened_target_file_contents: y_count = 0 for item in item_list: try: self.myGrid.SetCellValue(x_count, y_count, item) except Exception: 
pass ### if the length of the row is 0 if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() #This line always sets the opening display to the "Table" tab. self.nb.SetSelection(0) def OnOpenSingleFile(self, event): #Opens only one file as opposed to the whole project; possibly unstable and needs further testing. openFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) if openFileDialog.ShowModal() == wx.ID_CANCEL: return single_input_stream = openFileDialog.GetPath() self.control.write(str(single_input_stream) + "\n") if single_input_stream[-4:] == ".txt": self.myGrid.ClearGrid() self.DirFileTxt = single_input_stream self.DirFile = single_input_stream table_file = open(self.DirFileTxt, "r") table_file_contents = [] for line in table_file: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") if(len(table_file_contents) >= 5000): break table_file_contents.append((line)) for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSize() for i in range(50): try: colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) except Exception: pass self.page2.Layout() if single_input_stream[-4:] == ".png": self.myGrid.ClearGrid() try: self.LOGO.Destroy() except: pass self.png = wx.Image(single_input_stream, wx.BITMAP_TYPE_ANY).ConvertToBitmap() self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER) imgsizer_v = wx.BoxSizer(wx.VERTICAL) imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL) self.page1.SetSizer(imgsizer_v) self.page1.Layout() if single_input_stream[-4:] == ".pdf": #http://wxpython.org/Phoenix/docs/html/lib.pdfviewer.html pass def OnSave(self, event): #Save function is currently not implemented but is a priority for future updates. saveFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) if saveFileDialog.ShowModal() == wx.ID_CANCEL: return def OnSearch(self, event): #This handles the search prompt pop-up box when using "search -> table" from the status bar menu. popup = wx.TextEntryDialog(None, "Enter filter for results.", "Search", "Enter search here.") if popup.ShowModal()==wx.ID_OK: answer=popup.GetValue() popup.Destroy() else: popup.Destroy() return def TreeSearch(self, event): #Search tree function: searches the tree for a given phrase and opens the tree to that object. 
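        # TreeSearch walks the on-disk "AvailableData" mirror to collect every
        # directory path, looks for an exact match between the query and the final
        # path component, then expands the tree level by level down to the hit and
        # selects it.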
popup = wx.TextEntryDialog(None, "Search the browser tree for directories and files.", "Search", "Enter search here.") if popup.ShowModal()==wx.ID_OK: answer=popup.GetValue() self.control.write("K" + str(answer) + "\n") os.chdir(currentDirectory) ### NS-91615 alternative to __file__ rootman = "AvailableData" search_box = [] found = "" for (dirpath, dirnames, filenames) in os.walk(rootman): for dirname in dirnames: fullpath = dirpath + "/" + dirname search_box.append(fullpath) self.control.write("Searching..." + "\n") for path in search_box: path2 = path.split("/") search_candidate = path2[-1] self.control.write(search_candidate + " " + str(answer) + "\n") if(str(answer) == search_candidate): found = path break self.control.write(found + "\n") tree_recreate = found.split("/") treepath = "" self.control.write(str(range(len(tree_recreate))) + "\n") tree_length = len(tree_recreate) last_tree_value = len(tree_recreate) - 1 for i in range(tree_length): self.control.write(str(i) + "\n") if(i == 0): self.tree.Expand(self.ids[tree_recreate[i]]) treepath = treepath + tree_recreate[i] self.control.write(treepath + "\n") if(i > 0 and i < last_tree_value): treepath = treepath + "/" + tree_recreate[i] self.control.write(treepath + "\n") self.tree.Expand(self.ids[treepath]) if(i == last_tree_value): treepath = treepath + "/" + tree_recreate[i] self.control.write(treepath + "\n") self.tree.SelectItem(self.ids[treepath]) popup.Destroy() else: popup.Destroy() return def GridSearch(self, event): #Search table function: this searchs the table and highlights the search query in the table; also zooms to the nearest match. popup = wx.TextEntryDialog(None, "Search the table.", "Search", "Enter search here.") if popup.ShowModal()==wx.ID_OK: PageDownFound = "False" match_count = 0 answer=popup.GetValue() for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] if(self.table_length > 5100): y_range = range(5100) y_range = range(self.table_length) x_range = range(100) y_count = 0 for number in y_range: x_count = 0 for number in x_range: cellvalue = self.myGrid.GetCellValue(y_count, x_count) gridmatch = re.findall(answer, cellvalue) if(len(gridmatch) > 0): if(PageDownFound == "False"): PageScrollY = y_count PageScrollX = x_count PageDownFound = "True" match_count = match_count + 1 self.ColoredCellList.append((y_count, x_count)) self.myGrid.SetCellBackgroundColour(y_count, x_count, (255, 255, 125)) x_count = x_count + 1 y_count = y_count + 1 #"MakeCellVisible" zooms to the given coordinates. self.myGrid.MakeCellVisible(PageScrollY, PageScrollX) terminal_list = [] for cell in self.ColoredCellList: newrow = cell[0] + 1 newcolumn = cell[1] + 1 terminal_list.append((newrow, newcolumn)) self.control.write(str(match_count) + " matches found for " + answer + "\n") self.control.write("At positions (row, column): " + str(terminal_list) + "\n") popup.Destroy() self.nb.SetSelection(0) else: popup.Destroy() return def FilterTable(self, event): #The filter function displays ONLY the rows that have matches for the given search. Does not delete the filtered out data---table data is still fully functional and usable. 
popup = wx.TextEntryDialog(None, "Filter the table.", "Search", "Enter filter phrase.") if popup.ShowModal()==wx.ID_OK: self.myGrid.ClearGrid() answer=popup.GetValue() try: table_file = open(self.DirFileTxt, "r") table_file_contents = [] count = 0 for line in table_file: line = line.rstrip(); line = string.replace(line,'"','') regex_test = re.findall(answer.upper(), line.upper()) line = line.split("\t") if(len(regex_test) > 0 or count == 0): if(len(table_file_contents) >= 5100): break table_file_contents.append((line)) count = count + 1 for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: self.control.write("Unable to open txt." + "\n") self.nb.SetSelection(0) def SortTable(self, event): #The sort function re-writes the table sorting by either descending or ascending values in a given column. popup = wx.TextEntryDialog(None, "Sort the table.", "Sort", "Which column to sort from?") if popup.ShowModal()==wx.ID_OK: self.myGrid.ClearGrid() answer=popup.GetValue() answer = answer.upper() try: table_file = open(self.DirFileTxt, "r") table_file_contents = [] pre_sort2 = [] header = [] t_c = 0 column_clusters_flat = 0 for line in table_file: line=string.replace(line,'Insufficient Expression','0') try: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") if(t_c == 0): header.append((line)) t_c = t_c + 1 continue if(line[0] == "column_clusters-flat"): header.append((line)) column_clusters_flat = 1 continue line_sort_select = line[self.sortdict[answer]] pre_sort1 = [] count = 0 for i in line: if(count == 0): try: pre_sort1.append(float(line_sort_select)) except: pre_sort1.append(line_sort_select) pre_sort1.append(i) if(count == self.sortdict[answer]): count = count + 1 continue if(count != 0): pre_sort1.append(i) count = count + 1 pre_sort2.append((pre_sort1)) except: continue table_file_contents.append(header[0]) if(column_clusters_flat == 1): table_file_contents.append(header[1]) pre_sort2 = sorted(pre_sort2, reverse = True) for line in pre_sort2: try: final_count1 = 0 final_count2 = 1 send_list = [] for item in line: if(final_count1 == 0): send_list.append(line[final_count2]) if(final_count1 == self.sortdict[answer]): send_list.append(str(line[0])) if(final_count1 != 0 and final_count1 != self.sortdict[answer]): if(final_count1 < self.sortdict[answer]): send_list.append(line[final_count2]) if(final_count1 > self.sortdict[answer]): send_list.append(line[final_count1]) final_count1 = final_count1 + 1 final_count2 = final_count2 + 1 if(len(table_file_contents) >= 5100): break table_file_contents.append((send_list)) except: continue n_table_file_contents = [] if(answer.upper() == "A"): for i in range(len(table_file_contents)): if(i == 0): max_length = len(table_file_contents[i]) if(max_length < len(table_file_contents[i])): n_l = table_file_contents[i][2:] else: n_l = table_file_contents[i] n_table_file_contents.append((n_l)) table_file_contents = n_table_file_contents for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) 
self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSizeRows(True) for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: self.control.write("Unable to sort." + "\n") self.nb.SetSelection(0) def FilterTablefromButton(self, event): #Same filter function as before, but this function is bound to the button in the top-right corner of the main GUI. self.myGrid.ClearGrid() #In single line text boxes, you must always set 0 to the GetLineText value; 0 represents the first and only line. answer = self.filterbox.GetLineText(0) try: try: self.myGrid.DeleteRows(100, self.AppendTotal, True) except: pass table_file_contents = [] count = 0 for line in open(self.DirFileTxt,'rU').xreadlines(): line = line.rstrip(); line = string.replace(line,'"','') regex_test = re.findall(answer.upper(), line.upper()) line = line.split("\t") if(len(regex_test) > 0 or count == 0): if(len(table_file_contents) >= 5100): break table_file_contents.append((line)) count = count + 1 self.table_length = len(table_file_contents) self.control.write("Table Length: " + str(self.table_length) + "\n") if(self.table_length > 100): self.AppendTotal = self.table_length - 100 self.myGrid.AppendRows(self.AppendTotal, True) for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: self.control.write("Unable to open txt." + "\n") self.nb.SetSelection(0) def SortTablefromButton(self, event): #Same sort function as before, but this function is bound to the button in the top-right corner of the main GUI. 
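        # SortTablefromButton maps the entered column letter to an index via
        # self.sortdict, copies that column's value (as float where possible) to the
        # front of each row so it can serve as the sort key, sorts ascending or
        # descending according to the radio buttons, then restores the original
        # column order while writing the rows back (capped at ~5100 rows).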
answer = self.sortbox.GetLineText(0) self.myGrid.ClearGrid() answer = answer.upper() try: table_file = open(self.DirFileTxt, "r") table_file_contents = [] pre_sort2 = [] header = [] t_c = 0 column_clusters_flat = 0 for line in table_file: line=string.replace(line,'Insufficient Expression','0') try: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") if(t_c == 0): header.append((line)) t_c = t_c + 1 continue if(line[0] == "column_clusters-flat"): header.append((line)) column_clusters_flat = 1 continue line_sort_select = line[self.sortdict[answer]] pre_sort1 = [] count = 0 for i in line: if(count == 0): try: pre_sort1.append(float(line_sort_select)) except: pre_sort1.append(line_sort_select) pre_sort1.append(i) if(count == self.sortdict[answer]): count = count + 1 continue if(count != 0): pre_sort1.append(i) count = count + 1 pre_sort2.append((pre_sort1)) except: continue table_file_contents.append(header[0]) if(column_clusters_flat == 1): table_file_contents.append(header[1]) if(self.DescendingRadio.GetValue() == True): pre_sort2 = sorted(pre_sort2, reverse = True) if(self.AscendingRadio.GetValue() == True): pre_sort2 = sorted(pre_sort2) for line in pre_sort2: try: final_count1 = 0 final_count2 = 1 send_list = [] for item in line: if(final_count1 == 0): send_list.append(line[final_count2]) if(final_count1 == self.sortdict[answer]): send_list.append(str(line[0])) if(final_count1 != 0 and final_count1 != self.sortdict[answer]): if(final_count1 < self.sortdict[answer]): send_list.append(line[final_count2]) if(final_count1 > self.sortdict[answer]): send_list.append(line[final_count1]) final_count1 = final_count1 + 1 final_count2 = final_count2 + 1 if(len(table_file_contents) >= 5100): break table_file_contents.append((send_list)) except: continue n_table_file_contents = [] if(answer.upper() == "A"): for i in range(len(table_file_contents)): if(i == 0): max_length = len(table_file_contents[i]) if(max_length < len(table_file_contents[i])): n_l = table_file_contents[i][2:] else: n_l = table_file_contents[i] n_table_file_contents.append((n_l)) table_file_contents = n_table_file_contents for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSizeRows(True) for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: self.control.write("Unable to sort." + "\n") self.nb.SetSelection(0) def SelectedTopTreeID(self, event): item = event.GetItem() try: #This handles the selection of an item in the TOP tree browser. 
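            # The activated node stores a "suffix;path;extensions" string. The handler
            # lists the files in the selected results sub-directory that match the
            # suffix, mirrors each match as a folder under UseDir to rebuild the bottom
            # browser, and then loads the first matching file into the grid and/or the
            # image tab.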
item = event.GetItem() itemObject = self.tree.GetItemData(item).GetData() SearchObject = itemObject.split(";") SearchSuffix = SearchObject[0] SearchPath = SearchObject[1] SearchExtension = SearchObject[2] SearchExtension = SearchExtension.split(":") SearchExtension = SearchExtension[1:] SearchExtension = SearchExtension[0] SearchExtension = SearchExtension.split(",") #SELECTION IMPLEMENT ID_Strings = [] self.TopSelectList = [] self.TopID = SearchSuffix root = self.main_results_directory + "/" + SearchPath root_display = self.main_results_directory + "/" + SearchPath root_contents = os.listdir(root) root_contents_display = os.listdir(root) for obj in root_contents: if(SearchSuffix != "*"): FindList = re.findall(SearchSuffix, obj) if(len(FindList) > 0): self.TopSelectList.append(obj) #print obj self.browser2.DeleteAllItems() for filename in root_contents: if(SearchSuffix != "*"): FindList2 = re.findall(SearchSuffix, filename) if(len(FindList2) > 0): display_name = filename[0:-4] ID_Strings.append(display_name) else: if(filename[-4] == "."): display_name = filename[0:-4] if "AVERAGE-" not in display_name and "COUNTS-" not in display_name: ID_Strings.append(display_name) ID_Strings = list(set(ID_Strings)) change_path = currentDirectory + "/UseDir" ### NS-91615 alternative to __file__ shutil.rmtree("UseDir") os.mkdir("UseDir") #self.control.write(ID_Strings[0] + "\n") os.chdir(change_path) for marker in ID_Strings: try: os.mkdir(marker) except: pass os.chdir(currentDirectory) ### NS-91615 alternative to __file__ root = "UseDir" color_root2 = [223, 250, 223] self.ids2 = {root : self.browser2.AddRoot(root)} for (dirpath, dirnames, filenames) in os.walk(root): color_root2[0] = color_root2[0] - 1 color_root2[1] = color_root2[1] - 0 color_root2[2] = color_root2[2] - 1 for dirname in dirnames: #self.control.write(str(SearchExtension) + "\n") Extensions = dirname + "|" + str(SearchExtension) + "|" + str(SearchPath) fullpath = os.path.join(dirpath, dirname) self.ids2[fullpath] = self.browser2.AppendItem(self.ids2[dirpath], dirname) self.browser2.SetItemData(self.ids2[fullpath],wx.TreeItemData(Extensions)) T = re.findall("DATASET", fullpath) if(len(T) > 0): self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [250, 100, 100]) else: self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [130, 170, 250]) self.browser2.SetItemBackgroundColour(self.ids2[root], [110, 150, 250]) self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedBottomTreeID, self.browser2) self.browser2.ExpandAll() #OPENING DISPLAY display_file_selected = "" TXT_FLAG = 0 PNG_FLAG = 0 if(root_display[-1] != "/"): root_display = root_display + "/" for possible in root_contents_display: total_filepath = unique.filepath(root_display + possible) if(possible[-4:] == ".txt"): self.control.write("Displaying File: " + str(total_filepath) + "\n") display_file_selected = total_filepath break TXT_FLAG = 0 PNG_FLAG = 0 #self.control.write(str(os.listdir(root)) + "\n") #self.control.write(str(SearchExtension) + "\n") for i in SearchExtension: if(i == ".txt"): TXT_FLAG = 1 #self.control.write(str(i) + "\n") if(i == ".png"): PNG_FLAG = 1 #self.control.write(str(i) + "\n") if(root_display[-1] != "/"): root_display = root_display + "/" Pitch = os.listdir(root) PitchSelect = Pitch[0] self.CurrentFile = PitchSelect #self.control.write(str(PitchSelect) + " " + root_display + "\n") self.DirFile = unique.filepath(root_display + PitchSelect) self.IntFileTxt.Clear() self.IntFileTxt.write(self.DirFile) self.DirFileTxt = unique.filepath(root_display + 
PitchSelect + ".txt") DirFilePng = unique.filepath(root_display + PitchSelect + ".png") self.myGrid.ClearGrid() title_name = PitchSelect try: self.LOGO.Destroy() except: pass try: self.PanelTitle.Destroy() except: pass font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD) self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7)) self.PanelTitle.SetFont(font) if(TXT_FLAG == 1): try: self.myGrid.DeleteRows(100, self.AppendTotal, True) except: pass try: #First time the DATASET file is imported #font = wx.Font(16, wx.DECORATIVE, wx.BOLD, wx.NORMAL) #self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(210, 15)) #self.PanelTitle.SetFont(font) #table_file = open(self.DirFileTxt, "rU") table_file_contents = [] column_lengths = [] count=0 for line in open(self.DirFileTxt,'rU').xreadlines(): line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") column_lengths.append(len(line)) table_file_contents.append((line)) if count>2000: break count+=1 self.max_column_length = max(column_lengths) self.table_length = len(table_file_contents) if(self.table_length > 100 and self.table_length < 5000): self.AppendTotal = self.table_length - 100 self.myGrid.AppendRows(self.AppendTotal, True) if(self.table_length >= 5000): self.AppendTotal = 5000 self.myGrid.AppendRows(self.AppendTotal, True) for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 try: for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 except: pass self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: TXT_FLAG = 0 self.control.write("Unable to open txt." + "\n") try: self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: pass if(PNG_FLAG == 1): try: open(DirFilePng, "r") self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap() self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER) imgsizer_v = wx.BoxSizer(wx.VERTICAL) imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL) self.page1.SetSizer(imgsizer_v) self.page1.Layout() except: PNG_FLAG = 0 self.control.write("Unable to open png." + "\n") try: self.root_widget_id = 500 self.root_widget_text = 550 for i in range(self.root_widget_id, self.root_widget_end): self.heatmap_ids[i].Destroy() for i in range(self.root_widget_text, self.rwtend): self.heatmap_ids[i].Destroy() self.RunButton2.Destroy() except: pass self.InteractivePanelUpdate(event) if(PNG_FLAG == 1 and TXT_FLAG == 0): self.nb.SetSelection(1) self.Layout() self.page1.Layout() if(PNG_FLAG == 0 and TXT_FLAG == 1): self.nb.SetSelection(0) if(PNG_FLAG == 1 and TXT_FLAG == 1): self.nb.SetSelection(1) self.Layout() self.page1.Layout() except Exception: pass def SelectedBottomTreeID(self, event): #This handles the selection of an item in the BOTTOM tree browser; represents a file most of the time. 
item = event.GetItem() itemObject = self.browser2.GetItemData(item).GetData() Parameters = itemObject.split("|") file_extension = Parameters[1][1:-1] file_extension.replace("'", "") file_extension = file_extension.split(",") file_exts = [] TXT_FLAG = 0 PNG_FLAG = 0 for i in file_extension: i = i.replace("'", "") i = i.replace(" ", "") file_exts.append(i) for i in file_exts: if(i == ".txt"): TXT_FLAG = 1 if(i == ".png"): PNG_FLAG = 1 DirPath = self.main_results_directory + "/" + Parameters[2] if(DirPath[-1] != "/"): DirPath = DirPath + "/" DirFile = DirPath + Parameters[0] self.CurrentFile = DirFile self.control.write("Displaying file: " + DirFile + "\n") title_name = DirFile.split("/") title_name = title_name[-1] self.DirFile = unique.filepath(DirFile) self.IntFileTxt.Clear() self.IntFileTxt.write(self.DirFile) self.DirFileTxt = DirFile + ".txt" DirFilePng = DirFile + ".png" self.myGrid.ClearGrid() try: self.LOGO.Destroy() except: pass try: self.PanelTitle.Destroy() except: pass font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD) self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7)) self.PanelTitle.SetFont(font) #PNG_FLAG and TXT_FLAG are flags that sense the presence of an image or text file. if(PNG_FLAG == 1): try: open(DirFilePng, "r") self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap() self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER) imgsizer_v = wx.BoxSizer(wx.VERTICAL) imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL) self.page1.SetSizer(imgsizer_v) self.page1.Layout() except: PNG_FLAG = 0 self.control.write("Unable to open png." + "\n") if(TXT_FLAG == 1): try: self.myGrid.DeleteRows(100, self.AppendTotal, True) except: pass try: count=0 #table_file = open(self.DirFileTxt, "r") table_file_contents = [] column_lengths = [] for line in open(self.DirFileTxt,'rU').xreadlines(): line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") column_lengths.append(len(line)) table_file_contents.append((line)) count+=1 if count>2000:break self.max_column_length = max(column_lengths) self.table_length = len(table_file_contents) if(self.table_length > 100 and self.table_length < 5000): self.AppendTotal = self.table_length - 100 self.myGrid.AppendRows(self.AppendTotal, True) if(self.table_length >= 5000): self.AppendTotal = 5000 self.myGrid.AppendRows(self.AppendTotal, True) for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 for item_list in table_file_contents: y_count = 0 for item in item_list: try: self.myGrid.SetCellValue(x_count, y_count, item) ###Here except Exception: ### Unclear why this is throwing an error #print traceback.format_exc() #print x_count, y_count, item;sys.exit() pass if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: print traceback.format_exc() TXT_FLAG = 0 self.control.write("Unable to open txt." 
+ "\n") DATASET_FIND_FLAG = re.findall("DATASET", self.DirFileTxt) count=0 if(len(DATASET_FIND_FLAG) > 0): try: #table_file = open(self.DirFileTxt, "rU") table_file_contents = [] pre_sort2 = [] header = [] t_c = 0 column_clusters_flat = 0 answer = "AC" for line in open(self.DirFileTxt,'rU').xreadlines(): #for line in table_file: count+=1 if count>2000: break try: line = line.rstrip(); line = string.replace(line,'"','') line = line.split("\t") if(t_c == 0): header.append((line)) t_c = t_c + 1 index=0 for i in line: if 'ANOVA-rawp' in i: answer = index index+=1 continue if(line[0] == "column_clusters-flat"): header.append((line)) column_clusters_flat = 1 continue line_sort_select = line[answer] pre_sort1 = [] count = 0 for i in line: if(count == 0): try: pre_sort1.append(float(line_sort_select)) except: pre_sort1.append(line_sort_select) pre_sort1.append(i) if(count == answer): count = count + 1 continue if(count != 0): pre_sort1.append(i) count = count + 1 pre_sort2.append((pre_sort1)) except: continue table_file_contents.append(header[0]) if(column_clusters_flat == 1): table_file_contents.append(header[1]) pre_sort2 = sorted(pre_sort2) for line in pre_sort2: try: final_count1 = 0 final_count2 = 1 send_list = [] for item in line: if(final_count1 == 0): send_list.append(line[final_count2]) if(final_count1 == answer): send_list.append(str(line[0])) if(final_count1 != 0 and final_count1 != answer): if(final_count1 < answer): send_list.append(line[final_count2]) if(final_count1 > answer): send_list.append(line[final_count1]) final_count1 = final_count1 + 1 final_count2 = final_count2 + 1 if(len(table_file_contents) >= 5100): break table_file_contents.append((send_list)) except: continue for cell in self.ColoredCellList: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE) self.ColoredCellList = [] x_count = 0 try: for item_list in table_file_contents: y_count = 0 for item in item_list: self.myGrid.SetCellValue(x_count, y_count, item) if(x_count == 0): self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD)) y_count = y_count + 1 x_count = x_count + 1 except: pass self.myGrid.AutoSizeRows(True) for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: self.control.write("Unable to sort." + "\n") self.nb.SetSelection(0) self.InteractivePanelUpdate(event) try: self.myGrid.AutoSize() for i in range(50): colsize = self.myGrid.GetColSize(i) if(colsize > 200): self.myGrid.SetColSize(i, 200) self.page2.Layout() except: pass if(PNG_FLAG == 1 and TXT_FLAG == 0): self.nb.SetSelection(1) self.Layout() self.page1.Layout() if(PNG_FLAG == 0 and TXT_FLAG == 1): self.nb.SetSelection(0) if(PNG_FLAG == 1 and TXT_FLAG == 1): self.nb.SetSelection(1) self.Layout() self.page1.Layout() def InteractivePanelUpdate(self, event): #Both the PCA UI and Heatmap UI share the same panel, so buttons and text boxes (as well as other GUI) will have to be destroyed/hidden #whenever a new type of interactivity is selected. 
self.IntFileTxt.Hide() self.InteractiveFileLabel.Hide() self.Yes1Label.Hide() self.No1Label.Hide() self.D_3DLabel.Hide() self.D_2DLabel.Hide() self.IncludeLabelsRadio.Hide() self.No1Radio.Hide() self.D_3DRadio.Hide() self.D_2DRadio.Hide() self.Opt1Desc.Hide() self.Opt2Desc.Hide() self.RunButton1.Hide() self.Divider1.Hide() self.InteractiveDefaultMessage.Hide() try: self.root_widget_id = 500 self.root_widget_text = 550 for i in range(self.root_widget_id, self.root_widget_end): self.heatmap_ids[i].Destroy() for i in range(self.root_widget_text, self.rwtend): self.heatmap_ids[i].Destroy() self.RunButton2.Destroy() except: pass PCA_RegEx = re.findall("PCA", self.DirFile) if(len(PCA_RegEx) > 0): self.IntFileTxt.Show() self.InteractiveFileLabel.Show() self.Yes1Label.Show() self.No1Label.Show() self.D_3DLabel.Show() self.D_2DLabel.Show() self.IncludeLabelsRadio.Show() self.No1Radio.Show() self.D_3DRadio.Show() self.D_2DRadio.Show() self.Opt1Desc.Show() self.Opt2Desc.Show() self.RunButton1.Show() self.Divider1.Show() Heatmap_RegEx = re.findall("hierarchical", self.DirFile) if(len(Heatmap_RegEx) > 0): #Heatmap Setup os.chdir(parentDirectory) options_open = open(unique.filepath(currentDirectory+"/options.txt"), "rU") heatmap_array = [] self.heatmap_ids = {} self.heatmap_translation = {} supported_geneset_types = UI.getSupportedGeneSetTypes(self.species,'gene-mapp') supported_geneset_types += UI.getSupportedGeneSetTypes(self.species,'gene-go') supported_geneset_types_alt = [self.geneset_type] supported_genesets = self.supported_genesets for line in options_open: line = line.split("\t") variable_name,displayed_title,display_object,group,notes,description,global_default,options = line[:8] options = string.split(options,'|') if(group == "heatmap"): if(display_object == "file"): continue od = UI.OptionData(variable_name,displayed_title,display_object,notes,options,global_default) od.setDefaultOption(global_default) #""" if variable_name == 'ClusterGOElite': od.setArrayOptions(['None Selected','all']+supported_geneset_types) elif variable_name == 'GeneSetSelection': od.setArrayOptions(['None Selected']+supported_geneset_types_alt) elif variable_name == 'PathwaySelection': od.setArrayOptions(['None Selected']+supported_genesets) elif od.DefaultOption() == '': od.setDefaultOption(od.Options()[0]) if od.DefaultOption() == '---': od.setDefaultOption('')#""" heatmap_array.append(od) #heatmap_array.append((line[1], line[2], line[7], line[6])) os.chdir(currentDirectory) root_widget_y_pos = 45 self.root_widget_id = 500 self.root_widget_text = 550 for od in heatmap_array: #od.VariableName() id = wx.NewId() #print od.VariableName(),od.Options() self.heatmap_translation[od.VariableName()] = self.root_widget_id self.heatmap_ids[self.root_widget_text] = wx.StaticText(self.page3, self.root_widget_text, label=od.Display(), pos=(150, root_widget_y_pos)) if(od.DisplayObject() == "comboBox" or od.DisplayObject() == "multiple-comboBox"): self.heatmap_ids[self.root_widget_id] = wx.ComboBox(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25), od.Options(), wx.CB_DROPDOWN) else: self.heatmap_ids[self.root_widget_id] = wx.TextCtrl(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25)) self.root_widget_id = self.root_widget_id + 1 self.root_widget_text = self.root_widget_text + 1 root_widget_y_pos = root_widget_y_pos + 25 self.rwtend = self.root_widget_text self.root_widget_end = self.root_widget_id self.RunButton2 = wx.Button(self.page3, id=599, label="Run", 
pos=(175, (self.root_widget_end + 10)), size=(120, bheight)) self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=599) if(len(PCA_RegEx) == 0 and len(Heatmap_RegEx) == 0): self.InteractiveDefaultMessage.Show() def ClearVisualPanel(self, event): #Deletes the current image on the viewing panel. Unstable and mostly broken; may be removed from future versions. popup = wx.MessageDialog(None, "Are you sure you want to clear the visual panel?", "Warning", wx.YES_NO) popup_answer = popup.ShowModal() if(popup_answer == 5103): try: self.LOGO.Destroy() self.panel2.Layout() except: pass try: self.myGrid.ClearGrid() self.panel2.Layout() except: pass popup.Destroy() self.control.write("Visual panel cleared." + "\n") else: return def InteractiveRun(self, event): #This function is bound to the "Run" button on the interactive tab GUI. Generates an interactive plot. #Currently updates on the panel are a priority and many changes may come with it. RegExHeat = re.findall("hierarchical", self.DirFile) if(len(RegExHeat) > 0): for VariableName in self.heatmap_translation: #self.control.write(str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue()) + " " + str(VariableName) + " " + str(self.heatmap_ids[self.heatmap_translation[VariableName]]) + "\n") try: self.heatmap_translation[VariableName] = str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue()) #print self.heatmap_translation[VariableName] except Exception: pass try: #self.control.write(self.DirFile + "\n") input_file_dir = self.DirFile + ".txt" column_metric = self.heatmap_translation['column_metric']; #self.control.write(column_metric + "\n") column_method = self.heatmap_translation['column_method']; #self.control.write(column_method + "\n") row_metric = self.heatmap_translation['row_metric']; #self.control.write(row_metric + "\n") row_method = self.heatmap_translation['row_method']; #self.control.write(row_method+ "\n") color_gradient = self.heatmap_translation['color_selection']; #self.control.write(color_gradient + "\n") cluster_rows = self.heatmap_translation['cluster_rows']; #self.control.write(cluster_rows + "\n") cluster_columns = self.heatmap_translation['cluster_columns']; #self.control.write(cluster_columns + "\n") normalization = self.heatmap_translation['normalization']; #self.control.write(normalization + "\n") contrast = self.heatmap_translation['contrast']; #self.control.write(contrast + "\n") transpose = self.heatmap_translation['transpose']; #self.control.write(transpose + "\n") GeneSetSelection = self.heatmap_translation['GeneSetSelection']; #self.control.write(GeneSetSelection + "\n") PathwaySelection = self.heatmap_translation['PathwaySelection']; #self.control.write(PathwaySelection + "\n") OntologyID = self.heatmap_translation['OntologyID']; #self.control.write(OntologyID + "\n") GeneSelection = self.heatmap_translation['GeneSelection']; #self.control.write(GeneSelection + "\n") justShowTheseIDs = self.heatmap_translation['JustShowTheseIDs']; #self.control.write(JustShowTheseIDs + "\n") HeatmapAdvanced = self.heatmap_translation['HeatmapAdvanced']; #self.control.write(HeatmapAdvanced + "\n") clusterGOElite = self.heatmap_translation['ClusterGOElite']; #self.control.write(ClusterGOElite + "\n") heatmapGeneSets = self.heatmap_translation['heatmapGeneSets']; #self.control.write(heatmapGeneSets + "\n") if cluster_rows == 'no': row_method = None if cluster_columns == 'no': column_method = None HeatmapAdvanced = (HeatmapAdvanced,) #print ['JustShowTheseIDs',justShowTheseIDs] if self.DirFile not in 
self.heatmap_run: self.heatmap_run[self.DirFile]=None ### occurs when automatically running the heatmap column_method = None row_method = None color_gradient = 'yellow_black_blue' normalization = 'median' translate={'None Selected':'','Exclude Cell Cycle Effects':'excludeCellCycle','Top Correlated Only':'top','Positive Correlations Only':'positive','Perform Iterative Discovery':'driver', 'Intra-Correlated Only':'IntraCorrelatedOnly', 'Perform Monocle':'monocle'} try: if 'None Selected' in HeatmapAdvanced: ('None Selected') except Exception: HeatmapAdvanced = ('None Selected') if ('None Selected' in HeatmapAdvanced and len(HeatmapAdvanced)==1) or 'None Selected' == HeatmapAdvanced: pass else: #print HeatmapAdvanced,'kill' try: GeneSelection += ' '+string.join(list(HeatmapAdvanced),' ') for name in translate: GeneSelection = string.replace(GeneSelection,name,translate[name]) GeneSelection = string.replace(GeneSelection,' ',' ') if 'top' in GeneSelection or 'driver' in GeneSelection or 'excludeCellCycle' in GeneSelection or 'positive' in GeneSelection or 'IntraCorrelatedOnly' in GeneSelection: GeneSelection+=' amplify' except Exception: pass GeneSetSelection = string.replace(GeneSetSelection,'\n',' ') GeneSetSelection = string.replace(GeneSetSelection,'\r',' ') if justShowTheseIDs == '': justShowTheseIDs = 'None Selected' if GeneSetSelection== '': GeneSetSelection = 'None Selected' if PathwaySelection== '': PathwaySelection = 'None Selected' try: rho = float(self.heatmap_translation['CorrelationCutoff']) except Exception: rho=None if transpose == 'yes': transpose = True else: transpose = False vendor = 'RNASeq' color_gradient = string.replace(color_gradient,'-','_') if GeneSetSelection != 'None Selected' or GeneSelection != '' or normalization != 'NA' or JustShowTheseIDs != '' or JustShowTheseIDs != 'None Selected': gsp = UI.GeneSelectionParameters(self.species,self.platform,vendor) if rho!=None: try: gsp.setRhoCutoff(rho) GeneSelection = 'amplify '+GeneSelection except Exception: print 'Must enter a valid Pearson correlation cutoff (float)',traceback.format_exc() gsp.setGeneSet(GeneSetSelection) gsp.setPathwaySelect(PathwaySelection) gsp.setGeneSelection(GeneSelection) gsp.setOntologyID(OntologyID) gsp.setTranspose(transpose) gsp.setNormalize(normalization) gsp.setJustShowTheseIDs(justShowTheseIDs) gsp.setClusterGOElite(clusterGOElite) transpose = gsp ### this allows methods that don't transmit this object to also work if row_method == 'no': row_method = None if column_method == 'no': column_method = None #print [GeneSetSelection, PathwaySelection,OntologyID] remoteCallToAltAnalyze = False #try: print [gsp.ClusterGOElite()] #except Exception: print 'dog', traceback.format_exc() except Exception: print traceback.format_exc() if remoteCallToAltAnalyze == False: try: UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=True) except Exception: print traceback.format_exc() else: try: command = ['--image', 'hierarchical','--species', self.species,'--platform',self.platform,'--input',input_file_dir, '--display', 'True'] command += ['--column_method',str(column_method),'--column_metric',column_metric] command += ['--row_method',str(row_method),'--row_metric',row_metric] command += ['--normalization',normalization,'--transpose',str(transpose),'--contrast',contrast,'--color_gradient',color_gradient] #print command command_str = string.join(['']+command,' ') #print command package_path = unique.filepath('python') 
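                    # The remote-call branch (disabled by default) builds the path to the
                    # packaged AltAnalyze executable and launches it with the assembled
                    # command-line arguments: detached via subprocess.Popen on Windows,
                    # otherwise via subprocess.call.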
mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/AltAnalyze') #os.system(mac_package_path+command_str);sys.exit() import subprocess #subprocess.call([mac_package_path, 'C:\\test.txt']) usePopen = True if os.name == 'nt': command = [mac_package_path]+command DETACHED_PROCESS = 0x00000008 pid = subprocess.Popen(command, creationflags=DETACHED_PROCESS).pid else: command = [mac_package_path]+command if usePopen: alt_command = ["start"]+command alt_command = ["start",mac_package_path] subprocess.call(command) #works but runs in back of the application, detatched if usePopen==False: ### sampe issue as subprocess.Popen pid = os.fork() if pid ==0: os.execv(mac_package_path,command) ### Kills the parent app os._exit(0) """ retcode = subprocess.call([ apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir, "--cel-files", cel_dir, "-a", "pm-mm,mas5-detect.calls=1.pairs=1"])""" except Exception: print traceback.format_exc() else: os.chdir(parentDirectory) RegExMatch = re.findall("exp.", self.DirFile) if(len(RegExMatch) == 0): InputFile = self.DirFile.replace("-3D", "") InputFile = InputFile.replace("-PCA", "") InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionOutput/Clustering/") input_file_dir= InputFile + ".txt" else: InputFile = self.DirFile.replace("-3D", "") InputFile = InputFile.replace("-PCA", "") InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionInput/") input_file_dir= InputFile + ".txt" if(self.IncludeLabelsRadio.GetValue() == True): include_labels= 'yes' else: include_labels= 'no' pca_algorithm = 'SVD' transpose = False if self.runPCA == False: include_labels = 'no' if(self.D_3DRadio.GetValue() == True): plotType = '3D' else: plotType = '2D' display = True self.runPCA = True count,columns = self.verifyFileLength(input_file_dir) if columns == 3: plotType = '2D' ### only 2 components possible for 2 samples if count>0: UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None, plotType=plotType, display=display) else: self.control.write('PCA input file not present: '+input_file_dir+'\n') os.chdir(currentDirectory) self.InteractivePanelUpdate(event) def verifyFileLength(self,filename): count = 0; columns=0 try: fn=unique.filepath(filename) for line in open(fn,'rU').xreadlines(): t = string.split(line,'\t') columns = len(t) count+=1 if count>9: break except Exception: null=[] return count,columns def OnAbout(self, event): #Brings up the developer information. Non-functional currently but will be updated eventually. dial = wx.MessageDialog(None, 'AltAnalyze Results Viewer\nVersion 0.5\n2015', 'About', wx.OK) dial.ShowModal() def OnHelp(self, event): #Brings up the tutorial and dorumentation. Will be updated to a .pdf in the future. os.chdir(parentDirectory) ManualPath = rootDirectory + "/Documentation/ViewerManual.pdf" subprocess.Popen(['open', ManualPath]) os.chdir(currentDirectory) class ImageFrame(wx.Frame): #Obsolete code, will be removed almost certainly. title = "Image" def __init__(self): wx.Frame.__init__(self, None, title=self.title) def remoteViewer(app): fr = Main(parent=None,id=1) fr.Show() app.MainLoop() if __name__ == "__main__": app = wx.App(False) fr = Main(parent=None,id=1) fr.Show() app.MainLoop()
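# --- Illustrative usage sketch (not part of the original module) ---
# remoteViewer() is the hook other scripts can use to launch this viewer with an
# existing wx.App. A minimal caller, assuming this file is importable under a
# hypothetical module name such as `ResultsViewer`:
#
#     import wx
#     import ResultsViewer
#
#     app = wx.App(False)              # create the wx application object first
#     ResultsViewer.remoteViewer(app)  # builds Main(), shows it, enters MainLoop()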
apache-2.0
jereze/scikit-learn
sklearn/preprocessing/data.py
68
57385
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils.extmath import row_norms from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, min_max_axis, inplace_row_scale) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] DEPRECATION_MSG_1D = ( "Passing 1d arrays as data is deprecated in 0.17 and will " "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample." ) def _mean_and_std(X, axis=0, with_mean=True, with_std=True): """Compute mean and std deviation for centering, scaling. Zero valued std components are reset to 1.0 to avoid NaNs when scaling. """ X = np.asarray(X) Xr = np.rollaxis(X, axis) if with_mean: mean_ = Xr.mean(axis=0) else: mean_ = None if with_std: std_ = Xr.std(axis=0) std_ = _handle_zeros_in_scale(std_) else: std_ = None return mean_, std_ def _handle_zeros_in_scale(scale): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0: scale = 1. elif isinstance(scale, np.ndarray): scale[scale == 0.0] = 1.0 scale[~np.isfinite(scale)] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like or CSR matrix. The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. 
To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) mean_, std_ = _mean_and_std( X, axis, with_mean=with_mean, with_std=with_std) if copy: X = X.copy() # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: Xr /= std_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # std_ is very small so that mean_2 = mean_1/std_ > 0, even if # mean_1 was close to zero. The problem is thus essentially due # to the lack of precision of mean_. A solution is then to # substract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. 
Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min data_range = _handle_zeros_in_scale(data_range) self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. 
If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. std_ : array of floats with shape [n_features] The standard deviation for each feature in the training set. Set to one if the standard deviation is zero for a given feature. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : array-like or CSR matrix with shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. """ X = check_array(X, accept_sparse='csr', copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") self.mean_ = None if self.with_std: var = mean_variance_axis(X, axis=0)[1] self.std_ = np.sqrt(var) self.std_ = _handle_zeros_in_scale(self.std_) else: self.std_ = None return self else: self.mean_, self.std_ = _mean_and_std( X, axis=0, with_mean=self.with_mean, with_std=self.with_std) return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.std_ is not None: inplace_column_scale(X, 1 / self.std_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.std_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. 
""" check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.std_ is not None: inplace_column_scale(X, self.std_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.std_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, copy=True): self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) scales = np.maximum(np.abs(mins), np.abs(maxs)) else: scales = np.abs(X).max(axis=0) scales = np.array(scales) scales = scales.reshape(-1) self.scale_ = _handle_zeros_in_scale(scales) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : array-like or CSR matrix. The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) else: inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
""" s = MaxAbsScaler(copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the Interquartile Range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. 
""" if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) elif self.axis == 0: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like. The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.RobustScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. 
For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0, 0, 1], [ 1, 2, 3, 4, 6, 9], [ 1, 4, 5, 16, 20, 25]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0], [ 1, 2, 3, 6], [ 1, 4, 5, 20]]) Attributes ---------- powers_ : array, shape (n_input_features, n_output_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array with shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. 
""" check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Normalizer` to perform normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms) X /= norms[:, np.newaxis] if axis == 0: X = X.T return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). 
Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. 
This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K) if copy: K = K.copy() K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : array or scipy.sparse matrix with shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo']) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. 
indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) X = check_array(X, accept_sparse='csc', copy=copy) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : maximum value for all features. - array : maximum value per feature. categorical_features: "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. 
Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'float'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. 
""" return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those catgorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
bsd-3-clause
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/pandas/tseries/tests/test_base.py
9
82416
from __future__ import print_function import re from datetime import datetime, timedelta import numpy as np import pandas as pd from pandas.tseries.base import DatetimeIndexOpsMixin from pandas.util.testing import assertRaisesRegexp, assertIsInstance from pandas.tseries.common import is_datetimelike from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex, TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index) import pandas.tseries.offsets as offsets import pandas.tslib as tslib import nose import pandas.util.testing as tm from pandas.tests.test_base import Ops class TestDatetimeIndexOps(Ops): tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore', 'dateutil/US/Pacific'] def setUp(self): super(TestDatetimeIndexOps, self).setUp() mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) self.is_valid_objs = [ o for o in self.objs if mask(o) ] self.not_valid_objs = [ o for o in self.objs if not mask(o) ] def test_ops_properties(self): self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter']) self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex)) def test_ops_properties_basic(self): # sanity check that the behavior didn't change # GH7206 for op in ['year','day','second','weekday']: self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op)) # attribute access should still work! s = Series(dict(year=2000,month=1,day=10)) self.assertEqual(s.year,2000) self.assertEqual(s.month,1) self.assertEqual(s.day,10) self.assertRaises(AttributeError, lambda : s.weekday) def test_astype_str(self): # test astype string - #10442 result = date_range('2012-01-01', periods=4, name='test_name').astype(str) expected = Index(['2012-01-01', '2012-01-02', '2012-01-03','2012-01-04'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with tz and name result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str) expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00', '2012-01-03 00:00:00-05:00'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with freqH and name result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str) expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'], name='test_name', dtype=object) tm.assert_index_equal(result, expected) # test astype string with freqH and timezone result = date_range('3/6/2012 00:00', periods=2, freq='H', tz='Europe/London', name='test_name').astype(str) expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'], dtype=object, name='test_name') tm.assert_index_equal(result, expected) def test_asobject_tolist(self): idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx') expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'), pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = pd.date_range(start='2013-01-01', periods=4, 
freq='M', name='idx', tz='Asia/Tokyo') expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'), pd.Timestamp('2013-02-28', tz='Asia/Tokyo'), pd.Timestamp('2013-03-31', tz='Asia/Tokyo'), pd.Timestamp('2013-04-30', tz='Asia/Tokyo')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, datetime(2013, 1, 4)], name='idx') expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'), pd.NaT, pd.Timestamp('2013-01-04')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): for tz in self.tz: # monotonic idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz=tz) self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', '2011-01-02', pd.NaT], tz=tz) self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz)) self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz)) for op in ['min', 'max']: # Return NaT obj = DatetimeIndex([]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = DatetimeIndex([pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) def test_representation(self): idx = [] idx.append(DatetimeIndex([], freq='D')) idx.append(DatetimeIndex(['2011-01-01'], freq='D')) idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')) idx.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')) idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')) idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern')) idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC')) exp = [] exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") exp.append("""DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')""") exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D')""") exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D')""") exp.append("""DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='H')""") exp.append("""DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns, US/Eastern]', freq=None)""") exp.append("""DatetimeIndex(['2011-01-01 09:00:00+00:00', '2011-01-01 10:00:00+00:00', 'NaT'], dtype='datetime64[ns, UTC]', freq=None)""") with pd.option_context('display.width', 300): for indx, expected in zip(idx, exp): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(indx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], 
freq='D') idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) exp1 = """Series([], dtype: datetime64[ns])""" exp2 = """0 2011-01-01 dtype: datetime64[ns]""" exp3 = """0 2011-01-01 1 2011-01-02 dtype: datetime64[ns]""" exp4 = """0 2011-01-01 1 2011-01-02 2 2011-01-03 dtype: datetime64[ns]""" exp5 = """0 2011-01-01 09:00:00+09:00 1 2011-01-01 10:00:00+09:00 2 2011-01-01 11:00:00+09:00 dtype: datetime64[ns, Asia/Tokyo]""" exp6 = """0 2011-01-01 09:00:00-05:00 1 2011-01-01 10:00:00-05:00 2 NaT dtype: datetime64[ns, US/Eastern]""" exp7 = """0 2011-01-01 09:00:00 1 2011-01-02 10:15:00 dtype: datetime64[ns]""" with pd.option_context('display.width', 300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7], [exp1, exp2, exp3, exp4, exp5, exp6, exp7]): result = repr(Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = DatetimeIndex([], freq='D') idx2 = DatetimeIndex(['2011-01-01'], freq='D') idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern') exp1 = """DatetimeIndex: 0 entries Freq: D""" exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01 Freq: D""" exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02 Freq: D""" exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03 Freq: D""" exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00 Freq: H""" exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], [exp1, exp2, exp3, exp4, exp5, exp6]): result = idx.summary() self.assertEqual(result, expected) def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): for tz in [None, 'Asia/Tokyo', 'US/Eastern']: idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) self.assertEqual(idx.resolution, expected) def test_add_iadd(self): for tz in self.tz: # union rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: # GH9094 with tm.assert_produces_warning(FutureWarning): result_add = rng + other result_union = rng.union(other) tm.assert_index_equal(result_add, expected) tm.assert_index_equal(result_union, expected) # GH9094 with tm.assert_produces_warning(FutureWarning): rng += other tm.assert_index_equal(rng, expected) # offset offsets = 
[pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) result = rng + delta expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz) tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng + 1 expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_sub_isub(self): for tz in self.tz: # diff rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3)]: result_union = rng.difference(other) tm.assert_index_equal(result_union, expected) # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz) result = rng - delta expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz) tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) result = rng - 1 expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_value_counts_unique(self): # GH 7735 for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']: idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz) exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz) expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') tm.assert_series_equal(idx.value_counts(), expected) expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(idx.unique(), expected) idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz) expected = Series([3, 2], index=exp_idx) tm.assert_series_equal(idx.value_counts(), expected) exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz) expected = Series([3, 2, 1], index=exp_idx) tm.assert_series_equal(idx.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['2015', '2015', '2016'], ['2015', '2015', '2014'])): tm.assertIn(idx[0], idx) def test_order(self): # with freq idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx') idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], 
freq='H', tz='Asia/Tokyo', name='tzidx') for idx in [idx1, idx2]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2])) self.assertEqual(ordered.freq, idx.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0])) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) # without freq idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], name='idx1') exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], name='idx1') idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], tz='Asia/Tokyo', name='idx2') exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], tz='Asia/Tokyo', name='idx2') idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05', '2011-01-02', pd.NaT], name='idx3') exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03', '2011-01-05'], name='idx3') for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertIsNone(ordered.freq) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2])) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0])) self.assertIsNone(ordered.freq) def test_getitem(self): idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx[0] self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz)) result = idx[0:5] expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[0:10:2] expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[-20:-5:3] expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[4::-1] expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', '2011-01-02', '2011-01-01'], freq='-1D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) def test_drop_duplicates_metadata(self): #GH 10115 idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup 
= idx.append(idx) self.assertIsNone(idx_dup.freq) # freq is reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertIsNone(result.freq) def test_take(self): #GH 10295 idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx') for idx in [idx1, idx2]: result = idx.take([0]) self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz)) result = idx.take([0, 1, 2]) expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([7, 4, 1]) expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([3, 2, 5]) expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'], freq=None, tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) result = idx.take([-3, 2, 5]) expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq=None, tz=idx.tz, name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) def test_infer_freq(self): # GH 11018 for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']: idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10) result = pd.DatetimeIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) self.assertEqual(result.freq, freq) class TestTimedeltaIndexOps(Ops): def setUp(self): super(TestTimedeltaIndexOps, self).setUp() mask = lambda x: isinstance(x, TimedeltaIndex) self.is_valid_objs = [ o for o in self.objs if mask(o) ] self.not_valid_objs = [ ] def test_ops_properties(self): self.check_ops_properties(['days','hours','minutes','seconds','milliseconds']) self.check_ops_properties(['microseconds','nanoseconds']) def test_asobject_tolist(self): idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'), Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT, timedelta(days=4)], name='idx') expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT, Timedelta('4 days')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) def test_minmax(self): # monotonic idx1 = TimedeltaIndex(['1 days', '2 days', '3 days']) self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT']) self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), Timedelta('1 days')), self.assertEqual(idx.max(), 
Timedelta('3 days')), for op in ['min', 'max']: # Return NaT obj = TimedeltaIndex([]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT]) self.assertTrue(pd.isnull(getattr(obj, op)())) def test_representation(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" exp2 = """TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')""" exp3 = """TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')""" exp4 = """TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq='D')""" exp5 = """TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', '3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)""" with pd.option_context('display.width',300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(idx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """Series([], dtype: timedelta64[ns])""" exp2 = """0 1 days dtype: timedelta64[ns]""" exp3 = """0 1 days 1 2 days dtype: timedelta64[ns]""" exp4 = """0 1 days 1 2 days 2 3 days dtype: timedelta64[ns]""" exp5 = """0 1 days 00:00:01 1 2 days 00:00:00 2 3 days 00:00:00 dtype: timedelta64[ns]""" with pd.option_context('display.width',300): for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = repr(pd.Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = TimedeltaIndex([], freq='D') idx2 = TimedeltaIndex(['1 days'], freq='D') idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) exp1 = """TimedeltaIndex: 0 entries Freq: D""" exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days Freq: D""" exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days Freq: D""" exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days Freq: D""" exp5 = """TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]): result = idx.summary() self.assertEqual(result, expected) def test_add_iadd(self): # only test adding/sub offsets as + is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days','10 days') result = rng + delta expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng + 1 expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_sub_isub(self): # 
only test adding/sub offsets as - is now numeric # offset offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] for delta in offsets: rng = timedelta_range('1 days','10 days') result = rng - delta expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) # int rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) result = rng - 1 expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_ops_compat(self): offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), Timedelta(hours=2)] rng = timedelta_range('1 days','10 days',name='foo') # multiply for offset in offsets: self.assertRaises(TypeError, lambda : rng * offset) # divide expected = Int64Index((np.arange(10)+1)*12,name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result,expected) # divide with nats rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') expected = Float64Index([12, np.nan, 24], name='foo') for offset in offsets: result = rng / offset tm.assert_index_equal(result,expected) # don't allow division by NaT (make could in the future) self.assertRaises(TypeError, lambda : rng / pd.NaT) def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') self.assertRaises(TypeError, lambda : tdi - dt) self.assertRaises(TypeError, lambda : tdi - dti) self.assertRaises(TypeError, lambda : td - dt) self.assertRaises(TypeError, lambda : td - dti) result = dt - dti expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') tm.assert_index_equal(result, expected) result = dti - dt expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar') tm.assert_index_equal(result, expected) result = tdi - td expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = td - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo') tm.assert_index_equal(result, expected, check_names=False) result = dti - td expected = DatetimeIndex(['20121231', '20130101', '20130102'], name='bar') tm.assert_index_equal(result, expected, check_names=False) result = dt - tdi expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo') tm.assert_index_equal(result, expected) def test_subtraction_ops_with_tz(self): # check that dt/dti subtraction ops with tz are validated dti = date_range('20130101',periods=3) ts = Timestamp('20130101') dt = ts.to_datetime() dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern') ts_tz = Timestamp('20130101').tz_localize('US/Eastern') ts_tz2 = Timestamp('20130101').tz_localize('CET') dt_tz = ts_tz.to_datetime() td = Timedelta('1 days') def _check(result, expected): self.assertEqual(result,expected) self.assertIsInstance(result, Timedelta) # scalars result = ts - ts expected = Timedelta('0 days') _check(result, expected) result = dt_tz - ts_tz expected = Timedelta('0 days') _check(result, expected) result = ts_tz - dt_tz expected = Timedelta('0 days') _check(result, expected) # tz mismatches self.assertRaises(TypeError, lambda : dt_tz - ts) self.assertRaises(TypeError, lambda : dt_tz - dt) self.assertRaises(TypeError, lambda : dt_tz - 
ts_tz2) self.assertRaises(TypeError, lambda : dt - dt_tz) self.assertRaises(TypeError, lambda : ts - dt_tz) self.assertRaises(TypeError, lambda : ts_tz2 - ts) self.assertRaises(TypeError, lambda : ts_tz2 - dt) self.assertRaises(TypeError, lambda : ts_tz - ts_tz2) # with dti self.assertRaises(TypeError, lambda : dti - ts_tz) self.assertRaises(TypeError, lambda : dti_tz - ts) self.assertRaises(TypeError, lambda : dti_tz - ts_tz2) result = dti_tz-dt_tz expected = TimedeltaIndex(['0 days','1 days','2 days']) tm.assert_index_equal(result,expected) result = dt_tz-dti_tz expected = TimedeltaIndex(['0 days','-1 days','-2 days']) tm.assert_index_equal(result,expected) result = dti_tz-ts_tz expected = TimedeltaIndex(['0 days','1 days','2 days']) tm.assert_index_equal(result,expected) result = ts_tz-dti_tz expected = TimedeltaIndex(['0 days','-1 days','-2 days']) tm.assert_index_equal(result,expected) result = td - td expected = Timedelta('0 days') _check(result, expected) result = dti_tz - td expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern') tm.assert_index_equal(result,expected) def test_dti_dti_deprecated_ops(self): # deprecated in 0.16.0 (GH9094) # change to return subtraction -> TimeDeltaIndex in 0.17.0 # shoudl move to the appropriate sections above dti = date_range('20130101',periods=3) dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern') with tm.assert_produces_warning(FutureWarning): result = dti-dti expected = Index([]) tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): result = dti+dti expected = dti tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): result = dti_tz-dti_tz expected = Index([]) tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): result = dti_tz+dti_tz expected = dti_tz tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): result = dti_tz-dti expected = dti_tz tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): result = dti-dti_tz expected = dti tm.assert_index_equal(result,expected) with tm.assert_produces_warning(FutureWarning): self.assertRaises(TypeError, lambda : dti_tz+dti) with tm.assert_produces_warning(FutureWarning): self.assertRaises(TypeError, lambda : dti+dti_tz) def test_dti_tdi_numeric_ops(self): # These are normally union/diff set-like ops tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') result = tdi - tdi expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo') tm.assert_index_equal(result, expected) result = tdi + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo') tm.assert_index_equal(result, expected) result = dti - tdi # name will be reset expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) tm.assert_index_equal(result, expected) def test_addition_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') dti = date_range('20130101', periods=3, name='bar') td = Timedelta('1 days') dt = Timestamp('20130101') result = tdi + dt expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = dt + tdi expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo') tm.assert_index_equal(result, expected) result = td + tdi expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], 
name='foo') tm.assert_index_equal(result, expected) result = tdi + td expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo') tm.assert_index_equal(result, expected) # unequal length self.assertRaises(ValueError, lambda : tdi + dti[0:1]) self.assertRaises(ValueError, lambda : tdi[0:1] + dti) # random indexes self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3])) # this is a union! #self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi) result = tdi + dti # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dti + tdi # name will be reset expected = DatetimeIndex(['20130102', pd.NaT, '20130105']) tm.assert_index_equal(result, expected) result = dt + td expected = Timestamp('20130102') self.assertEqual(result, expected) result = td + dt expected = Timestamp('20130102') self.assertEqual(result, expected) def test_value_counts_unique(self): # GH 7735 idx = timedelta_range('1 days 09:00:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1))) exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10) expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') tm.assert_series_equal(idx.value_counts(), expected) expected = timedelta_range('1 days 09:00:00', freq='H', periods=10) tm.assert_index_equal(idx.unique(), expected) idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00', '1 days 08:00:00', '1 days 08:00:00', pd.NaT]) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00']) expected = Series([3, 2], index=exp_idx) tm.assert_series_equal(idx.value_counts(), expected) exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT]) expected = Series([3, 2, 1], index=exp_idx) tm.assert_series_equal(idx.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1], ['00:01:00', '00:01:00', '00:02:00'], ['00:01:00', '00:01:00', '00:00:01'])): tm.assertIn(idx[0], idx) def test_unknown_attribute(self): #GH 9680 tdi = pd.timedelta_range(start=0,periods=10,freq='1s') ts = pd.Series(np.random.normal(size=10),index=tdi) self.assertNotIn('foo',ts.__dict__.keys()) self.assertRaises(AttributeError,lambda : ts.foo) def test_order(self): #GH 10295 idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx') idx2 = TimedeltaIndex(['1 hour', '2 hour', '3 hour'], freq='H', name='idx') for idx in [idx1, idx2]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2])) self.assertEqual(ordered.freq, idx.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, idx[::-1]) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq.n, -1) idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour', '2 hour ', '1 hour'], name='idx1') exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour', '3 hour', '5 hour'], name='idx1') idx2 = TimedeltaIndex(['1 day', '3 day', '5 
day', '2 day', '1 day'], name='idx2') exp2 = TimedeltaIndex(['1 day', '1 day', '2 day', '3 day', '5 day'], name='idx2') idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute', '2 minute', pd.NaT], name='idx3') exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute', '5 minute'], name='idx3') for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertIsNone(ordered.freq) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2])) self.assertIsNone(ordered.freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0])) self.assertIsNone(ordered.freq) def test_getitem(self): idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx[0] self.assertEqual(result, pd.Timedelta('1 day')) result = idx[0:5] expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[0:10:2] expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[-20:-5:3] expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx[4::-1] expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'], freq='-1D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) def test_drop_duplicates_metadata(self): #GH 10115 idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) self.assertIsNone(idx_dup.freq) # freq is reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertIsNone(result.freq) def test_take(self): #GH 10295 idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) self.assertEqual(result, pd.Timedelta('1 day')) result = idx.take([-1]) self.assertEqual(result, pd.Timedelta('31 day')) result = idx.take([0, 1, 2]) expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([7, 4, 1]) expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) result = idx.take([3, 2, 5]) expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) result = idx.take([-3, 2, 5]) expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx') self.assert_index_equal(result, expected) self.assertIsNone(result.freq) def test_infer_freq(self): # GH 11018 for freq in ['D', '3D', '-3D', 
'H', '2H', '-2H', 'T', '2T', 'S', '-3S']: idx = pd.timedelta_range('1', freq=freq, periods=10) result = pd.TimedeltaIndex(idx.asi8, freq='infer') tm.assert_index_equal(idx, result) self.assertEqual(result.freq, freq) class TestPeriodIndexOps(Ops): def setUp(self): super(TestPeriodIndexOps, self).setUp() mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex) self.is_valid_objs = [ o for o in self.objs if mask(o) ] self.not_valid_objs = [ o for o in self.objs if not mask(o) ] def test_ops_properties(self): self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter']) self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex)) def test_asobject_tolist(self): idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx') expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'), pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) self.assertTrue(result.equals(expected)) self.assertEqual(result.name, expected.name) self.assertEqual(idx.tolist(), expected_list) idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx') expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'), pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')] expected = pd.Index(expected_list, dtype=object, name='idx') result = idx.asobject self.assertTrue(isinstance(result, Index)) self.assertEqual(result.dtype, object) for i in [0, 1, 3]: self.assertTrue(result[i], expected[i]) self.assertTrue(result[2].ordinal, pd.tslib.iNaT) self.assertTrue(result[2].freq, 'D') self.assertEqual(result.name, expected.name) result_list = idx.tolist() for i in [0, 1, 3]: self.assertTrue(result_list[i], expected_list[i]) self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT) self.assertTrue(result_list[2].freq, 'D') def test_minmax(self): # monotonic idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02', '2011-01-03'], freq='D') self.assertTrue(idx1.is_monotonic) # non-monotonic idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03', '2011-01-02', pd.NaT], freq='D') self.assertFalse(idx2.is_monotonic) for idx in [idx1, idx2]: self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D')) self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D')) for op in ['min', 'max']: # Return NaT obj = PeriodIndex([], freq='M') result = getattr(obj, op)() self.assertEqual(result.ordinal, tslib.iNaT) self.assertEqual(result.freq, 'M') obj = PeriodIndex([pd.NaT], freq='M') result = getattr(obj, op)() self.assertEqual(result.ordinal, tslib.iNaT) self.assertEqual(result.freq, 'M') obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M') result = getattr(obj, op)() self.assertEqual(result.ordinal, tslib.iNaT) self.assertEqual(result.freq, 'M') def test_representation(self): # GH 7601 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', 
periods=3, freq="Q") exp1 = """PeriodIndex([], dtype='int64', freq='D')""" exp2 = """PeriodIndex(['2011-01-01'], dtype='int64', freq='D')""" exp3 = """PeriodIndex(['2011-01-01', '2011-01-02'], dtype='int64', freq='D')""" exp4 = """PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='int64', freq='D')""" exp5 = """PeriodIndex(['2011', '2012', '2013'], dtype='int64', freq='A-DEC')""" exp6 = """PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], dtype='int64', freq='H')""" exp7 = """PeriodIndex(['2013Q1'], dtype='int64', freq='Q-DEC')""" exp8 = """PeriodIndex(['2013Q1', '2013Q2'], dtype='int64', freq='Q-DEC')""" exp9 = """PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='int64', freq='Q-DEC')""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): for func in ['__repr__', '__unicode__', '__str__']: result = getattr(idx, func)() self.assertEqual(result, expected) def test_representation_to_series(self): # GH 10971 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") exp1 = """Series([], dtype: object)""" exp2 = """0 2011-01-01 dtype: object""" exp3 = """0 2011-01-01 1 2011-01-02 dtype: object""" exp4 = """0 2011-01-01 1 2011-01-02 2 2011-01-03 dtype: object""" exp5 = """0 2011 1 2012 2 2013 dtype: object""" exp6 = """0 2011-01-01 09:00 1 2012-02-01 10:00 2 NaT dtype: object""" exp7 = """0 2013Q1 dtype: object""" exp8 = """0 2013Q1 1 2013Q2 dtype: object""" exp9 = """0 2013Q1 1 2013Q2 2 2013Q3 dtype: object""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): result = repr(pd.Series(idx)) self.assertEqual(result, expected) def test_summary(self): # GH9116 idx1 = PeriodIndex([], freq='D') idx2 = PeriodIndex(['2011-01-01'], freq='D') idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') idx7 = pd.period_range('2013Q1', periods=1, freq="Q") idx8 = pd.period_range('2013Q1', periods=2, freq="Q") idx9 = pd.period_range('2013Q1', periods=3, freq="Q") exp1 = """PeriodIndex: 0 entries Freq: D""" exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01 Freq: D""" exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02 Freq: D""" exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03 Freq: D""" exp5 = """PeriodIndex: 3 entries, 2011 to 2013 Freq: A-DEC""" exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT Freq: H""" exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1 Freq: Q-DEC""" exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2 Freq: Q-DEC""" exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3 Freq: Q-DEC""" for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9], [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]): result = idx.summary() self.assertEqual(result, expected) def test_resolution(self): for freq, expected in 
zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): idx = pd.period_range(start='2013-04-01', periods=30, freq=freq) self.assertEqual(idx.resolution, expected) def test_add_iadd(self): # union rng1 = pd.period_range('1/1/2000', freq='D', periods=5) other1 = pd.period_range('1/6/2000', freq='D', periods=5) expected1 = pd.period_range('1/1/2000', freq='D', periods=10) rng2 = pd.period_range('1/1/2000', freq='D', periods=5) other2 = pd.period_range('1/4/2000', freq='D', periods=5) expected2 = pd.period_range('1/1/2000', freq='D', periods=8) rng3 = pd.period_range('1/1/2000', freq='D', periods=5) other3 = pd.PeriodIndex([], freq='D') expected3 = pd.period_range('1/1/2000', freq='D', periods=5) rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00', '2000-01-01 11:00', '2000-01-01 12:00', '2000-01-01 13:00', '2000-01-02 09:00', '2000-01-02 10:00', '2000-01-02 11:00', '2000-01-02 12:00', '2000-01-02 13:00'], freq='H') rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05'], freq='T') other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05' '2000-01-01 09:08'], freq='T') expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05', '2000-01-01 09:08'], freq='T') rng6 = pd.period_range('2000-01-01', freq='M', periods=7) other6 = pd.period_range('2000-04-01', freq='M', periods=7) expected6 = pd.period_range('2000-01-01', freq='M', periods=10) rng7 = pd.period_range('2003-01-01', freq='A', periods=5) other7 = pd.period_range('1998-01-01', freq='A', periods=8) expected7 = pd.period_range('1998-01-01', freq='A', periods=10) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3), (rng4, other4, expected4), (rng5, other5, expected5), (rng6, other6, expected6), (rng7, other7, expected7)]: # GH9094 with tm.assert_produces_warning(FutureWarning): result_add = rng + other result_union = rng.union(other) tm.assert_index_equal(result_add, expected) tm.assert_index_equal(result_union, expected) # GH 6527 # GH9094 with tm.assert_produces_warning(FutureWarning): rng += other tm.assert_index_equal(rng, expected) # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng + pd.offsets.YearEnd(5) expected = pd.period_range('2019', '2029', freq='A') tm.assert_index_equal(result, expected) rng += pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]: msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)' with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'): rng + o rng = pd.period_range('2014-01', '2016-12', freq='M') result = rng + pd.offsets.MonthEnd(5) expected = pd.period_range('2014-06', '2017-05', freq='M') tm.assert_index_equal(result, expected) rng += pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]: rng = pd.period_range('2014-01', '2016-12', freq='M') msg = 'Input has different freq from PeriodIndex\\(freq=M\\)' with tm.assertRaisesRegexp(ValueError, msg): rng + o # Tick offsets = [pd.offsets.Day(3), 
timedelta(days=3), np.timedelta64(3, 'D'), pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h'), Timedelta('72:00:00')] for delta in offsets: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') result = rng + delta expected = pd.period_range('2014-05-04', '2014-05-18', freq='D') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') msg = 'Input has different freq from PeriodIndex\\(freq=D\\)' with tm.assertRaisesRegexp(ValueError, msg): rng + o offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm'), Timedelta(minutes=120)] for delta in offsets: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') result = rng + delta expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H') tm.assert_index_equal(result, expected) rng += delta tm.assert_index_equal(rng, expected) for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's'), Timedelta(seconds=30)]: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') msg = 'Input has different freq from PeriodIndex\\(freq=H\\)' with tm.assertRaisesRegexp(ValueError, msg): result = rng + delta with tm.assertRaisesRegexp(ValueError, msg): rng += delta # int rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) result = rng + 1 expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng += 1 tm.assert_index_equal(rng, expected) def test_sub_isub(self): # diff rng1 = pd.period_range('1/1/2000', freq='D', periods=5) other1 = pd.period_range('1/6/2000', freq='D', periods=5) expected1 = pd.period_range('1/1/2000', freq='D', periods=5) rng2 = pd.period_range('1/1/2000', freq='D', periods=5) other2 = pd.period_range('1/4/2000', freq='D', periods=5) expected2 = pd.period_range('1/1/2000', freq='D', periods=3) rng3 = pd.period_range('1/1/2000', freq='D', periods=5) other3 = pd.PeriodIndex([], freq='D') expected3 = pd.period_range('1/1/2000', freq='D', periods=5) rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5) other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5) expected4 = rng4 rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03', '2000-01-01 09:05'], freq='T') other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T') expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T') rng6 = pd.period_range('2000-01-01', freq='M', periods=7) other6 = pd.period_range('2000-04-01', freq='M', periods=7) expected6 = pd.period_range('2000-01-01', freq='M', periods=3) rng7 = pd.period_range('2003-01-01', freq='A', periods=5) other7 = pd.period_range('1998-01-01', freq='A', periods=8) expected7 = pd.period_range('2006-01-01', freq='A', periods=2) for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2), (rng3, other3, expected3), (rng4, other4, expected4), (rng5, other5, expected5), (rng6, other6, expected6), (rng7, other7, expected7),]: result_union = rng.difference(other) tm.assert_index_equal(result_union, expected) # offset # DateOffset rng = pd.period_range('2014', '2024', freq='A') result = rng - pd.offsets.YearEnd(5) expected = pd.period_range('2009', '2019', freq='A') 
tm.assert_index_equal(result, expected) rng -= pd.offsets.YearEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: rng = pd.period_range('2014', '2024', freq='A') msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)' with tm.assertRaisesRegexp(ValueError, msg): rng - o rng = pd.period_range('2014-01', '2016-12', freq='M') result = rng - pd.offsets.MonthEnd(5) expected = pd.period_range('2013-08', '2016-07', freq='M') tm.assert_index_equal(result, expected) rng -= pd.offsets.MonthEnd(5) tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(365, 'D'), timedelta(365)]: rng = pd.period_range('2014-01', '2016-12', freq='M') msg = 'Input has different freq from PeriodIndex\\(freq=M\\)' with tm.assertRaisesRegexp(ValueError, msg): rng - o # Tick offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'), pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')] for delta in offsets: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') result = rng - delta expected = pd.period_range('2014-04-28', '2014-05-12', freq='D') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(), np.timedelta64(4, 'h'), timedelta(hours=23)]: rng = pd.period_range('2014-05-01', '2014-05-15', freq='D') msg = 'Input has different freq from PeriodIndex\\(freq=D\\)' with tm.assertRaisesRegexp(ValueError, msg): rng - o offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'), pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')] for delta in offsets: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') result = rng - delta expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H') tm.assert_index_equal(result, expected) rng -= delta tm.assert_index_equal(rng, expected) for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]: rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H') msg = 'Input has different freq from PeriodIndex\\(freq=H\\)' with tm.assertRaisesRegexp(ValueError, msg): result = rng + delta with tm.assertRaisesRegexp(ValueError, msg): rng += delta # int rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) result = rng - 1 expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10) tm.assert_index_equal(result, expected) rng -= 1 tm.assert_index_equal(rng, expected) def test_value_counts_unique(self): # GH 7735 idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10) # create repeated values, 'n'th element is repeated by n+1 times idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H') exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00', '2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00', '2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00', '2011-01-01 09:00'], freq='H') expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') tm.assert_series_equal(idx.value_counts(), expected) expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10) tm.assert_index_equal(idx.unique(), expected) idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H') exp_idx = 
PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H') expected = Series([3, 2], index=exp_idx) tm.assert_series_equal(idx.value_counts(), expected) exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H') expected = Series([3, 2, 1], index=exp_idx) tm.assert_series_equal(idx.value_counts(dropna=False), expected) tm.assert_index_equal(idx.unique(), exp_idx) def test_drop_duplicates_metadata(self): #GH 10115 idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') result = idx.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) idx_dup = idx.append(idx) # freq will not be reset result = idx_dup.drop_duplicates() self.assert_index_equal(idx, result) self.assertEqual(idx.freq, result.freq) def test_order_compat(self): def _check_freq(index, expected_index): if isinstance(index, PeriodIndex): self.assertEqual(index.freq, expected_index.freq) pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A') # for compatibility check iidx = Index([2011, 2012, 2013], name='idx') for idx in [pidx, iidx]: ordered = idx.sort_values() self.assert_index_equal(ordered, idx) _check_freq(ordered, idx) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, idx[::-1]) _check_freq(ordered, idx[::-1]) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2])) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, idx[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0])) _check_freq(ordered, idx[::-1]) pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A') pexpected = PeriodIndex(['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A') # for compatibility check iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx') iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx') for idx, expected in [(pidx, pexpected), (iidx, iexpected)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) _check_freq(ordered, idx) ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2])) _check_freq(ordered, idx) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0])) _check_freq(ordered, idx) pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D') result = pidx.sort_values() expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') result = pidx.sort_values(ascending=False) expected = PeriodIndex(['2013', '2011', '2011', 'NaT'], name='pidx', freq='D') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') def test_order(self): for freq in ['D', '2D', '4D']: idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq=freq, name='idx') ordered = idx.sort_values() self.assert_index_equal(ordered, idx) self.assertEqual(ordered.freq, idx.freq) ordered = idx.sort_values(ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, 
expected.freq) self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, idx) self.assert_numpy_array_equal(indexer, np.array([0, 1, 2])) self.assertEqual(ordered.freq, idx.freq) self.assertEqual(ordered.freq, freq) ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) expected = idx[::-1] self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([2, 1, 0])) self.assertEqual(ordered.freq, expected.freq) self.assertEqual(ordered.freq, freq) idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], freq='D', name='idx1') exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx1') idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-02', '2011-01-01'], freq='D', name='idx2') exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx2') idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05', '2011-01-02', pd.NaT], freq='D', name='idx3') exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03', '2011-01-05'], freq='D', name='idx3') for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]: ordered = idx.sort_values() self.assert_index_equal(ordered, expected) self.assertEqual(ordered.freq, 'D') ordered = idx.sort_values(ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assertEqual(ordered.freq, 'D') ordered, indexer = idx.sort_values(return_indexer=True) self.assert_index_equal(ordered, expected) self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2])) self.assertEqual(ordered.freq, 'D') ordered, indexer = idx.sort_values(return_indexer=True, ascending=False) self.assert_index_equal(ordered, expected[::-1]) self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0])) self.assertEqual(ordered.freq, 'D') def test_getitem(self): idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') for idx in [idx1]: result = idx[0] self.assertEqual(result, pd.Period('2011-01-01', freq='D')) result = idx[-1] self.assertEqual(result, pd.Period('2011-01-31', freq='D')) result = idx[0:5] expected = pd.period_range('2011-01-01', '2011-01-05', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[0:10:2] expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05', '2011-01-07', '2011-01-09'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[-20:-5:3] expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18', '2011-01-21', '2011-01-24'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx[4::-1] expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03', '2011-01-02', '2011-01-01'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') def test_take(self): #GH 10295 idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') for idx in [idx1]: result = idx.take([0]) self.assertEqual(result, pd.Period('2011-01-01', freq='D')) result = idx.take([5]) self.assertEqual(result, pd.Period('2011-01-06', freq='D')) result = idx.take([0, 1, 2]) expected = 
pd.period_range('2011-01-01', '2011-01-03', freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, 'D') self.assertEqual(result.freq, expected.freq) result = idx.take([0, 2, 4]) expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([7, 4, 1]) expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([3, 2, 5]) expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') result = idx.take([-3, 2, 5]) expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'], freq='D', name='idx') self.assert_index_equal(result, expected) self.assertEqual(result.freq, expected.freq) self.assertEqual(result.freq, 'D') if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], # '--with-coverage', '--cover-package=pandas.core'], exit=False)
gpl-2.0
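The tests above pin down how TimedeltaIndex arithmetic and positional selection interact with the index frequency. A minimal standalone sketch of that behaviour (the names `rng` and `shifted` are illustrative only, not part of the test suite):

import pandas as pd

rng = pd.timedelta_range('1 days', '10 days', freq='D', name='foo')

# Adding a fixed offset shifts every element; the daily frequency is kept.
shifted = rng + pd.offsets.Hour(2)
print(shifted[0])                  # 1 days 02:00:00

# Evenly spaced .take() positions keep an inferred frequency (2 * Day here),
# while an irregular selection resets freq to None, as the tests assert.
print(rng.take([0, 2, 4]).freq)
print(rng.take([3, 2, 5]).freq)    # None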
timcera/hspfbintoolbox
tests/test_catalog.py
1
115314
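The fixture embedded in the test module below is the expected catalog listing as plain CSV text. As an illustrative aside (the helper name `catalog_to_frame` and the decoded-str input are assumptions, not part of the module), such a listing can be loaded for comparison with pandas alone:

from io import StringIO

import pandas as pd

def catalog_to_frame(catalog_text):
    """Parse a catalog listing (decoded to str) into a tidy DataFrame."""
    frame = pd.read_csv(StringIO(catalog_text), skipinitialspace=True)
    # Header names and string cells carry alignment padding, so strip it off.
    frame.columns = [c.strip() for c in frame.columns]
    return frame.apply(lambda col: col.str.strip() if col.dtype == object else col)

# e.g. catalog_to_frame(text).query("GROUP == 'IWATER' and VAR == 'SURO'")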
# -*- coding: utf-8 -*- """ catalog ---------------------------------- Tests for `hspfbintoolbox` module. """ import csv import shlex import subprocess import sys from unittest import TestCase from pandas.testing import assert_frame_equal try: from cStringIO import StringIO except: from io import StringIO import pandas as pd from hspfbintoolbox import hspfbintoolbox interval2codemap = {"yearly": 5, "monthly": 4, "daily": 3, "bivl": 2} def capture(func, *args, **kwds): sys.stdout = StringIO() # capture output out = func(*args, **kwds) out = sys.stdout.getvalue() # release output try: out = bytes(out, "utf-8") except: pass return out def read_unicode_csv( filename, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator="\n", encoding="utf-8", ): # Python 3 version if sys.version_info[0] >= 3: # Open the file in text mode with given encoding # Set newline arg to '' # (see https://docs.python.org/3/library/csv.html) # Next, get the csv reader, with unicode delimiter and quotechar csv_reader = csv.reader( filename, delimiter=delimiter, quotechar=quotechar, quoting=quoting, lineterminator=lineterminator, ) # Now, iterate over the (already decoded) csv_reader generator for row in csv_reader: yield row # Python 2 version else: # Next, get the csv reader, passing delimiter and quotechar as # bytestrings rather than unicode csv_reader = csv.reader( filename, delimiter=delimiter.encode(encoding), quotechar=quotechar.encode(encoding), quoting=quoting, lineterminator=lineterminator, ) # Iterate over the file and decode each string into unicode for row in csv_reader: yield [cell.decode(encoding) for cell in row] class TestDescribe(TestCase): def setUp(self): self.catalog = b"""\ LUE , LC,GROUP ,VAR , TC,START ,END ,TC IMPLND, 11,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 11,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 11,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 11,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 11,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 11,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 12,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 13,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 14,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 21,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 22,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,RETS 
, 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 23,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 24,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 31,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 32,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 33,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 111,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 112,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 113,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 114,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 211,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 212,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 213,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 214,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 214,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 214,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 214,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 214,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 214,IWATER 
,SURS , 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 301,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 302,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 303,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 304,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 311,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 312,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 313,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 314,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 411,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 412,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 413,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 414,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 511,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 511,IWATER ,PET , 5,1951 
,2001 ,yearly IMPLND, 511,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 511,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 511,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 511,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 512,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 513,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 514,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 611,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 612,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 613,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 614,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 711,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 712,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 713,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 714,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 811,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 811,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 811,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 811,IWATER ,SUPY , 5,1951 ,2001 ,yearly 
IMPLND, 811,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 811,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 812,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 813,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 814,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 822,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 823,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 824,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 901,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 902,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 903,IWATER ,SURS , 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,IMPEV, 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,PET , 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,RETS , 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,SUPY , 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,SURO , 5,1951 ,2001 ,yearly IMPLND, 904,IWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 11,PWATER 
,INFIL, 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 11,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 12,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 13,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 
14,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 14,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 15,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 21,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,AGWI , 5,1951 ,2001 ,yearly 
PERLND, 22,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 22,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 23,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,TAET , 5,1951 ,2001 
,yearly PERLND, 24,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 24,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 25,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 31,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,PERS , 5,1951 
,2001 ,yearly PERLND, 32,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 32,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 33,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 35,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 
111,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 111,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 112,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 113,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,CEPS 
, 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 114,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 115,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 211,PWATER ,UZS , 5,1951 ,2001 
,yearly PERLND, 212,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 212,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 213,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 
214,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 214,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 215,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 301,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 302,PWATER 
,LZET , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 302,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 303,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 304,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,CEPS , 5,1951 
,2001 ,yearly PERLND, 305,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 305,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 311,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 312,PWATER ,UZS , 5,1951 ,2001 ,yearly 
PERLND, 313,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 313,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 314,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 
315,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 315,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 411,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 412,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 413,PWATER 
,LZET , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 413,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 414,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 415,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,CEPS , 5,1951 
,2001 ,yearly PERLND, 511,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 511,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 512,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 513,PWATER ,UZS , 5,1951 ,2001 ,yearly 
PERLND, 514,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 514,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 515,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 
611,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 611,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 612,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 613,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 614,PWATER 
,LZET , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 614,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 615,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 711,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,CEPS , 5,1951 
,2001 ,yearly PERLND, 712,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 712,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 713,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 714,PWATER ,UZS , 5,1951 ,2001 ,yearly 
PERLND, 715,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 715,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 811,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 
812,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 812,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 813,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 814,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 815,PWATER 
,LZET , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 815,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 822,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 823,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,CEPS , 5,1951 
,2001 ,yearly PERLND, 824,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 824,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 825,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 901,PWATER ,UZS , 5,1951 ,2001 ,yearly 
PERLND, 902,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 902,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,SUPY , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,SURO , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,SURS , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,TAET , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,UZET , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,UZI , 5,1951 ,2001 ,yearly PERLND, 903,PWATER ,UZS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,AGWET, 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,AGWI , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,AGWO , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,AGWS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,BASET, 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,CEPE , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,CEPS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,GWVS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,IFWI , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,IFWO , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,IFWS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,IGWI , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,INFIL, 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,LZET , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,LZI , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,LZS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,PERC , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,PERO , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,PERS , 5,1951 ,2001 ,yearly PERLND, 904,PWATER ,PET , 5,1951 ,2001 ,yearly PERLND, 
904,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 904,PWATER ,UZS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWET, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,AGWS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,BASET, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,CEPE , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,CEPS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,GWVS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IFWS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,IGWI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,INFIL, 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,LZS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERC , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PERS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,PET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SUPY , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SURO , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,SURS , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,TAET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZET , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZI , 5,1951 ,2001 ,yearly
PERLND, 905,PWATER ,UZS , 5,1951 ,2001 ,yearly
"""
        ndict = []
        rd = read_unicode_csv(StringIO(self.catalog.decode()))
        next(rd)
        for row in rd:
            if len(row) == 0:
                continue
            nrow = [i.strip() for i in row]
            ndict.append(
                (nrow[0], int(nrow[1]), nrow[2], nrow[3], interval2codemap[nrow[7]])
            )
        self.ncatalog = sorted(ndict)

    def test_catalog_api(self):
        out = hspfbintoolbox.catalog("tests/6b_np1.hbn")
        out = [i[:5] for i in out]
        self.assertEqual(out, self.ncatalog)

    def test_catalog_cli(self):
        args = "hspfbintoolbox catalog --tablefmt csv tests/6b_np1.hbn"
        args = shlex.split(args)
        out = subprocess.Popen(
            args, stdout=subprocess.PIPE, stdin=subprocess.PIPE
        ).communicate()[0]
        self.assertEqual(out, self.catalog)
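# --- Illustrative sketch (not part of the original test module above) --------
# ``test_catalog_api`` compares 5-tuples built from the CSV fixture stored in
# ``self.catalog``.  A minimal standalone sketch of that row-to-tuple mapping;
# ``_INTERVAL2CODE`` is an assumption standing in for the module's
# ``interval2codemap`` (the fixture pairs the "yearly" interval with code 5).

_INTERVAL2CODE = {"yearly": 5}  # assumed subset of interval2codemap


def _row_to_key(line):
    """Map one catalog row to (operation, number, section, variable, interval_code)."""
    fields = [f.strip() for f in line.split(",")]
    return fields[0], int(fields[1]), fields[2], fields[3], _INTERVAL2CODE[fields[7]]


# >>> _row_to_key("PERLND, 905,PWATER ,UZS , 5,1951 ,2001 ,yearly")
# ('PERLND', 905, 'PWATER', 'UZS', 5)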
bsd-3-clause
blaze/dask
dask/base.py
1
37839
from collections import OrderedDict from collections.abc import Mapping, Iterator from contextlib import contextmanager from functools import partial from hashlib import md5 from operator import getitem import inspect import pickle import os import threading import uuid from distutils.version import LooseVersion from tlz import merge, groupby, curry, identity from tlz.functoolz import Compose from .compatibility import is_dataclass, dataclass_fields from .context import thread_state from .core import flatten, quote, get as simple_get, literal from .hashing import hash_buffer_hex from .utils import Dispatch, ensure_dict, apply from . import config, local, threaded __all__ = ( "DaskMethodsMixin", "annotate", "is_dask_collection", "compute", "persist", "optimize", "visualize", "tokenize", "normalize_token", ) @contextmanager def annotate(**annotations): """Context Manager for setting HighLevelGraph Layer annotations. Annotations are metadata or soft constraints associated with tasks that dask schedulers may choose to respect: They signal intent without enforcing hard constraints. As such, they are primarily designed for use with the distributed scheduler. Almost any object can serve as an annotation, but small Python objects are preferred, while large objects such as NumPy arrays are discouraged. Callables supplied as an annotation should take a single *key* argument and produce the appropriate annotation. Individual task keys in the annotated collection are supplied to the callable. Parameters ---------- **annotations : key-value pairs Examples -------- All tasks within array A should have priority 100 and be retried 3 times on failure. >>> import dask >>> import dask.array as da >>> with dask.annotate(priority=100, retries=3): ... A = da.ones((10000, 10000)) Prioritise tasks within Array A on flattened block ID. >>> nblocks = (10, 10) >>> with dask.annotate(priority=lambda k: k[1]*nblocks[1] + k[2]): ... A = da.ones((1000, 1000), chunks=(100, 100)) Annotations may be nested. >>> with dask.annotate(priority=1): ... with dask.annotate(retries=3): ... A = da.ones((1000, 1000)) ... B = A + 1 """ prev_annotations = config.get("annotations", {}) new_annotations = { **prev_annotations, **{f"annotations.{k}": v for k, v in annotations.items()}, } with config.set(new_annotations): yield def is_dask_collection(x): """Returns ``True`` if ``x`` is a dask collection""" try: return x.__dask_graph__() is not None except (AttributeError, TypeError): return False class DaskMethodsMixin(object): """A mixin adding standard dask collection methods""" __slots__ = () def visualize(self, filename="mydask", format=None, optimize_graph=False, **kwargs): """Render the computation of this object's task graph using graphviz. Requires ``graphviz`` to be installed. Parameters ---------- filename : str or None, optional The name of the file to write to disk. If the provided `filename` doesn't include an extension, '.png' will be used by default. If `filename` is None, no file will be written, and we communicate with dot using only pipes. format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional Format in which to write output file. Default is 'png'. optimize_graph : bool, optional If True, the graph is optimized before rendering. Otherwise, the graph is displayed as is. Default is False. color: {None, 'order'}, optional Options to color nodes. Provide ``cmap=`` keyword for additional colormap **kwargs Additional keyword arguments to forward to ``to_graphviz``. 
Examples -------- >>> x.visualize(filename='dask.pdf') # doctest: +SKIP >>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP Returns ------- result : IPython.diplay.Image, IPython.display.SVG, or None See dask.dot.dot_graph for more information. See Also -------- dask.base.visualize dask.dot.dot_graph Notes ----- For more information on optimization see here: https://docs.dask.org/en/latest/optimize.html """ return visualize( self, filename=filename, format=format, optimize_graph=optimize_graph, **kwargs, ) def persist(self, **kwargs): """Persist this dask collection into memory This turns a lazy Dask collection into a Dask collection with the same metadata, but now with the results fully computed or actively computing in the background. The action of function differs significantly depending on the active task scheduler. If the task scheduler supports asynchronous computing, such as is the case of the dask.distributed scheduler, then persist will return *immediately* and the return value's task graph will contain Dask Future objects. However if the task scheduler only supports blocking computation then the call to persist will *block* and the return value's task graph will contain concrete Python results. This function is particularly useful when using distributed systems, because the results will be kept in distributed memory, rather than returned to the local process as with compute. Parameters ---------- scheduler : string, optional Which scheduler to use like "threads", "synchronous" or "processes". If not provided, the default is to check the global settings first, and then fall back to the collection defaults. optimize_graph : bool, optional If True [default], the graph is optimized before computation. Otherwise the graph is run as is. This can be useful for debugging. **kwargs Extra keywords to forward to the scheduler function. Returns ------- New dask collections backed by in-memory data See Also -------- dask.base.persist """ (result,) = persist(self, traverse=False, **kwargs) return result def compute(self, **kwargs): """Compute this dask collection This turns a lazy Dask collection into its in-memory equivalent. For example a Dask array turns into a NumPy array and a Dask dataframe turns into a Pandas dataframe. The entire dataset must fit into memory before calling this operation. Parameters ---------- scheduler : string, optional Which scheduler to use like "threads", "synchronous" or "processes". If not provided, the default is to check the global settings first, and then fall back to the collection defaults. optimize_graph : bool, optional If True [default], the graph is optimized before computation. Otherwise the graph is run as is. This can be useful for debugging. kwargs Extra keywords to forward to the scheduler function. See Also -------- dask.base.compute """ (result,) = compute(self, traverse=False, **kwargs) return result def __await__(self): try: from distributed import wait, futures_of except ImportError as e: raise ImportError( "Using async/await with dask requires the `distributed` package" ) from e from tornado import gen @gen.coroutine def f(): if futures_of(self): yield wait(self) raise gen.Return(self) return f().__await__() def compute_as_if_collection(cls, dsk, keys, scheduler=None, get=None, **kwargs): """Compute a graph as if it were of type cls. 
Allows for applying the same optimizations and default scheduler.""" schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get) dsk2 = optimization_function(cls)(ensure_dict(dsk), keys, **kwargs) return schedule(dsk2, keys, **kwargs) def dont_optimize(dsk, keys, **kwargs): return dsk def optimization_function(x): return getattr(x, "__dask_optimize__", dont_optimize) def collections_to_dsk(collections, optimize_graph=True, **kwargs): """ Convert many collections into a single dask graph, after optimization """ from .highlevelgraph import HighLevelGraph optimizations = kwargs.pop("optimizations", None) or config.get("optimizations", []) if optimize_graph: groups = groupby(optimization_function, collections) _opt_list = [] for opt, val in groups.items(): dsk, keys = _extract_graph_and_keys(val) groups[opt] = (dsk, keys) _opt = opt(dsk, keys, **kwargs) _opt_list.append(_opt) for opt in optimizations: _opt_list = [] group = {} for k, (dsk, keys) in groups.items(): _opt = opt(dsk, keys, **kwargs) group[k] = (_opt, keys) _opt_list.append(_opt) groups = group # Merge all graphs if any(isinstance(graph, HighLevelGraph) for graph in _opt_list): dsk = HighLevelGraph.merge(*_opt_list) else: dsk = merge(*map(ensure_dict, _opt_list)) else: dsk, _ = _extract_graph_and_keys(collections) return dsk def _extract_graph_and_keys(vals): """Given a list of dask vals, return a single graph and a list of keys such that ``get(dsk, keys)`` is equivalent to ``[v.compute() for v in vals]``.""" from .highlevelgraph import HighLevelGraph graphs, keys = [], [] for v in vals: graphs.append(v.__dask_graph__()) keys.append(v.__dask_keys__()) if any(isinstance(graph, HighLevelGraph) for graph in graphs): graph = HighLevelGraph.merge(*graphs) else: graph = merge(*map(ensure_dict, graphs)) return graph, keys def unpack_collections(*args, **kwargs): """Extract collections in preparation for compute/persist/etc... Intended use is to find all collections in a set of (possibly nested) python objects, do something to them (compute, etc...), then repackage them in equivalent python objects. Parameters ---------- *args Any number of objects. If it is a dask collection, it's extracted and added to the list of collections returned. By default, python builtin collections are also traversed to look for dask collections (for more information see the ``traverse`` keyword). traverse : bool, optional If True (default), builtin python collections are traversed looking for any dask collections they might contain. Returns ------- collections : list A list of all dask collections contained in ``args`` repack : callable A function to call on the transformed collections to repackage them as they were in the original ``args``. 
""" traverse = kwargs.pop("traverse", True) collections = [] repack_dsk = {} collections_token = uuid.uuid4().hex def _unpack(expr): if is_dask_collection(expr): tok = tokenize(expr) if tok not in repack_dsk: repack_dsk[tok] = (getitem, collections_token, len(collections)) collections.append(expr) return tok tok = uuid.uuid4().hex if not traverse: tsk = quote(expr) else: # Treat iterators like lists typ = list if isinstance(expr, Iterator) else type(expr) if typ in (list, tuple, set): tsk = (typ, [_unpack(i) for i in expr]) elif typ in (dict, OrderedDict): tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()]) elif is_dataclass(expr) and not isinstance(expr, type): tsk = ( apply, typ, (), ( dict, [ [f.name, _unpack(getattr(expr, f.name))] for f in dataclass_fields(expr) ], ), ) else: return expr repack_dsk[tok] = tsk return tok out = uuid.uuid4().hex repack_dsk[out] = (tuple, [_unpack(i) for i in args]) def repack(results): dsk = repack_dsk.copy() dsk[collections_token] = quote(results) return simple_get(dsk, out) return collections, repack def optimize(*args, **kwargs): """Optimize several dask collections at once. Returns equivalent dask collections that all share the same merged and optimized underlying graph. This can be useful if converting multiple collections to delayed objects, or to manually apply the optimizations at strategic points. Note that in most cases you shouldn't need to call this method directly. Parameters ---------- *args : objects Any number of objects. If a dask object, its graph is optimized and merged with all those of all other dask objects before returning an equivalent dask collection. Non-dask arguments are passed through unchanged. traverse : bool, optional By default dask traverses builtin python collections looking for dask objects passed to ``optimize``. For large collections this can be expensive. If none of the arguments contain any dask objects, set ``traverse=False`` to avoid doing this traversal. optimizations : list of callables, optional Additional optimization passes to perform. **kwargs Extra keyword arguments to forward to the optimization passes. Examples -------- >>> import dask as d >>> import dask.array as da >>> a = da.arange(10, chunks=2).sum() >>> b = da.arange(10, chunks=2).mean() >>> a2, b2 = d.optimize(a, b) >>> a2.compute() == a.compute() True >>> b2.compute() == b.compute() True """ collections, repack = unpack_collections(*args, **kwargs) if not collections: return args dsk = collections_to_dsk(collections, **kwargs) postpersists = [] for a in collections: r, s = a.__dask_postpersist__() postpersists.append(r(dsk, *s)) return repack(postpersists) def compute(*args, **kwargs): """Compute several dask collections at once. Parameters ---------- args : object Any number of objects. If it is a dask object, it's computed and the result is returned. By default, python builtin collections are also traversed to look for dask objects (for more information see the ``traverse`` keyword). Non-dask arguments are passed through unchanged. traverse : bool, optional By default dask traverses builtin python collections looking for dask objects passed to ``compute``. For large collections this can be expensive. If none of the arguments contain any dask objects, set ``traverse=False`` to avoid doing this traversal. scheduler : string, optional Which scheduler to use like "threads", "synchronous" or "processes". If not provided, the default is to check the global settings first, and then fall back to the collection defaults. 
optimize_graph : bool, optional If True [default], the optimizations for each collection are applied before computation. Otherwise the graph is run as is. This can be useful for debugging. kwargs Extra keywords to forward to the scheduler function. Examples -------- >>> import dask as d >>> import dask.array as da >>> a = da.arange(10, chunks=2).sum() >>> b = da.arange(10, chunks=2).mean() >>> d.compute(a, b) (45, 4.5) By default, dask objects inside python collections will also be computed: >>> d.compute({'a': a, 'b': b, 'c': 1}) ({'a': 45, 'b': 4.5, 'c': 1},) """ traverse = kwargs.pop("traverse", True) optimize_graph = kwargs.pop("optimize_graph", True) collections, repack = unpack_collections(*args, traverse=traverse) if not collections: return args schedule = get_scheduler( scheduler=kwargs.pop("scheduler", None), collections=collections, get=kwargs.pop("get", None), ) dsk = collections_to_dsk(collections, optimize_graph, **kwargs) keys, postcomputes = [], [] for x in collections: keys.append(x.__dask_keys__()) postcomputes.append(x.__dask_postcompute__()) results = schedule(dsk, keys, **kwargs) return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)]) def visualize(*args, **kwargs): """ Visualize several dask graphs at once. Requires ``graphviz`` to be installed. All options that are not the dask graph(s) should be passed as keyword arguments. Parameters ---------- dsk : dict(s) or collection(s) The dask graph(s) to visualize. filename : str or None, optional The name of the file to write to disk. If the provided `filename` doesn't include an extension, '.png' will be used by default. If `filename` is None, no file will be written, and we communicate with dot using only pipes. format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional Format in which to write output file. Default is 'png'. optimize_graph : bool, optional If True, the graph is optimized before rendering. Otherwise, the graph is displayed as is. Default is False. color : {None, 'order'}, optional Options to color nodes. Provide ``cmap=`` keyword for additional colormap collapse_outputs : bool, optional Whether to collapse output boxes, which often have empty labels. Default is False. verbose : bool, optional Whether to label output and input boxes even if the data aren't chunked. Beware: these labels can get very long. Default is False. **kwargs Additional keyword arguments to forward to ``to_graphviz``. Examples -------- >>> x.visualize(filename='dask.pdf') # doctest: +SKIP >>> x.visualize(filename='dask.pdf', color='order') # doctest: +SKIP Returns ------- result : IPython.diplay.Image, IPython.display.SVG, or None See dask.dot.dot_graph for more information. 
See Also -------- dask.dot.dot_graph Notes ----- For more information on optimization see here: https://docs.dask.org/en/latest/optimize.html """ from dask.dot import dot_graph filename = kwargs.pop("filename", "mydask") optimize_graph = kwargs.pop("optimize_graph", False) dsks = [] args3 = [] for arg in args: if isinstance(arg, (list, tuple, set)): for a in arg: if isinstance(a, Mapping): dsks.append(a) if is_dask_collection(a): args3.append(a) else: if isinstance(arg, Mapping): dsks.append(arg) if is_dask_collection(arg): args3.append(arg) dsk = dict(collections_to_dsk(args3, optimize_graph=optimize_graph)) for d in dsks: dsk.update(d) color = kwargs.get("color") if color == "order": from .order import order import matplotlib.pyplot as plt o = order(dsk) try: cmap = kwargs.pop("cmap") except KeyError: cmap = plt.cm.RdBu if isinstance(cmap, str): import matplotlib.pyplot as plt cmap = getattr(plt.cm, cmap) mx = max(o.values()) + 1 colors = {k: _colorize(cmap(v / mx, bytes=True)) for k, v in o.items()} kwargs["function_attributes"] = { k: {"color": v, "label": str(o[k])} for k, v in colors.items() } kwargs["data_attributes"] = {k: {"color": v} for k, v in colors.items()} elif color: raise NotImplementedError("Unknown value color=%s" % color) return dot_graph(dsk, filename=filename, **kwargs) def persist(*args, **kwargs): """Persist multiple Dask collections into memory This turns lazy Dask collections into Dask collections with the same metadata, but now with their results fully computed or actively computing in the background. For example a lazy dask.array built up from many lazy calls will now be a dask.array of the same shape, dtype, chunks, etc., but now with all of those previously lazy tasks either computed in memory as many small :class:`numpy.array` (in the single-machine case) or asynchronously running in the background on a cluster (in the distributed case). This function operates differently if a ``dask.distributed.Client`` exists and is connected to a distributed scheduler. In this case this function will return as soon as the task graph has been submitted to the cluster, but before the computations have completed. Computations will continue asynchronously in the background. When using this function with the single machine scheduler it blocks until the computations have finished. When using Dask on a single machine you should ensure that the dataset fits entirely within memory. Examples -------- >>> df = dd.read_csv('/path/to/*.csv') # doctest: +SKIP >>> df = df[df.name == 'Alice'] # doctest: +SKIP >>> df['in-debt'] = df.balance < 0 # doctest: +SKIP >>> df = df.persist() # triggers computation # doctest: +SKIP >>> df.value().min() # future computations are now fast # doctest: +SKIP -10 >>> df.value().max() # doctest: +SKIP 100 >>> from dask import persist # use persist function on multiple collections >>> a, b = persist(a, b) # doctest: +SKIP Parameters ---------- *args: Dask collections scheduler : string, optional Which scheduler to use like "threads", "synchronous" or "processes". If not provided, the default is to check the global settings first, and then fall back to the collection defaults. traverse : bool, optional By default dask traverses builtin python collections looking for dask objects passed to ``persist``. For large collections this can be expensive. If none of the arguments contain any dask objects, set ``traverse=False`` to avoid doing this traversal. optimize_graph : bool, optional If True [default], the graph is optimized before computation. 
Otherwise the graph is run as is. This can be useful for debugging. **kwargs Extra keywords to forward to the scheduler function. Returns ------- New dask collections backed by in-memory data """ traverse = kwargs.pop("traverse", True) optimize_graph = kwargs.pop("optimize_graph", True) collections, repack = unpack_collections(*args, traverse=traverse) if not collections: return args schedule = get_scheduler( scheduler=kwargs.pop("scheduler", None), collections=collections ) if inspect.ismethod(schedule): try: from distributed.client import default_client except ImportError: pass else: try: client = default_client() except ValueError: pass else: if client.get == schedule: results = client.persist( collections, optimize_graph=optimize_graph, **kwargs ) return repack(results) dsk = collections_to_dsk(collections, optimize_graph, **kwargs) keys, postpersists = [], [] for a in collections: a_keys = list(flatten(a.__dask_keys__())) rebuild, state = a.__dask_postpersist__() keys.extend(a_keys) postpersists.append((rebuild, a_keys, state)) results = schedule(dsk, keys, **kwargs) d = dict(zip(keys, results)) results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists] return repack(results2) ############ # Tokenize # ############ def tokenize(*args, **kwargs): """Deterministic token >>> tokenize([1, 2, '3']) '7d6a880cd9ec03506eee6973ff551339' >>> tokenize('Hello') == tokenize('Hello') True """ if kwargs: args = args + (kwargs,) return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest() normalize_token = Dispatch() normalize_token.register( (int, float, str, bytes, type(None), type, slice, complex, type(Ellipsis)), identity ) @normalize_token.register(dict) def normalize_dict(d): return normalize_token(sorted(d.items(), key=str)) @normalize_token.register(OrderedDict) def normalize_ordered_dict(d): return type(d).__name__, normalize_token(list(d.items())) @normalize_token.register(set) def normalize_set(s): return normalize_token(sorted(s, key=str)) @normalize_token.register((tuple, list)) def normalize_seq(seq): def func(seq): try: return list(map(normalize_token, seq)) except RecursionError: return str(uuid.uuid4()) return type(seq).__name__, func(seq) @normalize_token.register(literal) def normalize_literal(lit): return "literal", normalize_token(lit()) @normalize_token.register(range) def normalize_range(r): return list(map(normalize_token, [r.start, r.stop, r.step])) @normalize_token.register(object) def normalize_object(o): method = getattr(o, "__dask_tokenize__", None) if method is not None: return method() return normalize_function(o) if callable(o) else uuid.uuid4().hex function_cache = {} function_cache_lock = threading.Lock() def normalize_function(func): try: return function_cache[func] except KeyError: result = _normalize_function(func) if len(function_cache) >= 500: # clear half of cache if full with function_cache_lock: if len(function_cache) >= 500: for k in list(function_cache)[::2]: del function_cache[k] function_cache[func] = result return result except TypeError: # not hashable return _normalize_function(func) def _normalize_function(func): if isinstance(func, Compose): first = getattr(func, "first", None) funcs = reversed((first,) + func.funcs) if first else func.funcs return tuple(normalize_function(f) for f in funcs) elif isinstance(func, (partial, curry)): args = tuple(normalize_token(i) for i in func.args) if func.keywords: kws = tuple( (k, normalize_token(v)) for k, v in sorted(func.keywords.items()) ) else: kws = None return 
(normalize_function(func.func), args, kws) else: try: result = pickle.dumps(func, protocol=0) if b"__main__" not in result: # abort on dynamic functions return result except Exception: pass try: import cloudpickle return cloudpickle.dumps(func, protocol=0) except Exception: return str(func) @normalize_token.register_lazy("pandas") def register_pandas(): import pandas as pd # Intentionally not importing PANDAS_GT_0240 from dask.dataframe._compat # to avoid ImportErrors from extra dependencies PANDAS_GT_0240 = LooseVersion(pd.__version__) >= LooseVersion("0.24.0") @normalize_token.register(pd.Index) def normalize_index(ind): if PANDAS_GT_0240: values = ind.array else: values = ind.values return [ind.name, normalize_token(values)] @normalize_token.register(pd.MultiIndex) def normalize_index(ind): codes = ind.codes if PANDAS_GT_0240 else ind.levels return ( [ind.name] + [normalize_token(x) for x in ind.levels] + [normalize_token(x) for x in codes] ) @normalize_token.register(pd.Categorical) def normalize_categorical(cat): return [normalize_token(cat.codes), normalize_token(cat.dtype)] if PANDAS_GT_0240: @normalize_token.register(pd.arrays.PeriodArray) @normalize_token.register(pd.arrays.DatetimeArray) @normalize_token.register(pd.arrays.TimedeltaArray) def normalize_period_array(arr): return [normalize_token(arr.asi8), normalize_token(arr.dtype)] @normalize_token.register(pd.arrays.IntervalArray) def normalize_interval_array(arr): return [ normalize_token(arr.left), normalize_token(arr.right), normalize_token(arr.closed), ] @normalize_token.register(pd.Series) def normalize_series(s): return [ s.name, s.dtype, normalize_token(s._data.blocks[0].values), normalize_token(s.index), ] @normalize_token.register(pd.DataFrame) def normalize_dataframe(df): data = [block.values for block in df._data.blocks] data.extend([df.columns, df.index]) return list(map(normalize_token, data)) @normalize_token.register(pd.api.extensions.ExtensionArray) def normalize_extension_array(arr): import numpy as np return normalize_token(np.asarray(arr)) # Dtypes @normalize_token.register(pd.api.types.CategoricalDtype) def normalize_categorical_dtype(dtype): return [normalize_token(dtype.categories), normalize_token(dtype.ordered)] @normalize_token.register(pd.api.extensions.ExtensionDtype) def normalize_period_dtype(dtype): return normalize_token(dtype.name) @normalize_token.register_lazy("numpy") def register_numpy(): import numpy as np @normalize_token.register(np.ndarray) def normalize_array(x): if not x.shape: return (x.item(), x.dtype) if hasattr(x, "mode") and getattr(x, "filename", None): if hasattr(x.base, "ctypes"): offset = ( x.ctypes.get_as_parameter().value - x.base.ctypes.get_as_parameter().value ) else: offset = 0 # root memmap's have mmap object as base if hasattr( x, "offset" ): # offset numpy used while opening, and not the offset to the beginning of the file offset += getattr(x, "offset") return ( x.filename, os.path.getmtime(x.filename), x.dtype, x.shape, x.strides, offset, ) if x.dtype.hasobject: try: try: # string fast-path data = hash_buffer_hex( "-".join(x.flat).encode( encoding="utf-8", errors="surrogatepass" ) ) except UnicodeDecodeError: # bytes fast-path data = hash_buffer_hex(b"-".join(x.flat)) except (TypeError, UnicodeDecodeError): try: data = hash_buffer_hex(pickle.dumps(x, pickle.HIGHEST_PROTOCOL)) except Exception: # pickling not supported, use UUID4-based fallback data = uuid.uuid4().hex else: try: data = hash_buffer_hex(x.ravel(order="K").view("i1")) except (BufferError, AttributeError, 
ValueError): data = hash_buffer_hex(x.copy().ravel(order="K").view("i1")) return (data, x.dtype, x.shape, x.strides) @normalize_token.register(np.matrix) def normalize_matrix(x): return type(x).__name__, normalize_array(x.view(type=np.ndarray)) normalize_token.register(np.dtype, repr) normalize_token.register(np.generic, repr) @normalize_token.register(np.ufunc) def normalize_ufunc(x): try: name = x.__name__ if getattr(np, name) is x: return "np." + name except AttributeError: return normalize_function(x) @normalize_token.register_lazy("scipy") def register_scipy(): import scipy.sparse as sp def normalize_sparse_matrix(x, attrs): return ( type(x).__name__, normalize_seq((normalize_token(getattr(x, key)) for key in attrs)), ) for cls, attrs in [ (sp.dia_matrix, ("data", "offsets", "shape")), (sp.bsr_matrix, ("data", "indices", "indptr", "blocksize", "shape")), (sp.coo_matrix, ("data", "row", "col", "shape")), (sp.csr_matrix, ("data", "indices", "indptr", "shape")), (sp.csc_matrix, ("data", "indices", "indptr", "shape")), (sp.lil_matrix, ("data", "rows", "shape")), ]: normalize_token.register(cls, partial(normalize_sparse_matrix, attrs=attrs)) @normalize_token.register(sp.dok_matrix) def normalize_dok_matrix(x): return type(x).__name__, normalize_token(sorted(x.items())) def _colorize(t): """Convert (r, g, b) triple to "#RRGGBB" string For use with ``visualize(color=...)`` Examples -------- >>> _colorize((255, 255, 255)) '#FFFFFF' >>> _colorize((0, 32, 128)) '#002080' """ t = t[:3] i = sum(v * 256 ** (len(t) - i - 1) for i, v in enumerate(t)) h = hex(int(i))[2:].upper() h = "0" * (6 - len(h)) + h return "#" + h named_schedulers = { "sync": local.get_sync, "synchronous": local.get_sync, "single-threaded": local.get_sync, "threads": threaded.get, "threading": threaded.get, } try: from dask import multiprocessing as dask_multiprocessing except ImportError: pass else: named_schedulers.update( { "processes": dask_multiprocessing.get, "multiprocessing": dask_multiprocessing.get, } ) get_err_msg = """ The get= keyword has been removed. Please use the scheduler= keyword instead with the name of the desired scheduler like 'threads' or 'processes' x.compute(scheduler='single-threaded') x.compute(scheduler='threads') x.compute(scheduler='processes') or with a function that takes the graph and keys x.compute(scheduler=my_scheduler_function) or with a Dask client x.compute(scheduler=client) """.strip() def get_scheduler(get=None, scheduler=None, collections=None, cls=None): """Get scheduler function There are various ways to specify the scheduler to use: 1. Passing in scheduler= parameters 2. Passing these into global configuration 3. 
Using defaults of a dask collection This function centralizes the logic to determine the right scheduler to use from those many options """ if get: raise TypeError(get_err_msg) if scheduler is not None: if callable(scheduler): return scheduler elif "Client" in type(scheduler).__name__ and hasattr(scheduler, "get"): return scheduler.get elif scheduler.lower() in named_schedulers: return named_schedulers[scheduler.lower()] elif scheduler.lower() in ("dask.distributed", "distributed"): from distributed.worker import get_client return get_client().get else: raise ValueError( "Expected one of [distributed, %s]" % ", ".join(sorted(named_schedulers)) ) # else: # try to connect to remote scheduler with this name # return get_client(scheduler).get if config.get("scheduler", None): return get_scheduler(scheduler=config.get("scheduler", None)) if config.get("get", None): raise ValueError(get_err_msg) if getattr(thread_state, "key", False): from distributed.worker import get_worker return get_worker().client.get if cls is not None: return cls.__dask_scheduler__ if collections: collections = [c for c in collections if c is not None] if collections: get = collections[0].__dask_scheduler__ if not all(c.__dask_scheduler__ == get for c in collections): raise ValueError( "Compute called on multiple collections with " "differing default schedulers. Please specify a " "scheduler=` parameter explicitly in compute or " "globally with `dask.config.set`." ) return get return None def wait(x, timeout=None, return_when="ALL_COMPLETED"): """Wait until computation has finished This is a compatibility alias for ``dask.distributed.wait``. If it is applied onto Dask collections without Dask Futures or if Dask distributed is not installed then it is a no-op """ try: from distributed import wait return wait(x, timeout=timeout, return_when=return_when) except (ImportError, ValueError): return x
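# --- Illustrative appendix (not part of dask/base.py proper) ------------------
# ``is_dask_collection``, ``compute`` and ``persist`` above only rely on the
# ``__dask_*`` protocol methods.  A minimal sketch of a custom collection,
# reusing names already defined in this module (DaskMethodsMixin, dont_optimize,
# threaded); ``Always3`` and its single key "x" are made-up for illustration.


class Always3(DaskMethodsMixin):
    """Toy collection whose one-task graph computes the value 3."""

    def __dask_graph__(self):
        # One task keyed "x"; a dask task is a tuple of (callable, *args).
        return {"x": (lambda: 3,)}

    def __dask_keys__(self):
        return ["x"]

    def __dask_postcompute__(self):
        # finalize(results, *extra_args): unwrap the single computed result.
        return (lambda results: results[0]), ()

    def __dask_postpersist__(self):
        # rebuild(dsk, *state): a real collection would wrap the materialized
        # graph; returning self keeps the sketch short.
        return (lambda dsk: self), ()

    def __dask_tokenize__(self):
        return "Always3"

    __dask_optimize__ = staticmethod(dont_optimize)
    __dask_scheduler__ = staticmethod(threaded.get)


# Usage (under the assumptions above):
#   >>> is_dask_collection(Always3())
#   True
#   >>> compute(Always3())
#   (3,)
#   >>> Always3().compute()
#   3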
bsd-3-clause
LeeYiFang/Carkinos
src/probes/views.py
1
122212
from django.shortcuts import render,render_to_response from django.http import HttpResponse, Http404,JsonResponse from django.views.decorators.http import require_GET from .models import Dataset, CellLine, ProbeID, Sample, Platform, Clinical_Dataset,Clinical_sample,Gene from django.template import RequestContext from django.utils.html import mark_safe import json import pandas as pd import numpy as np from pathlib import Path import sklearn from sklearn.decomposition import PCA from scipy import stats import os import matplotlib as mpl mpl.use('agg') import matplotlib.pyplot as plt import seaborn as sns from matplotlib.colors import LinearSegmentedColormap import uuid from rpy2.robjects.packages import importr import rpy2.robjects as ro r=ro.r #lumi= importr('lumi') from rpy2.robjects import pandas2ri pandas2ri.activate() import csv #import logging #logger = logging.getLogger("__name__") show_row=4000 #more than how many rows will become download file mode def generate_samples(): d=Dataset.objects.all() cell_d_name=list(d.values_list('name',flat=True)) same_name=[] cell_datasets=[] #[[dataset_name,[[primary_site,[cell line]]]] for i in cell_d_name: if i=="Sanger Cell Line Project": alias='sanger' same_name.append('sanger') elif i=="NCI60": alias='nci' same_name.append('nci') elif i=="GSE36133": alias='ccle' same_name.append('ccle') else: alias=i same_name.append(i) sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id') cell_datasets.append([i,alias,list(sample),[]]) sites=list(sample.values_list('cell_line_id__primary_site',flat=True)) hists=list(sample.values_list('cell_line_id__name',flat=True)) dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) cell_datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp d=Clinical_Dataset.objects.all() d_name=list(d.values_list('name',flat=True)) datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]] primarys=[] #[[primary_site,[primary_hist]]] primh_filter=[] #[[filter_type,[filter_choice]]] f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic'] for i in d_name: same_name.append(i) datasets.append([i,[],[]]) sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site') sites=list(sample.values_list('primary_site',flat=True)) hists=list(sample.values_list('primary_hist',flat=True)) dis_prim=list(sample.values_list('primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp for f in f_type: temp=list(set(sample.values_list(f,flat=True))) datasets[-1][2].append([f,temp]) sample=Clinical_sample.objects.all().order_by('primary_site') sites=list(sample.values_list('primary_site',flat=True)) hists=list(sample.values_list('primary_hist',flat=True)) dis_prim=list(sample.values_list('primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp s=Clinical_sample.objects.all() for f in f_type: temp=list(set(s.values_list(f,flat=True))) primh_filter.append([f,temp]) all_full_name=cell_d_name+d_name return { 
'all_full_name':mark_safe(json.dumps(all_full_name)), #full name of all datasets 'same_name':mark_safe(json.dumps(same_name)), #short name for all datasets 'cell_d_name':mark_safe(json.dumps(cell_d_name)), #cell line dataset name(full) 'cell_datasets':cell_datasets, 'd_name': mark_safe(json.dumps(d_name)), #clinical dataset name 'datasets': datasets, 'primarys': primarys, 'primh_filter':primh_filter, } def sample_microarray(request): d=Clinical_Dataset.objects.all() d_name=list(d.values_list('name',flat=True)) datasets=[] #[[dataset_name,[[primary_site,[primary_histology]]],[[filter_type,[filter_choice]]]] primarys=[] #[[primary_site,[primary_hist]]] primh_filter=[] #[[filter_type,[filter_choice]]] f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic'] for i in d_name: datasets.append([i,[],[]]) sample=Clinical_sample.objects.filter(dataset_id__name=i).order_by('primary_site') sites=list(sample.values_list('primary_site',flat=True)) hists=list(sample.values_list('primary_hist',flat=True)) dis_prim=list(sample.values_list('primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) datasets[-1][1].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp for f in f_type: temp=list(set(sample.values_list(f,flat=True))) datasets[-1][2].append([f,temp]) sample=Clinical_sample.objects.all().order_by('primary_site') sites=list(sample.values_list('primary_site',flat=True)) hists=list(sample.values_list('primary_hist',flat=True)) dis_prim=list(sample.values_list('primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) primarys.append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp s=Clinical_sample.objects.all() for f in f_type: temp=list(set(s.values_list(f,flat=True))) primh_filter.append([f,temp]) return render(request, 'sample_microarray.html', { 'd_name': mark_safe(json.dumps(d_name)), 'datasets': datasets, 'primarys': primarys, 'primh_filter':primh_filter, }) def user_pca(request): #load the ranking file and the table of probe first,open files pform=request.POST['data_platform'] uni=[] #to store valid probe offset for getting the correct data uni_probe=[] gene_flag=0 if(pform=="others"): #gene level gene_flag=1 if(request.POST['ngs']=="ngs_u133a"): pform="U133A" else: pform="PLUS2" elif (pform=="U133A"): quantile=list(np.load('ranking_u133a.npy')) probe_path=Path('../').resolve().joinpath('src','Affy_U133A_probe_info.csv') probe_list = pd.read_csv(probe_path.as_posix()) uni_probe=pd.unique(probe_list['PROBEID']) else: quantile=np.load('ranking_u133plus2.npy') probe_path=Path('../').resolve().joinpath('src','Affy_U133plus2_probe_info.csv') probe_list = pd.read_csv(probe_path.as_posix()) uni_probe=pd.unique(probe_list['PROBEID']) propotion=0 table_propotion=0 show=request.POST['show_type'] #get the pca show type nci_size=Sample.objects.filter(dataset_id__name__in=["NCI60"]).count() gse_size=Sample.objects.filter(dataset_id__name__in=["GSE36133"]).count() group_counter=1 user_out_group=[] s_group_dict={} #store sample offset_group_dict={} #store offset cell_line_dict={} #this part is for selecting cell lines base on dataset #count how many group group_counter=1 while True: temp_name='dataset_g'+str(group_counter) if temp_name in request.POST: group_counter=group_counter+1 else: group_counter=group_counter-1 break s_group_dict={} #store sample group_name=[] 
offset_group_dict={} #store offset clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True)) clline=list(Dataset.objects.all().values_list('name',flat=True)) all_exist_dataset=[] for i in range(1,group_counter+1): dname='dataset_g'+str(i) all_exist_dataset=all_exist_dataset+request.POST.getlist(dname) all_exist_dataset=list(set(all_exist_dataset)) all_base=[0] for i in range(0,len(all_exist_dataset)-1): if all_exist_dataset[i] in clline: all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count()) else: all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count()) all_c=[] for i in range(1,group_counter+1): s_group_dict['g'+str(i)]=[] offset_group_dict['g'+str(i)]=[] cell_line_dict['g'+str(i)]=[] dname='dataset_g'+str(i) datasets=request.POST.getlist(dname) group_name.append('g'+str(i)) for dn in datasets: if dn=='Sanger Cell Line Project': c='select_sanger_g'+str(i) elif dn=='NCI60': c='select_nci_g'+str(i) elif dn=='GSE36133': c='select_ccle_g'+str(i) if dn in clline: temp=list(set(request.POST.getlist(c))) if 'd_sample' in show: if all_c==[]: all_c=all_c+temp uni=temp else: uni=list(set(temp)-set(all_c)) all_c=all_c+uni else: uni=list(temp) #do not filter duplicate input only when select+centroid s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id' ).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name') cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True)) s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s) offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)])) else: #dealing with clinical sample datasets com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries? 
com_hists=[w1 for segments in com_hists for w1 in segments.split('/')] #print(com_hists) prims=com_hists[0::2] hists=com_hists[1::2] temp=request.POST.getlist('filter_'+dn+'_g'+str(i)) age=[] gender=[] ethnic=[] grade=[] stage=[] T=[] N=[] M=[] metas=[] for t in temp: if 'stage/' in t: stage.append(t[6:]) elif 'gender/' in t: gender.append(t[7:]) elif 'ethnic/' in t: ethnic.append(t[7:]) elif 'grade/' in t: grade.append(t[6:]) elif 'stageT/' in t: T.append(t[7:]) elif 'stageN/' in t: N.append(t[7:]) elif 'stageM/' in t: M.append(t[7:]) elif 'metastatic/' in t: metas.append(t[11:]) ''' if t[11:]=='False': metas.append(0) else: metas.append(1) ''' else: #"age/" age.append(t[4:]) #print(len(prims)) #print(len(hists)) for x in range(0,len(prims)): s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x], primary_hist=hists[x], age__in=age, gender__in=gender, ethnic__in=ethnic, stage__in=stage, grade__in=grade, stageT__in=T, stageN__in=N, stageM__in=M, metastatic__in=metas, ).select_related('dataset_id').order_by('id') s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s) cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True)) offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)])) #return render_to_response('welcome.html',locals()) all_sample=[] all_cellline=[] cell_object=[] all_offset=[] sample_counter={} group_cell=[] g_s_counter=[0] for i in range(1,group_counter+1): all_sample=all_sample+s_group_dict['g'+str(i)] #will not exist duplicate sample if d_sample all_offset=all_offset+offset_group_dict['g'+str(i)] all_cellline=all_cellline+cell_line_dict['g'+str(i)] g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)])) for i in all_sample: sample_counter[i.name]=1 if str(type(i))=="<class 'probes.models.Sample'>": ##print("i am sample!!") cell_object.append(i.cell_line_id) else: ##print("i am clinical!!") cell_object.append(i) #read the user file text=request.FILES.getlist('user_file') user_counter=len(text) if(gene_flag==1): user_counter=1 ugroup=[] for i in range(user_counter): ugroup.append(request.POST['ugroup_name'+str(i+1)]) if(ugroup[-1]==''): ugroup[-1]='User_Group'+str(i+1) dgroup=[] for i in range(1,group_counter+1): dgroup.append(request.POST['group_name'+str(i)]) if(dgroup[-1]==''): dgroup[-1]='Dataset_Group'+str(i) user_dict={} #{user group number:user 2d array} samples=0 nans=[] #to store the probe name that has nan for x in range(1,user_counter+1): #check the file format and content here first filetype=str(text[x-1]).split('.') if(filetype[-1]!="csv"): error_reason='You have the wrong file type. Please upload .csv files' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) if(text[x-1].size>=80000000): #bytes error_reason='The file size is too big. Please upload .csv file with size less than 80MB.' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) temp_data = pd.read_csv(text[x-1]) col=list(temp_data.columns.values) samples=samples+len(col)-1 if(samples==0): error_reason='The file does not have any samples.' 
return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) if(gene_flag==0): #probe level check check_probe=[str(x) for x in list(temp_data.iloc[:,0]) if not str(x).lower().startswith('affx')] #print(len(check_probe)) if(len(check_probe)!=len(uni_probe)): error_reason='The probe number does not match with the platform you selected.' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) if(set(check_probe)!=set(uni_probe)): error_reason='The probe number or probe name in your file does not match the platform you selected.' #error_reason+='</br>The probes that are not in the platform: '+str(set(check_probe)-set(uni_probe))[1:-1] #error_reason+='</br>The probes that are lacking: '+str(set(uni_probe)-set(check_probe))[1:-1] return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) col=list(temp_data.columns.values) n=pd.isnull(temp_data).any(1).nonzero()[0] nans += list(temp_data[col[0]][n]) user_dict[x]=temp_data if 'd_sample' in show: if((len(all_sample)+samples)<4): error_reason='You should have at least 4 samples for PCA. The samples are not enough.<br />'\ 'The total number of samples in your uploaded file is '+str(samples)+'.<br />'\ 'The number of samples you selected is '+str(len(all_sample))+'.<br />'\ 'Total is '+str(len(all_sample)+samples)+'.' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) else: s_count=0 sample_list=[] a_sample=np.array(all_sample) for i in range(1,group_counter+1): dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) a_cell_object=np.array(cell_object) for c in dis_cellline: temp1=np.where((a_cell_object==c))[0] temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i])) total_offset=temp1[temp2] selected_sample=a_sample[total_offset] if list(selected_sample) in sample_list: #to prevent two different colors in different group continue else: sample_list.append(list(selected_sample)) s_count=s_count+1 if(s_count>=4): #check this part break if(s_count>=4): #check this part break if((s_count+samples)<4): error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The total number is not enough.<br />'\ 'The total number of dots in you uploaded file is '+str(samples)+'.<br />'\ 'The number of centroid dots you selected is '+str(s_count)+'.<br />'\ 'Total is '+str(s_count+samples)+'.' 
return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) new_name=[] origin_name=[] com_gene=[] #for ngs select same gene nans=list(set(nans)) for x in range(1,user_counter+1): #temp_data = pd.read_csv(text[x-1]) temp_data=user_dict[x] col=list(temp_data.columns.values) col[0]='probe' temp_data.columns=col temp_data.index = temp_data['probe'] temp_data.index.name = None temp_data=temp_data.iloc[:, 1:] #add "use_" to user's sample names col_name=list(temp_data.columns.values) #have user's sample name list here origin_name=origin_name+list(temp_data.columns.values) col_name=[ "user_"+str(index)+"_"+s for index,s in enumerate(col_name)] temp_data.columns=col_name new_name=new_name+col_name if(gene_flag==0): try: temp_data=temp_data.reindex(uni_probe) except ValueError: return HttpResponse('The file has probes with the same names, please let them be unique.') #remove probe that has nan temp_data=temp_data.drop(nans) temp_data=temp_data.rank(method='dense') #this is for quantile for i in col_name: for j in range(0,len(temp_data[i])): #if(not(np.isnan(temp_data[i][j]))): temp_data[i][j]=quantile[int(temp_data[i][j]-1)] if x==1: data=temp_data else: data=np.concatenate((data,temp_data), axis=1) user_dict[x]=np.array(temp_data) else: temp_data=temp_data.drop(nans) #drop nan temp_data=temp_data.loc[~(temp_data==0).all(axis=1)] #drop all rows with 0 here temp_data=temp_data.groupby(temp_data.index).first() #drop the duplicate gene row user_dict[x]=temp_data #print(temp_data) #delete nan, combine user data to the datasets,transpose matrix for x in range(0,len(all_exist_dataset)): if(gene_flag==0): if all_exist_dataset[x] in clline: pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path) else: pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path) else: if all_exist_dataset[x] in clline: pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=all_exist_dataset[x]).data_path) else: pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path) if x==0: val=np.load(pth.as_posix()) else: val=np.hstack((val, np.load(pth.as_posix())))#combine together #database dataset remove nan probes if(gene_flag==0): uni=[] p_offset=list(ProbeID.objects.filter(platform__name__in=[pform],Probe_id__in=nans).values_list('offset',flat=True)) for n in range(0,len(uni_probe)): if(n not in p_offset): uni.append(n) else: #deal with ngs uploaded data here probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt') #probe_list = pd.read_csv(probe_path.as_posix()) #notice duplicate #get the match gene first, notice the size issue info=pd.read_csv(probe_path.as_posix(),sep='\t') col=list(info.columns.values) col[0]='symbol' info.columns=col info.index = info['symbol'] info.index.name = None info=info.iloc[:, 1:] data=user_dict[1] #data=data.groupby(data.index).first() #drop the duplicate gene row com_gene=list(data.index) temp_data=data rloop=divmod(len(com_gene),990) if(rloop[1]==0): rloop=(rloop[0]-1,0) gg=[] for x in range(0,rloop[0]+1): gg+=list(Gene.objects.filter(platform__name__in=[pform],symbol__in=com_gene[x*990:(x+1)*990]).order_by('offset')) exist_gene=[] uni=[] for i in gg: exist_gene.append(i.symbol) uni.append(i.offset) info=info.drop(exist_gene,errors='ignore') new_data=temp_data.loc[data.index.isin(exist_gene)].reindex(exist_gene) ##print(exist_gene) 
##print(new_data.index) #search remain symbol's alias and symbol search_alias=list(set(com_gene)-set(exist_gene)) for i in search_alias: re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first if(len(re_symbol)!=0): re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not repeat=len(re_match) if(repeat!=0): #match gene symbol in database ##print(re_match) for x in re_match: to_copy=data.loc[i] to_copy.name=x.symbol new_data=new_data.append(to_copy) uni.append(x.offset) info=info.drop(x.symbol,errors='ignore') user_dict[1]=np.array(new_data) ##print("length of new data:"+str(len(new_data))) ##print("data:") ##print(data) ##print("new_data:") ##print(new_data) data=new_data if 'd_sample' in show: val=val[np.ix_(uni,all_offset)] #print(len(val)) user_offset=len(val[0]) if(gene_flag==1): #do the rank invariant here #print("sample with ngs data do rank invariant here") ref_path=Path('../').resolve().joinpath('src','cv_result.txt') ref=pd.read_csv(ref_path.as_posix()) col=list(ref.columns.values) col[0]='symbol' ref.columns=col ref.index = ref['symbol'] ref.index.name = None ref=ref.iloc[:, 1:] ref=ref.iloc[:5000,:] #rank invariant need 5000 genes same_gene=list(ref.index.intersection(data.index)) #to lowess rref=pandas2ri.py2ri(ref.loc[same_gene]) rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1)) rall=pandas2ri.py2ri(data) ro.globalenv['x'] = rngs ro.globalenv['y'] = rref ro.globalenv['newx'] = rall r('x<-as.vector(as.matrix(x))') r('y<-as.vector(as.matrix(y))') r('newx<-as.matrix(newx)') try: if(request.POST['data_type']=='raw'): r('y.loess<-loess(2**y~x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))') elif(request.POST['data_type']=='log2'): r('y.loess<-loess(2**y~2**x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))') else: r('y.loess<-loess(2**y~10**x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))') except: error_reason='Match too less genes. Check your gene symbols again. We use NCBI standard gene symbol.' 
return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) #r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))') data=r('newx') #print(data[:10]) #print(type(data)) val=np.hstack((np.array(val), np.array(data))) val=val[~np.isnan(val).any(axis=1)] val=np.transpose(val) else: #val=np.array(val) val=val[np.ix_(uni)] user_offset=len(val[0]) if(gene_flag==1): #print("sample with ngs data do rank invariant here") ref_path=Path('../').resolve().joinpath('src','cv_result.txt') ref=pd.read_csv(ref_path.as_posix()) col=list(ref.columns.values) col[0]='symbol' ref.columns=col ref.index = ref['symbol'] ref.index.name = None ref=ref.iloc[:, 1:] ref=ref.iloc[:5000,:] #rank invariant need 5000 genes same_gene=list(ref.index.intersection(data.index)) #to lowess rref=pandas2ri.py2ri(ref.loc[same_gene]) rngs=pandas2ri.py2ri(data.loc[same_gene].mean(axis=1)) rall=pandas2ri.py2ri(data) ro.globalenv['x'] = rngs ro.globalenv['y'] = rref ro.globalenv['newx'] = rall r('x<-as.vector(as.matrix(x))') r('y<-as.vector(as.matrix(y))') r('newx<-as.matrix(newx)') try: if(request.POST['data_type']=='raw'): r('y.loess<-loess(2**y~x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))') elif(request.POST['data_type']=='log2'): r('y.loess<-loess(2**y~2**x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,2**newx[,z])))') else: r('y.loess<-loess(2**y~10**x,span=0.3)') r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,10**newx[,z])))') except: error_reason='Match too less genes. Check your gene symbols again. We use NCBI standard gene symbol.' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) #r('for(z in c(1:ncol(newx))) newx[,z]=log2(as.matrix(predict(y.loess,newx[,z])))') data=r('newx') #print(data[:10]) #print(type(data)) val=np.hstack((np.array(val), np.array(data))) val=val[~np.isnan(val).any(axis=1)] pca_index=[] dis_offset=[] #PREMISE:same dataset same cell line will have only one type of primary site and primary histology name1=[] name2=[] name3=[] name4=[] name5=[] X1=[] Y1=[] Z1=[] X2=[] Y2=[] Z2=[] X3=[] Y3=[] Z3=[] X4=[] Y4=[] Z4=[] X5=[] Y5=[] Z5=[] n=4 #need to fix to the best one if 'd_sample' in show: #count the pca first pca= PCA(n_components=n) #combine user sample's offset to all_offset in another variable Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original offset any more ratio_temp=pca.explained_variance_ratio_ propotion=sum(ratio_temp[1:n]) table_propotion=sum(ratio_temp[0:n]) user_new_offset=len(all_offset) ##print(Xval) max=0 min=10000000000 out_group=[] exist_cell={}#cell line object:counter for g in range(1,group_counter+1): output_cell={} check={} for s in range(g_s_counter[g-1],g_s_counter[g]): if str(type(all_sample[s]))=="<class 'probes.models.Sample'>": cell=all_sample[s].cell_line_id else: cell=all_sample[s] try: counter=exist_cell[cell] exist_cell[cell]=counter+1 except KeyError: exist_cell[cell]=1 try: t=output_cell[cell] except KeyError: output_cell[cell]=[cell,[]] check[all_sample[s].name]=[] sample_counter[all_sample[s].name]=exist_cell[cell] for i in range(0,len(all_sample)): if i!=s: try: if(all_sample[s].name not in check[all_sample[i].name]): distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n]) if distance<min: min=distance if distance>max: max=distance 
output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')' ,all_sample[s].name,cell.primary_site,cell.primary_hist, all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site ,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance]) check[all_sample[s].name].append(all_sample[i].name) except KeyError: distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')' ,all_sample[s].name,cell.primary_site,cell.primary_hist, all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,cell_object[i].primary_site ,cell_object[i].primary_hist,all_sample[i].dataset_id.name,distance]) check[all_sample[s].name].append(all_sample[i].name) g_count=1 u_count=len(user_dict[g_count][0]) #sample number in first user file for i in range(user_new_offset,user_new_offset+len(origin_name)): #remember to prevent empty file uploaded distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')' ,all_sample[s].name,cell.primary_site,cell.primary_hist, all_sample[s].dataset_id.name," ",origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance]) if ((i-user_new_offset+1)==u_count): g_count+=1 try: u_count+=len(user_dict[g_count][0]) except KeyError: u_count+=0 if(g==1): name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X3.append(round(Xval[s][n-3],5)) Y3.append(round(Xval[s][n-2],5)) Z3.append(round(Xval[s][n-1],5)) elif(g==2): name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X4.append(round(Xval[s][n-3],5)) Y4.append(round(Xval[s][n-2],5)) Z4.append(round(Xval[s][n-1],5)) elif(g==3): name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X5.append(round(Xval[s][n-3],5)) Y5.append(round(Xval[s][n-2],5)) Z5.append(round(Xval[s][n-1],5)) dictlist=[] for key, value in output_cell.items(): temp = [value] dictlist+=temp output_cell=list(dictlist) out_group.append(["Dataset Group"+str(g),output_cell]) if g==group_counter: output_cell={} g_count=1 output_cell[g_count]=[" ",[]] u_count=len(user_dict[g_count][0]) temp_count=u_count temp_g=1 before=0 for i in range(user_new_offset,user_new_offset+len(origin_name)): for x in range(0,len(all_sample)): distance=np.linalg.norm(Xval[x][n-3:n]-Xval[i][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count),all_cellline[x] ,all_sample[x].name,cell_object[x].primary_site,cell_object[x].primary_hist, all_sample[x].dataset_id.name,distance]) temp_g=1 temp_count=len(user_dict[temp_g][0]) for j in range(user_new_offset,user_new_offset+before): if ((j-user_new_offset)==temp_count): temp_g+=1 try: temp_count+=len(user_dict[temp_g][0]) except KeyError: temp_count+=0 distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count) ," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance]) temp_g=g_count temp_count=len(user_dict[g_count][0]) for j in range(i+1,user_new_offset+len(origin_name)): if ((j-user_new_offset)==temp_count): temp_g+=1 try: temp_count+=len(user_dict[temp_g][0]) except KeyError: 
temp_count+=0 distance=np.linalg.norm(Xval[j][n-3:n]-Xval[i][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[g_count][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count) ," ",origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance]) if g_count==1: name1.append(origin_name[i-user_new_offset]) X1.append(round(Xval[i][n-3],5)) Y1.append(round(Xval[i][n-2],5)) Z1.append(round(Xval[i][n-1],5)) else: name2.append(origin_name[i-user_new_offset]) X2.append(round(Xval[i][n-3],5)) Y2.append(round(Xval[i][n-2],5)) Z2.append(round(Xval[i][n-1],5)) if ((i-user_new_offset+1)==u_count): dictlist=[] for key, value in output_cell.items(): temp = [value] dictlist+=temp output_cell=list(dictlist) user_out_group.append(["User Group"+str(g_count),output_cell]) g_count+=1 before=u_count+before #print("I am here!!") try: u_count+=len(user_dict[g_count][0]) output_cell={} output_cell[g_count]=[" ",[]] except KeyError: u_count+=0 #[g,[group_cell_1 object,[[outputs paired1,......,],[paired2],[paired3]]],[group_cell_2 object,[[pair1],[pair2]]]] #for xx in origin_name: #sample_counter[xx]=1 ##print(out_group) element_counter=0 for i in out_group: for temp_list in i[1]: element_counter=element_counter+len(temp_list[1]) for temp in temp_list[1]: if(temp[5]!=" "): temp[5]=temp[5]+'('+str(sample_counter[temp[6]])+')' for i in user_out_group: for temp_list in i[1]: for temp in temp_list[1]: if(temp[2]!=" "): temp[2]=temp[2]+'('+str(sample_counter[temp[3]])+')' return_html='user_pca.html' else: #This part is for centroid display return_html='user_pca_center.html' #This part is for select cell line base on dataset,count centroid base on the dataset #group中的cell line為單位來算重心 location_dict={} #{group number:[[cell object,dataset,new location]]} combined=[] sample_list=[] pca_index=np.array(pca_index) X_val=[] val_a=np.array(val) a_all_offset=np.array(all_offset) a_sample=np.array(all_sample) for i in range(1,group_counter+1): dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) #cell object may have duplicate cell line since:NCI A + CCLE A===>[A,A] location_dict['g'+str(i)]=[] dataset_dict={} a_cell_object=np.array(cell_object) for c in dis_cellline: #dis_cellline may not have the same order as cell_object temp1=np.where((a_cell_object==c))[0] temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i])) total_offset=temp1[temp2] selected_val=val_a[:,a_all_offset[total_offset]] selected_val=np.transpose(selected_val) new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0] selected_sample=a_sample[total_offset] if list(selected_sample) in sample_list: #to prevent two different colors in different group continue else: sample_list.append(list(selected_sample)) d_temp=[] for s in selected_sample: d_temp.append(s.dataset_id.name) dataset_dict[c]="/".join(list(set(d_temp))) X_val.append(new_loca) location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order #run the pca user_new_offset=len(X_val) temp_val=np.transpose(val[:,user_offset:]) for x in range(0,len(temp_val)): X_val.append(list(temp_val[x])) if(len(X_val)<4): error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. 
The total number is not enough.<br />'\ 'The total number of dots in you uploaded file is '+str(len(temp_val))+'.<br />'\ 'The number of centroid dots you selected is '+str(len(X_val)-len(temp_val))+'.<br />'\ 'Total is '+str(len(X_val))+'.' return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) X_val=np.matrix(X_val) pca= PCA(n_components=n) new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more ratio_temp=pca.explained_variance_ratio_ propotion=sum(ratio_temp[1:n]) table_propotion=sum(ratio_temp[0:n]) #print(new_val) out_group=[] min=10000000000 max=0 element_counter=0 for g in range(1,group_counter+1): output_cell=[] exist_cell={} for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one cell=group_c[0] key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1] exist_cell[key_string]=[] output_cell.append([cell,[]]) #count the distance for temp_list in combined: c=temp_list[0] temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1] try: if(key_string not in exist_cell[temp_string]): distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n])) if distance==0: continue if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist ,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance]) element_counter=element_counter+1 except KeyError: distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n])) if distance==0: continue if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist ,group_c[1],temp_list[0].name,temp_list[0].primary_site,temp_list[0].primary_hist,temp_list[1],distance]) element_counter=element_counter+1 exist_cell[key_string].append(temp_string) g_count=1 u_count=len(user_dict[g_count][0]) #sample number in first user file for i in range(user_new_offset,user_new_offset+len(origin_name)): distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[i][n-3:n])) if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([cell.name,cell.primary_site,cell.primary_hist,group_c[1] ,origin_name[i-user_new_offset]," "," ","User Group"+str(g_count),distance]) element_counter=element_counter+1 if ((i-user_new_offset+1)==u_count): g_count+=1 try: u_count+=len(user_dict[g_count][0]) except KeyError: u_count+=0 if(g==1): name3.append(cell.name+'<br>'+group_c[1]) X3.append(round(new_val[group_c[2]][n-3],5)) Y3.append(round(new_val[group_c[2]][n-2],5)) Z3.append(round(new_val[group_c[2]][n-1],5)) elif(g==2): name4.append(cell.name+'<br>'+group_c[1]) X4.append(round(new_val[group_c[2]][n-3],5)) Y4.append(round(new_val[group_c[2]][n-2],5)) Z4.append(round(new_val[group_c[2]][n-1],5)) elif(g==3): name5.append(cell.name+'<br>'+group_c[1]) X5.append(round(new_val[group_c[2]][n-3],5)) Y5.append(round(new_val[group_c[2]][n-2],5)) Z5.append(round(new_val[group_c[2]][n-1],5)) out_group.append(["Dataset Group"+str(g),output_cell]) if g==group_counter: output_cell=[] g_count=1 output_cell.append([" ",[]]) u_count=len(user_dict[g_count][0]) temp_count=u_count temp_g=1 before=0 for i in range(user_new_offset,user_new_offset+len(origin_name)): for temp_list in 
combined: c=temp_list[0] distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[temp_list[2]][n-3:n])) if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count) ,c.name,c.primary_site,c.primary_hist,temp_list[1],distance]) temp_g=1 temp_count=len(user_dict[temp_g][0]) for j in range(user_new_offset,user_new_offset+before): if ((j-user_new_offset)==temp_count): temp_g+=1 try: temp_count+=len(user_dict[temp_g][0]) except KeyError: temp_count+=0 distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[j][n-3:n])) if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count) ,origin_name[j-user_new_offset]," "," ","User Group"+str(temp_g),distance]) temp_g=g_count temp_count=len(user_dict[g_count][0]) for x in range(i+1,user_new_offset+len(origin_name)): if ((x-user_new_offset)==temp_count): temp_g+=1 try: temp_count+=len(user_dict[temp_g][0]) except KeyError: temp_count+=0 distance=np.linalg.norm(np.array(new_val[i][n-3:n])-np.array(new_val[x][n-3:n])) if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([origin_name[i-user_new_offset],"User Group"+str(g_count) ,origin_name[x-user_new_offset]," "," ","User Group"+str(temp_g),distance]) if g_count==1: name1.append(origin_name[i-user_new_offset]) X1.append(round(new_val[i][n-3],5)) Y1.append(round(new_val[i][n-2],5)) Z1.append(round(new_val[i][n-1],5)) else: name2.append(origin_name[i-user_new_offset]) X2.append(round(new_val[i][n-3],5)) Y2.append(round(new_val[i][n-2],5)) Z2.append(round(new_val[i][n-1],5)) if ((i-user_new_offset+1)==u_count): user_out_group.append(["User Group"+str(g_count),output_cell]) g_count+=1 before=u_count+before #print("I am here!!") try: u_count+=len(user_dict[g_count][0]) output_cell=[] output_cell.append([" ",[]]) except KeyError: u_count+=0 #print(element_counter) #print(show_row) if(element_counter>show_row): big_flag=1 sid=str(uuid.uuid1())+".csv" if(return_html=='user_pca.html'): dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology' ,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance'] user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance'] else: dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology' ,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance'] user_header=['User Sample Name','Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance'] P=Path('../').resolve().joinpath('src','static','csv',"dataset_"+sid) userP=Path('../').resolve().joinpath('src','static','csv',"user_"+sid) assP=Path('../').resolve().joinpath('src','assets','csv',"dataset_"+sid) assuserP=Path('../').resolve().joinpath('src','assets','csv',"user_"+sid) #print("start writing files") with open(str(assP), "w", newline='') as f: writer = csv.writer(f) for index,output_cell in out_group: writer.writerows([[dgroup[int(index[-1])-1]]]) writer.writerows([dataset_header]) for cell_line,b in output_cell: writer.writerows(b) #print("end writing first file") ''' with open(str(assP), "w", newline='') as ff: writer = csv.writer(ff) for 
index,output_cell in out_group: writer.writerows([[index]]) writer.writerows([dataset_header]) for cell_line,b in output_cell: writer.writerows(b) ''' #print("end writing 2 file") with open(str(assuserP), "w", newline='') as ff: writer = csv.writer(ff) for index,output_cell in user_out_group: writer.writerows([[ugroup[int(index[-1])-1]]]) writer.writerows([user_header]) for cell_line,b in output_cell: writer.writerows(b) #print("end writing 3 file") ''' with open(str(userP), "w", newline='') as f: writer = csv.writer(f) for index,output_cell in user_out_group: writer.writerows([[index]]) writer.writerows([user_header]) for cell_line,b in output_cell: writer.writerows(b) ''' #print("end writing 4 file") data_file_name="dataset_"+sid user_file_name="user_"+sid else: big_flag=0 data_file_name=0 user_file_name=0 return render_to_response(return_html,RequestContext(request, { 'ugroup':ugroup, 'dgroup':dgroup, 'min':min,'max':max, 'big_flag':big_flag, 'out_group':out_group,'user_out_group':user_out_group, 'propotion':propotion, 'table_propotion':table_propotion, 'data_file_name':data_file_name, 'user_file_name':user_file_name, 'X1':X1,'name1':mark_safe(json.dumps(name1)), 'Y1':Y1,'name2':mark_safe(json.dumps(name2)), 'Z1':Z1,'name3':mark_safe(json.dumps(name3)), 'X2':X2,'name4':mark_safe(json.dumps(name4)), 'Y2':Y2,'name5':mark_safe(json.dumps(name5)), 'Z2':Z2, 'X3':X3, 'Y3':Y3, 'Z3':Z3, 'X4':X4, 'Y4':Y4, 'Z4':Z4, 'X5':X5, 'Y5':Y5, 'Z5':Z5, })) #notice that we need to return a user_pca_center.html, too!! #return render_to_response('welcome.html',locals()) def express_profiling(request): return render(request, 'express_profiling.html', generate_samples()) def welcome(request): return render_to_response('welcome.html',locals()) def help(request): example_name="CellExpress_Examples.pptx" tutorial_name="CellExpress_Tutorial.pptx" return render_to_response('help.html',locals()) def help_similar_assessment(request): return render_to_response('help_similar_assessment.html',RequestContext(request)) def similar_assessment(request): return render(request, 'similar_assessment.html', generate_samples()) def gene_signature(request): return render(request, 'gene_signature.html', generate_samples()) def heatmap(request): group1=[] group2=[] group_count=0 presult={} #{probe object:p value} expression=[] probe_out=[] sample_out=[] not_found=[] quantile_flag=0 ratio_flag=0 indata=[] #get probe from different platform pform=request.POST.get('data_platform','U133A') if(pform=="mix_quantile"): pform="U133A" quantile_flag=1 if(pform=="mix_ratio"): pform="U133A" ratio_flag=1 stop_end=601 return_page_flag=0 user_probe_flag=0 if(request.POST['user_type']=="all"): all_probe=ProbeID.objects.filter(platform__name=pform).order_by('offset') probe_offset=list(all_probe.values_list('offset',flat=True)) pro_number=float(request.POST['probe_number']) #significant 0.05 or 0.01 all_probe=list(all_probe) elif(request.POST['user_type']=="genes"): #for all genes return_page_flag=1 if(pform=="U133A"): probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt') gene_list = pd.read_csv(probe_path.as_posix()) all_probe=list(gene_list['SYMBOL']) else: probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt') gene_list = pd.read_csv(probe_path.as_posix()) all_probe=list(gene_list['SYMBOL']) pro_number=float(request.POST['probe_number_gene']) probe_offset=[] for i in range(0,len(all_probe)): probe_offset.append(i) else: indata=request.POST['keyword'] indata = list(set(indata.split())) if(request.POST['gtype']=="probeid"): 
user_probe_flag=1 all_probe=ProbeID.objects.filter(platform__name=pform,Probe_id__in=indata).order_by('offset') probe_offset=list(all_probe.values_list('offset',flat=True)) pro_number=float('+inf') not_found=list(set(set(indata) - set(all_probe.values_list('Probe_id',flat=True)))) all_probe=list(all_probe) else: probe_offset=[] return_page_flag=1 pro_number=float('+inf') if(pform=="U133A"): probe_path=Path('../').resolve().joinpath('src','uni_u133a.txt') gene_list = pd.read_csv(probe_path.as_posix()) gene=list(gene_list['SYMBOL']) else: probe_path=Path('../').resolve().joinpath('src','uni_plus2.txt') gene_list = pd.read_csv(probe_path.as_posix()) gene=list(gene_list['SYMBOL']) probe_path=Path('../').resolve().joinpath('src','new_human_gene_info.txt') info=pd.read_csv(probe_path.as_posix(),sep='\t') col=list(info.columns.values) col[0]='symbol' info.columns=col info.index = info['symbol'] info.index.name = None info=info.iloc[:, 1:] all_probe=[] for i in indata: try: probe_offset.append(gene.index(i)) all_probe.append(i) info=info.drop(i,errors='ignore') except ValueError: re_symbol=list(set(info.loc[info['alias'].isin([i])].index)) #find whether has alias first if(len(re_symbol)!=0): re_match=Gene.objects.filter(platform__name__in=[pform],symbol__in=re_symbol).order_by('offset') #check the symbol in database or not repeat=len(re_match) if(repeat!=0): #match gene symbol in database ##print(re_match) for x in re_match: info=info.drop(x.symbol,errors='ignore') probe_offset.append(x.offset) all_probe.append(i+"("+x.symbol+")") else: not_found.append(i) else: not_found.append(i) #count the number of group group_counter=1 check_set=[] while True: temp_name='dataset_g'+str(group_counter) if temp_name in request.POST: group_counter=group_counter+1 else: group_counter=group_counter-1 break #get binary data s_group_dict={} #store sample val=[] #store value get from binary data group_name=[] clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True)) clline=list(Dataset.objects.all().values_list('name',flat=True)) #print(clline) opened_name=[] opened_val=[] for i in range(1,group_counter+1): s_group_dict['g'+str(i)]=[] dname='dataset_g'+str(i) datasets=request.POST.getlist(dname) temp_name='g'+str(i) group_name.append(temp_name) a_data=np.array([]) for dn in datasets: if dn=='Sanger Cell Line Project': c='select_sanger_g'+str(i) elif dn=='NCI60': c='select_nci_g'+str(i) elif dn=='GSE36133': c='select_ccle_g'+str(i) if dn in clline: ACELL=request.POST.getlist(c) s=Sample.objects.filter(dataset_id__name__in=[dn],cell_line_id__name__in=ACELL).order_by('dataset_id').select_related('cell_line_id__name','dataset_id') s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)] goffset=list(s.values_list('offset',flat=True)) #print(goffset) if dn not in opened_name: #check if the file is opened #print("opend file!!") opened_name.append(dn) if(return_page_flag==1): pth=Path('../').resolve().joinpath('src','gene_'+Dataset.objects.get(name=dn).data_path) if(quantile_flag==1): pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path) elif(ratio_flag==1): pth=Path('../').resolve().joinpath('src','mix_gene_'+Dataset.objects.get(name=dn).data_path) gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset] else: pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=dn).data_path) if(quantile_flag==1): pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path) elif(ratio_flag==1): 
pth=Path('../').resolve().joinpath('src','mix_'+Dataset.objects.get(name=dn).data_path) gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True)) raw_val=np.load(pth.as_posix(),mmap_mode='r') if(ratio_flag==1): norm=raw_val[np.ix_(gap)] raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True)) opened_val.append(raw_val) temp=raw_val[np.ix_(probe_offset,list(goffset))] if (len(a_data)!=0 ) and (len(temp)!=0): a_data=np.concatenate((a_data,temp),axis=1) elif (len(temp)!=0): a_data=raw_val[np.ix_(probe_offset,list(goffset))] else: temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))] if (len(a_data)!=0 ) and (len(temp)!=0): a_data=np.concatenate((a_data,temp),axis=1) elif (len(temp)!=0): a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(goffset))] elif dn in clinic: #print("I am in clinical part") com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries? com_hists=[w1 for segments in com_hists for w1 in segments.split('/')] prims=com_hists[0::2] hists=com_hists[1::2] temp=request.POST.getlist('filter_'+dn+'_g'+str(i)) age=[] gender=[] ethnic=[] grade=[] stage=[] T=[] N=[] M=[] metas=[] for t in temp: if 'stage/' in t: stage.append(t[6:]) elif 'gender/' in t: gender.append(t[7:]) elif 'ethnic/' in t: ethnic.append(t[7:]) elif 'grade/' in t: grade.append(t[6:]) elif 'stageT/' in t: T.append(t[7:]) elif 'stageN/' in t: N.append(t[7:]) elif 'stageM/' in t: M.append(t[7:]) elif 'metastatic/' in t: metas.append(t[11:]) ''' if t[11:]=='False': metas.append(0) else: metas.append(1) ''' else: #"age/" age.append(t[4:]) cgoffset=[] for x in range(0,len(prims)): s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x], primary_hist=hists[x], age__in=age, gender__in=gender, ethnic__in=ethnic, stage__in=stage, grade__in=grade, stageT__in=T, stageN__in=N, stageM__in=M, metastatic__in=metas, ).select_related('dataset_id').order_by('id') s_group_dict['g'+str(i)]=list(s)+s_group_dict['g'+str(i)] cgoffset+=list(s.values_list('offset',flat=True)) if dn not in opened_name: #check if the file is opened #print("opend file!!") opened_name.append(dn) if(return_page_flag==1): pth=Path('../').resolve().joinpath('src','gene_'+Clinical_Dataset.objects.get(name=dn).data_path) if(quantile_flag==1): pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path) elif(ratio_flag==1): pth=Path('../').resolve().joinpath('src','mix_gene_'+Clinical_Dataset.objects.get(name=dn).data_path) gap=[Gene.objects.filter(platform__name=pform,symbol="GAPDH")[0].offset] else: pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=dn).data_path) if(quantile_flag==1): pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path) elif(ratio_flag==1): pth=Path('../').resolve().joinpath('src','mix_'+Clinical_Dataset.objects.get(name=dn).data_path) gap=list(ProbeID.objects.filter(platform__name=pform).filter(Gene_symbol="GAPDH").order_by('id').values_list('offset',flat=True)) raw_val=np.load(pth.as_posix(),mmap_mode='r') if(ratio_flag==1): norm=raw_val[np.ix_(gap)] raw_val=np.subtract(raw_val,np.mean(norm,axis=0, dtype=np.float64,keepdims=True)) opened_val.append(raw_val) temp=raw_val[np.ix_(probe_offset,list(cgoffset))] #print(temp) if (len(a_data)!=0 ) and (len(temp)!=0): a_data=np.concatenate((a_data,temp),axis=1) elif (len(temp)!=0): 
a_data=raw_val[np.ix_(probe_offset,list(cgoffset))] else: temp=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))] if (len(a_data)!=0 ) and (len(temp)!=0): a_data=np.concatenate((a_data,temp),axis=1) elif (len(temp)!=0): a_data=opened_val[opened_name.index(dn)][np.ix_(probe_offset,list(cgoffset))] val.append(a_data.tolist()) #print(len(val)) #print(len(val[0])) ##print(val) #run the one way ANOVA test or ttest for every probe base on the platform selected express={} #logger.info('run ttest or anova') if group_counter<=2: for i in range(0,len(all_probe)): #need to fix if try to run on laptop presult[all_probe[i]]=stats.ttest_ind(list(val[0][i]),list(val[1][i]),equal_var=False,nan_policy='omit')[1] express[all_probe[i]]=np.append(val[0][i],val[1][i]).tolist() else: for i in range(0,len(all_probe)): #need to fix if try to run on laptop to_anova=[] for n in range(0,group_counter): #val[n]=sum(val[n],[]) to_anova.append(val[n][i]) presult[all_probe[i]]=stats.f_oneway(*to_anova)[1] express[all_probe[i]]=sum(to_anova,[]) #print("test done") #sort the dictionary with p-value and need to get the expression data again (top20) #presult[all_probe[0]]=float('nan') #presult[all_probe[11]]=float('nan') #how to deal with all "nan"? tempf=pd.DataFrame(list(presult.items()), columns=['probe', 'pvalue']) tempf=tempf.replace(to_replace=float('nan'),value=float('+inf')) presult=dict(zip(tempf.probe, tempf.pvalue)) sortkey=sorted(presult,key=presult.get) #can optimize here counter=1 cell_probe_val=[] for w in sortkey: #print(presult[w],":",w) if (presult[w]<pro_number): cell_probe_val.append([w,presult[w]]) print(cell_probe_val) express_mean=np.mean(np.array(express[w])) expression.append(list((np.array(express[w]))-express_mean)) if(return_page_flag==1): probe_out.append(w) else: probe_out.append(w.Probe_id+"("+w.Gene_symbol+")") counter+=1 else: break if counter>=stop_end: break n_counter=1 for n in group_name: sample_counter=1 for s in s_group_dict[n]: dataset_n=s.dataset_id.name if dataset_n=="Sanger Cell Line Project": sample_out.append(s.cell_line_id.name+"(SCLP)(group"+str(n_counter)+"-"+str(sample_counter)+")") elif dataset_n in clline: #print(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")") sample_out.append(s.cell_line_id.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")") else: #what to output for clinical part? 
#print(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")") sample_out.append(s.name+"("+s.dataset_id.name+")"+"(group"+str(n_counter)+"-"+str(sample_counter)+")") sample_counter+=1 n_counter+=1 #logger.info('finish combine output samples') sns.set(font="monospace") test=pd.DataFrame(data=expression,index=probe_out,columns=sample_out) cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), 'green': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)) } my_cmap = LinearSegmentedColormap('my_colormap',cdict,256) #test.to_csv('heatmap_text.csv') try: #g = sns.clustermap(test,cmap=my_cmap) if(len(probe_out)<=300): g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(19.20,48.60)) else: g = sns.clustermap(test,cmap=my_cmap,xticklabels=list(test.columns),yticklabels=(test.index),figsize=(30.00,100.00)) except: if((return_page_flag==1) and (probe_out==[])): probe_out=indata #probe_out is the rows of heatmap return render_to_response('noprobe.html',RequestContext(request, { 'user_probe_flag':user_probe_flag, 'return_page_flag':return_page_flag, 'probe_out':probe_out, 'not_found':not_found })) ''' plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0) if counter>=stop_end: plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=4) else: plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), fontsize=7) plt.setp(g.ax_heatmap.get_xticklabels(), rotation=270,ha='center') ''' sid=str(uuid.uuid1())+".png" #print(sid) P=Path('../').resolve().joinpath('src','static','image',sid) assP=Path('../').resolve().joinpath('src','assets','image',sid) #g.savefig(str(P)) #plt.figure(figsize=(1920/my_dpi, 2160/my_dpi), dpi=100) #plt.savefig(str(assP), dpi=my_dpi*10) g.savefig(str(assP),bbox_inches='tight') file_name=sid return render_to_response('heatmap.html',RequestContext(request, { 'user_probe_flag':user_probe_flag, 'return_page_flag':return_page_flag, 'cell_probe_val':cell_probe_val, 'file_name':file_name, 'pro_number':pro_number, 'not_found':not_found })) def pca(request): propotion=0 table_propotion=0 pform=request.POST['data_platform'] #get the platform show=request.POST['show_type'] #get the pca show type group_counter=1 cell_line_dict={} #count how many group group_counter=1 while True: temp_name='dataset_g'+str(group_counter) if temp_name in request.POST: group_counter=group_counter+1 else: group_counter=group_counter-1 break udgroup=[] s_group_dict={} #store sample group_name=[] offset_group_dict={} #store offset clinic=list(Clinical_Dataset.objects.all().values_list('name',flat=True)) clline=list(Dataset.objects.all().values_list('name',flat=True)) all_exist_dataset=[] for i in range(1,group_counter+1): udgroup.append(request.POST['group_name'+str(i)]) #print(udgroup) if(udgroup[-1]==''): udgroup[-1]='Group'+str(i) dname='dataset_g'+str(i) all_exist_dataset=all_exist_dataset+request.POST.getlist(dname) all_exist_dataset=list(set(all_exist_dataset)) all_base=[0] for i in range(0,len(all_exist_dataset)-1): if all_exist_dataset[i] in clline: all_base.append(all_base[i]+Sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count()) else: all_base.append(all_base[i]+Clinical_sample.objects.filter(dataset_id__name__in=[all_exist_dataset[i]]).count()) all_c=[] for i in range(1,group_counter+1): s_group_dict['g'+str(i)]=[] offset_group_dict['g'+str(i)]=[] cell_line_dict['g'+str(i)]=[] dname='dataset_g'+str(i) datasets=request.POST.getlist(dname) 
group_name.append('g'+str(i)) goffset_nci=[] goffset_gse=[] for dn in datasets: if dn=='Sanger Cell Line Project': c='select_sanger_g'+str(i) elif dn=='NCI60': c='select_nci_g'+str(i) elif dn=='GSE36133': c='select_ccle_g'+str(i) if dn in clline: temp=list(set(request.POST.getlist(c))) if 'd_sample' in show: if all_c==[]: all_c=all_c+temp uni=temp else: uni=list(set(temp)-set(all_c)) all_c=all_c+uni else: uni=list(temp) #do not filter duplicate input only when select+centroid s=Sample.objects.filter(cell_line_id__name__in=uni,dataset_id__name__in=[dn]).order_by('dataset_id' ).select_related('cell_line_id__name','cell_line_id__primary_site','cell_line_id__primary_hist','dataset_id','dataset_id__name') cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('cell_line_id__name',flat=True)) s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s) offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)])) else: #dealing with clinical sample datasets com_hists=list(set(request.POST.getlist('primd_'+dn+'_g'+str(i)))) #can I get this by label to reduce number of queries? com_hists=[w1 for segments in com_hists for w1 in segments.split('/')] prims=com_hists[0::2] hists=com_hists[1::2] temp=request.POST.getlist('filter_'+dn+'_g'+str(i)) age=[] gender=[] ethnic=[] grade=[] stage=[] T=[] N=[] M=[] metas=[] for t in temp: if 'stage/' in t: stage.append(t[6:]) elif 'gender/' in t: gender.append(t[7:]) elif 'ethnic/' in t: ethnic.append(t[7:]) elif 'grade/' in t: grade.append(t[6:]) elif 'stageT/' in t: T.append(t[7:]) elif 'stageN/' in t: N.append(t[7:]) elif 'stageM/' in t: M.append(t[7:]) elif 'metastatic/' in t: metas.append(t[11:]) ''' if t[11:]=='False': metas.append(0) else: metas.append(1) ''' else: #"age/" age.append(t[4:]) for x in range(0,len(prims)): s=Clinical_sample.objects.filter(dataset_id__name=dn,primary_site=prims[x], primary_hist=hists[x], age__in=age, gender__in=gender, ethnic__in=ethnic, stage__in=stage, grade__in=grade, stageT__in=T, stageN__in=N, stageM__in=M, metastatic__in=metas, ).select_related('dataset_id').order_by('id') s_group_dict['g'+str(i)]=s_group_dict['g'+str(i)]+list(s) cell_line_dict['g'+str(i)]=cell_line_dict['g'+str(i)]+list(s.values_list('name',flat=True)) offset_group_dict['g'+str(i)]=offset_group_dict['g'+str(i)]+list(np.add(list(s.values_list('offset',flat=True)),all_base[all_exist_dataset.index(dn)])) all_sample=[] all_cellline=[] cell_object=[] all_offset=[] sample_counter={} group_cell=[] g_s_counter=[0] for i in range(1,group_counter+1): all_sample=all_sample+s_group_dict['g'+str(i)] #will not exist duplicate sample if d_sample all_offset=all_offset+offset_group_dict['g'+str(i)] all_cellline=all_cellline+cell_line_dict['g'+str(i)] g_s_counter.append(g_s_counter[i-1]+len(s_group_dict['g'+str(i)])) if 'd_sample' in show: if((len(all_sample))<4): error_reason='You should have at least 4 samples for PCA. The samples are not enough.<br />'\ 'The number of samples you selected is '+str(len(all_sample))+'.' 
return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) for i in all_sample: sample_counter[i.name]=1 if str(type(i))=="<class 'probes.models.Sample'>": ##print("i am sample!!") cell_object.append(i.cell_line_id) else: ##print("i am clinical!!") cell_object.append(i) #delete nan, transpose matrix ##open file for x in range(0,len(all_exist_dataset)): if all_exist_dataset[x] in clline: pth=Path('../').resolve().joinpath('src',Dataset.objects.get(name=all_exist_dataset[x]).data_path) else: pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=all_exist_dataset[x]).data_path) if x==0: val=np.load(pth.as_posix()) else: val=np.hstack((val, np.load(pth.as_posix())))#combine together if 'd_sample' in show: val=val[:,all_offset] #val=val[~np.isnan(val).any(axis=1)] val=np.transpose(val) pca_index=[] dis_offset=[] #PREMISE:same dataset same cell line will have only one type of primary site and primary histology name1=[] name2=[] name3=[] name4=[] name5=[] X1=[] Y1=[] Z1=[] X2=[] Y2=[] Z2=[] X3=[] Y3=[] Z3=[] X4=[] Y4=[] Z4=[] X5=[] Y5=[] Z5=[] if(len(all_exist_dataset)==1): n=3 #need to fix to the best one #need to fix proportion else: n=4 #logger.info('pca show') if 'd_sample' in show: #count the pca first pca= PCA(n_components=n) Xval = pca.fit_transform(val[:,:]) #cannot get Xval with original all_offset any more ratio_temp=pca.explained_variance_ratio_ propotion=sum(ratio_temp[n-3:n]) table_propotion=sum(ratio_temp[0:n]) ##print(Xval) ##print(all_cellline) ##print(all_sample) max=0 min=10000000000 out_group=[] exist_cell={}#cell line object:counter for g in range(1,group_counter+1): output_cell={} check={} for s in range(g_s_counter[g-1],g_s_counter[g]): if str(type(all_sample[s]))=="<class 'probes.models.Sample'>": cell=all_sample[s].cell_line_id else: cell=all_sample[s] try: counter=exist_cell[cell] exist_cell[cell]=counter+1 except KeyError: exist_cell[cell]=1 try: t=output_cell[cell] except KeyError: output_cell[cell]=[cell,[]] check[all_sample[s].name]=[] sample_counter[all_sample[s].name]=exist_cell[cell] for i in range(0,len(all_sample)): if i!=s: try: if(all_sample[s].name not in check[all_sample[i].name]): distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')' ,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]]) check[all_sample[s].name].append(all_sample[i].name) except KeyError: distance=np.linalg.norm(Xval[i][n-3:n]-Xval[s][n-3:n]) if distance<min: min=distance if distance>max: max=distance output_cell[cell][1].append([all_cellline[s]+'('+str(exist_cell[cell])+')' ,all_sample[s].name,all_sample[s].dataset_id.name,all_cellline[i],all_sample[i].name,all_sample[i].dataset_id.name,distance,cell_object[i]]) check[all_sample[s].name].append(all_sample[i].name) if(g==1): name1.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X1.append(round(Xval[s][n-3],5)) Y1.append(round(Xval[s][n-2],5)) Z1.append(round(Xval[s][n-1],5)) elif(g==2): name2.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X2.append(round(Xval[s][n-3],5)) Y2.append(round(Xval[s][n-2],5)) Z2.append(round(Xval[s][n-1],5)) elif(g==3): name3.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X3.append(round(Xval[s][n-3],5)) 
Y3.append(round(Xval[s][n-2],5)) Z3.append(round(Xval[s][n-1],5)) elif(g==4): name4.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X4.append(round(Xval[s][n-3],5)) Y4.append(round(Xval[s][n-2],5)) Z4.append(round(Xval[s][n-1],5)) elif(g==5): name5.append(all_cellline[s]+'('+str(exist_cell[cell])+')'+'<br>'+all_sample[s].name) X5.append(round(Xval[s][n-3],5)) Y5.append(round(Xval[s][n-2],5)) Z5.append(round(Xval[s][n-1],5)) dictlist=[] for key, value in output_cell.items(): temp = [value] dictlist+=temp output_cell=list(dictlist) out_group.append([g,output_cell]) element_counter=0 #[g,[[group_cell_line,[paired_cellline,......,]],[],[]]] for i in out_group: for temp_list in i[1]: element_counter+=len(temp_list[1]) for temp in temp_list[1]: ##print(temp) temp[3]=temp[3]+'('+str(sample_counter[temp[4]])+')' return_html='pca.html' else: #This part is for centroid display return_html='pca_center.html' element_counter=0 #val=val[~np.isnan(val).any(axis=1)] #bottle neck??? #This part is for select cell line base on dataset,count centroid base on the dataset #group中的cell line為單位來算重心 #logger.info('pca show centroid with selection') location_dict={} #{group number:[[cell object,dataset,new location]]} combined=[] sample_list=[] pca_index=np.array(pca_index) X_val=[] a_all_offset=np.array(all_offset) for i in range(1,group_counter+1): dis_cellline=list(set(cell_object[g_s_counter[i-1]:g_s_counter[i]])) #cell object may have duplicate cell line since:NCI A + CCLE A===>[A,A] location_dict['g'+str(i)]=[] dataset_dict={} a_cell_object=np.array(cell_object) for c in dis_cellline: #dis_cellline may not have the same order as cell_object temp1=np.where((a_cell_object==c))[0] temp2=np.where((temp1>=g_s_counter[i-1])&(temp1<g_s_counter[i])) total_offset=temp1[temp2] selected_val=val[:,a_all_offset[total_offset]] selected_val=np.transpose(selected_val) new_loca=(np.mean(selected_val,axis=0,dtype=np.float64,keepdims=True)).tolist()[0] a_sample=np.array(all_sample) selected_sample=a_sample[total_offset] if list(selected_sample) in sample_list: #to prevent two different colors in different group continue else: sample_list.append(list(selected_sample)) ##print(selected_sample) d_temp=[] for s in selected_sample: d_temp.append(s.dataset_id.name) dataset_dict[c]="/".join(list(set(d_temp))) ##print(dataset_dict[c]) X_val.append(new_loca) location_dict['g'+str(i)].append([c,dataset_dict[c],len(X_val)-1]) #the last part is the index to get pca result from new_val combined.append([c,dataset_dict[c],len(X_val)-1]) #all cell line, do not matter order #run the pca ##print(len(X_val)) if((len(X_val))<4): error_reason='Since the display method is [centroid], you should have at least 4 dots for PCA. The dots are not enough.<br />'\ 'The number of centroid dots you selected is '+str(len(X_val))+'.' 
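# In this centroid branch each dot fed to PCA is a per-cell-line centroid:
# np.mean above averages the columns of all samples belonging to one cell
# line within a group, giving one point per cell line (and per dataset
# combination).  With up to 4 PCA components, at least 4 such centroids are
# therefore required, which is what this check enforces.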
return render_to_response('pca_error.html',RequestContext(request, { 'error_reason':mark_safe(json.dumps(error_reason)), })) X_val=np.matrix(X_val) pca= PCA(n_components=n) new_val = pca.fit_transform(X_val[:,:]) #cannot get Xval with original offset any more ratio_temp=pca.explained_variance_ratio_ propotion=sum(ratio_temp[n-3:n]) table_propotion=sum(ratio_temp[0:n]) ##print(new_val) out_group=[] min=10000000000 max=0 for g in range(1,group_counter+1): output_cell=[] exist_cell={} for group_c in location_dict['g'+str(g)]: #a list of [c,dataset_dict[c],new_val index] in group one cell=group_c[0] key_string=cell.name+'/'+cell.primary_site+'/'+cell.primary_hist+'/'+group_c[1] exist_cell[key_string]=[] output_cell.append([cell,[]]) #count the distance for temp_list in combined: c=temp_list[0] temp_string=c.name+'/'+c.primary_site+'/'+c.primary_hist+'/'+temp_list[1] try: if(key_string not in exist_cell[temp_string]): distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n])) if distance==0: continue if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance]) element_counter+=1 exist_cell[key_string].append(temp_string) except KeyError: distance=np.linalg.norm(np.array(new_val[group_c[2]][n-3:n])-np.array(new_val[temp_list[2]][n-3:n])) if distance==0: continue if distance<min: min=distance if distance>max: max=distance output_cell[len(output_cell)-1][1].append([cell,group_c[1],temp_list[0],temp_list[1],distance]) element_counter+=1 exist_cell[key_string].append(temp_string) if(g==1): name1.append(cell.name+'<br>'+group_c[1]) X1.append(round(new_val[group_c[2]][n-3],5)) Y1.append(round(new_val[group_c[2]][n-2],5)) Z1.append(round(new_val[group_c[2]][n-1],5)) elif(g==2): name2.append(cell.name+'<br>'+group_c[1]) X2.append(round(new_val[group_c[2]][n-3],5)) Y2.append(round(new_val[group_c[2]][n-2],5)) Z2.append(round(new_val[group_c[2]][n-1],5)) elif(g==3): name3.append(cell.name+'<br>'+group_c[1]) X3.append(round(new_val[group_c[2]][n-3],5)) Y3.append(round(new_val[group_c[2]][n-2],5)) Z3.append(round(new_val[group_c[2]][n-1],5)) elif(g==4): name4.append(cell.name+'<br>'+group_c[1]) X4.append(round(new_val[group_c[2]][n-3],5)) Y4.append(round(new_val[group_c[2]][n-2],5)) Z4.append(round(new_val[group_c[2]][n-1],5)) elif(g==5): name5.append(cell.name+'<br>'+group_c[1]) X5.append(round(new_val[group_c[2]][n-3],5)) Y5.append(round(new_val[group_c[2]][n-2],5)) Z5.append(round(new_val[group_c[2]][n-1],5)) out_group.append([g,output_cell]) #logger.info('end pca') if(element_counter>show_row): big_flag=1 sid=str(uuid.uuid1())+".csv" if(return_html=='pca.html'): dataset_header=['Group Cell Line/Clinical Sample','Sample Name','Primary Site','Primary Histology' ,'Dataset','Paired Cell Line name/Clinical Sample','Sample Name','Primary Site','Primary Histology','Dataset','Distance'] else: dataset_header=['Group Cell Line/Clinical Sample','Primary Site','Primary Histology' ,'Dataset','Paired Cell Line name/Clinical Sample','Primary Site','Primary Histology','Dataset','Distance'] P=Path('../').resolve().joinpath('src','static','csv',sid) assP=Path('../').resolve().joinpath('src','assets','csv',sid) with open(str(assP), "w", newline='') as f: writer = csv.writer(f) for index,output_cell in out_group: writer.writerows([[udgroup[index-1]]]) writer.writerows([dataset_header]) for cell_line,b in output_cell: temp_b=[] if(return_html=='pca.html'): for 
group_cell,sn,dset,cname,sname,setname,dis,cell_object in b: temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname ,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis]) else: for group_cell,group_dataset,paired_cell,paired_dataset,dis in b: temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset ,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis]) writer.writerows(temp_b) #print('write first file done') ''' with open(str(assP), "w", newline='') as ff: writer = csv.writer(ff) for index,output_cell in out_group: writer.writerows([[udgroup[index-1]]]) writer.writerows([dataset_header]) for cell_line,b in output_cell: temp_b=[] if(return_html=='pca.html'): for group_cell,sn,dset,cname,sname,setname,dis,cell_object in b: temp_b.append([group_cell,sn,cell_line.primary_site,cell_line.primary_hist,dset,cname ,sname,cell_object.primary_site,cell_object.primary_hist,setname,dis]) else: for group_cell,group_dataset,paired_cell,paired_dataset,dis in b: temp_b.append([group_cell.name,group_cell.primary_site,group_cell.primary_hist,group_dataset ,paired_cell.name,paired_cell.primary_site,paired_cell.primary_hist,paired_dataset,dis]) writer.writerows(temp_b) ''' #print('write second file done') data_file_name=sid else: big_flag=0 data_file_name=0 return render_to_response(return_html,RequestContext(request, { 'udgroup':udgroup, 'min':min,'max':max, 'out_group':out_group, 'propotion':propotion, 'big_flag':big_flag, 'data_file_name':data_file_name, 'table_propotion':table_propotion, 'X1':X1,'name1':mark_safe(json.dumps(name1)), 'Y1':Y1,'name2':mark_safe(json.dumps(name2)), 'Z1':Z1,'name3':mark_safe(json.dumps(name3)), 'X2':X2,'name4':mark_safe(json.dumps(name4)), 'Y2':Y2,'name5':mark_safe(json.dumps(name5)), 'Z2':Z2, 'X3':X3, 'Y3':Y3, 'Z3':Z3, 'X4':X4, 'Y4':Y4, 'Z4':Z4, 'X5':X5, 'Y5':Y5, 'Z5':Z5, })) def cellline_microarray(request): # Pre-fetch the cell line field for all samples. # Reduce N query in to 1. 
N = number of samples d=Dataset.objects.all() d_name=list(d.values_list('name',flat=True)) datasets=[] #[[dataset_name,[[primary_site,[cell line]]]] an=[] for i in d_name: if i=="Sanger Cell Line Project": alias='sanger' elif i=="NCI60": alias='nci' elif i=="GSE36133": alias='gse' else: alias=i an.append(alias) sample=Sample.objects.filter(dataset_id__name=i).order_by('cell_line_id__primary_site').select_related('cell_line_id') datasets.append([i,alias,list(sample),[]]) sites=list(sample.values_list('cell_line_id__primary_site',flat=True)) hists=list(sample.values_list('cell_line_id__name',flat=True)) dis_prim=list(sample.values_list('cell_line_id__primary_site',flat=True).distinct()) hists=list(hists) id_counter=0 for p in range(0,len(dis_prim)): temp=sites.count(dis_prim[p]) datasets[-1][3].append([dis_prim[p],list(set(hists[id_counter:id_counter+temp]))]) id_counter+=temp return render(request, 'cellline_microarray.html', { 'an':mark_safe(json.dumps(an)), 'd_name':d_name, 'datasets':datasets, }) def cell_lines(request): #samples = Sample.objects.all().select_related('cell_line_id','dataset_id') #lines=CellLine.objects.all().distinct() #val_pairs = ( # (l, l.fcell_line_id.prefetch_related('dataset_id__name').values_list('dataset_id__name',flat=True).distinct()) # for l in lines # ) #context['val_pairs']=val_pairs cell_line_dict={} context={} nr_samples=[] samples=Sample.objects.all().select_related('cell_line_id','dataset_id').order_by('id') for ss in samples: name=ss.cell_line_id.name primary_site=ss.cell_line_id.primary_site primary_hist=ss.cell_line_id.primary_hist comb=name+"/"+primary_site+"/"+primary_hist dataset=ss.dataset_id.name try: sets=cell_line_dict[comb] if (dataset not in sets): cell_line_dict[comb]=dataset+"/"+sets except KeyError: cell_line_dict[comb]=dataset nr_samples.append(ss) val_pairs = ( (ss,cell_line_dict[ss.cell_line_id.name+"/"+ss.cell_line_id.primary_site+"/"+ss.cell_line_id.primary_hist]) for ss in nr_samples ) context['val_pairs']=val_pairs return render_to_response('cell_line.html', RequestContext(request, context)) def clinical_search(request): norm_name=[request.POST['normalize']] #get the normalize gene name #f_type=['age','gender','ethnic','grade','stage','stageT','stageN','stageM','metastatic'] age=[] gender=[] ethnic=[] grade=[] stage=[] T=[] N=[] M=[] metas=[] #get the probe/gene/id keywords if 'keyword' in request.POST and request.POST['keyword'] != '': words = request.POST['keyword'] words = list(set(words.split())) else: return HttpResponse("<p>where is your keyword?</p>") plus2_rank=np.load('ranking_u133plus2.npy') #open only plus2 platform rank sample_probe_val_pairs=[] #for output if 'gtype' in request.POST and request.POST['gtype'] == 'probeid': gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Probe_id__in=words).order_by('id') probe=list(gene.values_list('offset',flat=True)) ##print(gene) elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol': gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=words).order_by('id') probe=list(gene.values_list('offset',flat=True)) else: gene = ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Entrez_id__in=words).order_by('id') probe=list(gene.values_list('offset',flat=True)) if request.POST['clinical_method'] == 'prim_dataset': if 'dataset' in request.POST and request.POST['dataset'] != '': datas=request.POST.getlist('dataset') else: d=Clinical_Dataset.objects.all() datas=d.values_list('name',flat=True) 
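# Filter tokens posted by the form arrive as "<field>/<value>" strings
# (e.g. "gender/<value>", "stage/<value>", "age/<value>"); the loops below
# split them on that prefix into the age/gender/ethnic/grade/stage/T/N/M/
# metas lists, which are then passed to the __in lookups on Clinical_sample.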
com_hists=list(set(request.POST.getlist('primhist'))) com_hists=[w1 for segments in com_hists for w1 in segments.split('/')] prims=com_hists[0::2] hists=com_hists[1::2] temp=request.POST.getlist('filter_primh') for i in temp: if 'stage/' in i: stage.append(i[6:]) elif 'gender/' in i: gender.append(i[7:]) elif 'ethnic/' in i: ethnic.append(i[7:]) elif 'grade/' in i: grade.append(i[6:]) elif 'stageT/' in i: T.append(i[7:]) elif 'stageN/' in i: N.append(i[7:]) elif 'stageM/' in i: M.append(i[7:]) elif 'metastatic/' in i: metas.append(i[11:]) ''' if i[11:]=='False': metas.append(0) else: metas.append(1) ''' else: #"age/" age.append(i[4:]) for sets in datas: samples=[] offset=[] if request.POST['clinical_method'] == 'prim_dataset': com_hists=list(set(request.POST.getlist('primd_'+sets))) #can I get this by label to reduce number of queries? com_hists=[w1 for segments in com_hists for w1 in segments.split('/')] prims=com_hists[0::2] hists=com_hists[1::2] temp=request.POST.getlist('filter_'+sets) age=[] gender=[] ethnic=[] grade=[] stage=[] T=[] N=[] M=[] metas=[] for i in temp: if 'stage/' in i: stage.append(i[6:]) elif 'gender/' in i: gender.append(i[7:]) elif 'ethnic/' in i: ethnic.append(i[7:]) elif 'grade/' in i: grade.append(i[6:]) elif 'stageT/' in i: T.append(i[7:]) elif 'stageN/' in i: N.append(i[7:]) elif 'stageM/' in i: M.append(i[7:]) elif 'metastatic/' in i: metas.append(i[11:]) ''' if i[11:]=='False': metas.append(0) else: metas.append(1) ''' else: #"age/" age.append(i[4:]) for i in range(0,len(prims)): #metas=[bool(x) for x in metas] s=Clinical_sample.objects.filter(dataset_id__name=sets,primary_site=prims[i], primary_hist=hists[i], age__in=age, gender__in=gender, ethnic__in=ethnic, stage__in=stage, grade__in=grade, stageT__in=T, stageN__in=N, stageM__in=M, metastatic__in=metas ).select_related('dataset_id').order_by('id') samples+=list(s) offset+=list(s.values_list('offset',flat=True)) ##print(s) pth=Path('../').resolve().joinpath('src',Clinical_Dataset.objects.get(name=sets).data_path) val=np.load(pth.as_posix(),mmap_mode='r') norm_probe=ProbeID.objects.filter(platform__name__in=["PLUS2"]).filter(Gene_symbol__in=norm_name).order_by('id') probe_offset=list(norm_probe.values_list('offset',flat=True)) temp=val[np.ix_(probe_offset,offset)] norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True) # Make a generator to generate all (cell, probe, val) pairs if(len(gene)!=0 and len(samples)!=0): raw_test=val[np.ix_(probe,offset)] normalize=np.subtract(raw_test,norm)#dimension different!!!! 
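# Shape note: raw_test is (n_probes, n_samples) and norm is (1, n_samples),
# the per-sample mean of the normalization gene's probes, so the subtraction
# broadcasts over rows -- every probe value is reported relative to the
# chosen normalization gene in the same sample.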
sample_probe_val_pairs += [ (c, p, raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(gene) for cell_ix, c in enumerate(samples) ] return render(request, 'clinical_search.html', { 'sample_probe_val_pairs': sample_probe_val_pairs, }) def data(request): SANGER=[] sanger_flag=0 NCI=[] nci_flag=0 GSE=[] gse_flag=0 cell=[] ncicell=[] CCcell=[] ps_id='0' pn_id='0' if request.POST.get('cell_line_method','text') == 'text': if request.POST['cellline'] =='': return HttpResponse("<p>please make sure to enter cell line name in Step3.</p>" ) c = request.POST['cellline'] c = list(set(c.split())) sanger_flag=1 samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id') cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=c).order_by('id') offset=list(cell.values_list('offset',flat=True)) ps_id='1' nci_flag=1 ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id') ncicell=ncisamples.filter(cell_line_id__name__in=c).order_by('id') ncioffset=list(ncicell.values_list('offset',flat=True)) pn_id='3' gse_flag=1 CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id') CCcell=CCsamples.filter(cell_line_id__name__in=c).order_by('id') CCoffset=list(CCcell.values_list('offset',flat=True)) pn_id='3' else: if 'dataset' in request.POST and request.POST['dataset'] != '': datas=request.POST.getlist('dataset') if 'Sanger Cell Line Project' in datas: sanger_flag=1 SANGER=list(set(request.POST.getlist('select_sanger'))) samples=Sample.objects.filter(dataset_id__name__in=['Sanger Cell Line Project']).order_by('id') cell=samples.select_related('cell_line_id','dataset_id').filter(cell_line_id__name__in=SANGER).order_by('id') offset=list(cell.values_list('offset',flat=True)) ps_id=str(Platform.objects.filter(name__in=["U133A"])[0].id) if 'NCI60' in datas: nci_flag=1 NCI=list(set(request.POST.getlist('select_nci'))) ncisamples=Sample.objects.filter(dataset_id__name__in=['NCI60']).select_related('cell_line_id','dataset_id').order_by('id') ncicell=ncisamples.filter(cell_line_id__name__in=NCI).order_by('id') ncioffset=list(ncicell.values_list('offset',flat=True)) pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id) if 'GSE36133' in datas: gse_flag=1 GSE=list(set(request.POST.getlist('select_gse'))) CCsamples=Sample.objects.filter(dataset_id__name__in=['GSE36133']).select_related('cell_line_id','dataset_id').order_by('id') CCcell=CCsamples.filter(cell_line_id__name__in=GSE).order_by('id') CCoffset=list(CCcell.values_list('offset',flat=True)) pn_id=str(Platform.objects.filter(name__in=["PLUS2"])[0].id) if len(SANGER)==0 and len(NCI)==0 and len(GSE)==0: return HttpResponse("<p>please select primary sites.</p>" ) else: return HttpResponse("<p>please check Step3 again.</p>" ) if 'keyword' in request.POST and request.POST['keyword'] != '': words = request.POST['keyword'] words = list(set(words.split())) else: return HttpResponse("<p>where is your keyword?</p>") #open files sanger_val_pth=Path('../').resolve().joinpath('src','sanger_cell_line_proj.npy') nci_val_pth=Path('../').resolve().joinpath('src','nci60.npy') gse_val_pth=Path('../').resolve().joinpath('src','GSE36133.npy') sanger_val=np.load(sanger_val_pth.as_posix(),mmap_mode='r') nci_val=np.load(nci_val_pth.as_posix(),mmap_mode='r') 
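# The .npy expression matrices are opened with mmap_mode='r', i.e. they are
# memory-mapped rather than read fully into RAM, so only the probe/sample
# sub-matrices selected with np.ix_ below actually need to be read from disk.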
gse_val=np.load(gse_val_pth.as_posix(),mmap_mode='r') u133a_rank=np.load('ranking_u133a.npy') plus2_rank=np.load('ranking_u133plus2.npy') gene = [] ncigene = [] CCgene = [] context={} norm_name=[request.POST['normalize']] if sanger_flag==1: #if request.POST['normalize']!='NTRK3-AS1': sanger_g=ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=norm_name).order_by('id') sanger_probe_offset=list(sanger_g.values_list('offset',flat=True)) temp=sanger_val[np.ix_(sanger_probe_offset,offset)] norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True) #else: # norm=0.0 else: norm=0.0 #if / should = 1 if nci_flag==1: nci_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id') nci_probe_offset=list(nci_g.values_list('offset',flat=True)) temp=nci_val[np.ix_(nci_probe_offset,ncioffset)] nci_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True) ##print(nci_norm) else: nci_norm=0.0 #if / should = 1 if gse_flag==1: CC_g=ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=norm_name).order_by('id') CC_probe_offset=list(CC_g.values_list('offset',flat=True)) temp=gse_val[np.ix_(CC_probe_offset,CCoffset)] CC_norm=np.mean(temp,axis=0, dtype=np.float64,keepdims=True) ##print(CC_norm) else: CC_norm=0.0 #if / should = 1 #dealing with probes if 'gtype' in request.POST and request.POST['gtype'] == 'probeid': gene = ProbeID.objects.filter(platform__in=ps_id).filter(Probe_id__in=words).order_by('id') probe_offset=list(gene.values_list('offset',flat=True)) ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Probe_id__in=words).order_by('id') nciprobe_offset=list(ncigene.values_list('offset',flat=True)) #nci60 and ccle use same probe set(ncigene) and nicprobe # Make a generator to generate all (cell, probe, val) pairs if(len(gene)!=0 and len(cell)!=0): raw_test=sanger_val[np.ix_(probe_offset,offset)] normalize=np.subtract(raw_test,norm)#dimension different!!!! 
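# Rank lookup used in the tuples below, assuming ranking_u133a.npy and
# ranking_u133plus2.npy hold every platform intensity sorted in ascending
# order: np.where(rank_array == value)[0] is the position of the value in
# that sorted array, and subtracting it from a platform constant (22216 for
# U133A, 54614 for PLUS2 here; one branch below uses 54676) turns it into a
# rank counted from the highest expression.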
#normalize=np.around(normalize, decimals=1) cell_probe_val_pairs = ( (c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(gene) for cell_ix, c in enumerate(cell) ) else: cell_probe_val_pairs =() if(len(ncigene)!=0 and len(ncicell)!=0): nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)] nci_normalize=np.subtract(nci_raw_test,nci_norm) nci_cell_probe_val_pairs = ( (c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(ncicell) ) else: nci_cell_probe_val_pairs =() if(len(ncigene)!=0 and len(CCcell)!=0): CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)] CC_normalize=np.subtract(CC_raw_test,CC_norm) CC_cell_probe_val_pairs = ( (c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(CCcell) ) else: CC_cell_probe_val_pairs =() context['cell_probe_val_pairs']=cell_probe_val_pairs context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs return render_to_response('data.html', RequestContext(request,context)) elif 'gtype' in request.POST and request.POST['gtype'] == 'symbol': gene = ProbeID.objects.filter(platform__in=ps_id).filter(Gene_symbol__in=words).order_by('id') probe_offset=gene.values_list('offset',flat=True) ncigene = ProbeID.objects.filter(platform__in=pn_id).filter(Gene_symbol__in=words).order_by('id') nciprobe_offset=ncigene.values_list('offset',flat=True) #nci60 and ccle use same probe set(ncigene) and nicprobe # Make a generator to generate all (cell, probe, val) pairs if(len(gene)!=0 and len(cell)!=0): raw_test=sanger_val[np.ix_(probe_offset,offset)] normalize=np.subtract(raw_test,norm) cell_probe_val_pairs = ( (c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(gene) for cell_ix, c in enumerate(cell) ) else: cell_probe_val_pairs =() if(len(ncigene)!=0 and len(ncicell)!=0): nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)] nci_normalize=np.subtract(nci_raw_test,nci_norm) nci_cell_probe_val_pairs = ( (c, p, nci_raw_test[probe_ix, cell_ix],54676-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(ncicell) ) else: nci_cell_probe_val_pairs =() if(len(ncigene)!=0 and len(CCcell)!=0): CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)] CC_normalize=np.subtract(CC_raw_test,CC_norm) CC_cell_probe_val_pairs = ( (c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(CCcell) ) else: CC_cell_probe_val_pairs =() context['cell_probe_val_pairs']=cell_probe_val_pairs context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs return render_to_response('data.html', RequestContext(request,context)) elif 'gtype' in request.POST and request.POST['gtype'] == 'entrez': gene = ProbeID.objects.filter(platform__in=ps_id).filter(Entrez_id=words).order_by('id') probe_offset=gene.values_list('offset',flat=True) ncigene = 
ProbeID.objects.filter(platform__in=pn_id).filter(Entrez_id__in=words).order_by('id') nciprobe_offset=ncigene.values_list('offset',flat=True) #nci60 and ccle use same probe set(ncigene) and nicprobe # Make a generator to generate all (cell, probe, val) pairs if(len(gene)!=0 and len(cell)!=0): raw_test=sanger_val[np.ix_(probe_offset,offset)] normalize=np.subtract(raw_test,norm) cell_probe_val_pairs = ( (c, p, raw_test[probe_ix, cell_ix],22216-np.where(u133a_rank==raw_test[probe_ix, cell_ix])[0],normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(gene) for cell_ix, c in enumerate(cell) ) else: cell_probe_val_pairs =() if(len(ncigene)!=0 and len(ncicell)!=0): nci_raw_test=nci_val[np.ix_(nciprobe_offset,ncioffset)] nci_normalize=np.subtract(nci_raw_test,nci_norm) nci_cell_probe_val_pairs = ( (c, p, nci_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==nci_raw_test[probe_ix, cell_ix])[0],nci_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(ncicell) ) else: nci_cell_probe_val_pairs =() if(len(ncigene)!=0 and len(CCcell)!=0): CC_raw_test=gse_val[np.ix_(nciprobe_offset,CCoffset)] CC_normalize=np.subtract(CC_raw_test,CC_norm) CC_cell_probe_val_pairs = ( (c, p, CC_raw_test[probe_ix, cell_ix],54614-np.where(plus2_rank==CC_raw_test[probe_ix, cell_ix])[0],CC_normalize[probe_ix, cell_ix]) for probe_ix, p in enumerate(ncigene) for cell_ix, c in enumerate(CCcell) ) else: CC_cell_probe_val_pairs =() context['cell_probe_val_pairs']=cell_probe_val_pairs context['nci_cell_probe_val_pairs']=nci_cell_probe_val_pairs context['CC_cell_probe_val_pairs']=CC_cell_probe_val_pairs return render_to_response('data.html', RequestContext(request,context)) else: return HttpResponse( "<p>keyword type not match with your keyword input</p>" )
mit
aetilley/scikit-learn
sklearn/datasets/samples_generator.py
45
56433
""" Generate samples of synthetic data sets. """ # Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, # G. Louppe, J. Nothman # License: BSD 3 clause import numbers import warnings import array import numpy as np from scipy import linalg import scipy.sparse as sp from ..preprocessing import MultiLabelBinarizer from ..utils import check_array, check_random_state from ..utils import shuffle as util_shuffle from ..utils.fixes import astype from ..utils.random import sample_without_replacement from ..externals import six map = six.moves.map zip = six.moves.zip def _generate_hypercube(samples, dimensions, rng): """Returns distinct binary samples of length dimensions """ if dimensions > 30: return np.hstack([_generate_hypercube(samples, dimensions - 30, rng), _generate_hypercube(samples, 30, rng)]) out = astype(sample_without_replacement(2 ** dimensions, samples, random_state=rng), dtype='>u4', copy=False) out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:] return out def make_classification(n_samples=100, n_features=20, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None): """Generate a random n-class classification problem. This initially creates clusters of points normally distributed (std=1) about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal number of clusters to each class. It introduces interdependence between these features and adds various types of further noise to the data. Prior to shuffling, `X` stacks a number of these primary "informative" features, "redundant" linear combinations of these, "repeated" duplicates of sampled features, and arbitrary noise for and remaining features. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. These comprise `n_informative` informative features, `n_redundant` redundant features, `n_repeated` duplicated features and `n_features-n_informative-n_redundant- n_repeated` useless features drawn at random. n_informative : int, optional (default=2) The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension `n_informative`. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined within each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, optional (default=2) The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, optional (default=0) The number of duplicated features, drawn randomly from the informative and the redundant features. n_classes : int, optional (default=2) The number of classes (or labels) of the classification problem. n_clusters_per_class : int, optional (default=2) The number of clusters per class. weights : list of floats or None (default=None) The proportions of samples assigned to each class. If None, then classes are balanced. Note that if `len(weights) == n_classes - 1`, then the last class weight is automatically inferred. More than `n_samples` samples may be returned if the sum of `weights` exceeds 1. 
flip_y : float, optional (default=0.01) The fraction of samples whose class are randomly exchanged. class_sep : float, optional (default=1.0) The factor multiplying the hypercube dimension. hypercube : boolean, optional (default=True) If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float, array of shape [n_features] or None, optional (default=0.0) Shift features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float, array of shape [n_features] or None, optional (default=1.0) Multiply features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : boolean, optional (default=True) Shuffle the samples and the features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for class membership of each sample. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. See also -------- make_blobs: simplified variant make_multilabel_classification: unrelated generator for multilabel tasks """ generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError("Number of informative, redundant and repeated " "features must sum to less than the number of total" " features") if 2 ** n_informative < n_classes * n_clusters_per_class: raise ValueError("n_classes * n_clusters_per_class must" " be smaller or equal 2 ** n_informative") if weights and len(weights) not in [n_classes, n_classes - 1]: raise ValueError("Weights specified but incompatible with number " "of classes.") n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class if weights and len(weights) == (n_classes - 1): weights.append(1.0 - sum(weights)) if weights is None: weights = [1.0 / n_classes] * n_classes weights[-1] = 1.0 - sum(weights[:-1]) # Distribute samples among clusters by weight n_samples_per_cluster = [] for k in range(n_clusters): n_samples_per_cluster.append(int(n_samples * weights[k % n_classes] / n_clusters_per_class)) for i in range(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Intialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples, dtype=np.int) # Build the polytope whose vertices become cluster centroids centroids = _generate_hypercube(n_clusters, n_informative, generator).astype(float) centroids *= 2 * class_sep centroids -= class_sep if not hypercube: centroids *= generator.rand(n_clusters, 1) centroids *= generator.rand(1, n_informative) # Initially draw informative features from the standard normal X[:, :n_informative] = generator.randn(n_samples, n_informative) # Create each cluster; a variant of make_blobs stop = 0 for k, centroid in enumerate(centroids): start, stop = stop, stop + n_samples_per_cluster[k] y[start:stop] = k % 
n_classes # assign labels X_k = X[start:stop, :n_informative] # slice a view of the cluster A = 2 * generator.rand(n_informative, n_informative) - 1 X_k[...] = np.dot(X_k, A) # introduce random covariance X_k += centroid # shift the cluster to a vertex # Create redundant features if n_redundant > 0: B = 2 * generator.rand(n_informative, n_redundant) - 1 X[:, n_informative:n_informative + n_redundant] = \ np.dot(X[:, :n_informative], B) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp) X[:, n:n + n_repeated] = X[:, indices] # Fill useless features if n_useless > 0: X[:, -n_useless:] = generator.randn(n_samples, n_useless) # Randomly replace labels if flip_y >= 0.0: flip_mask = generator.rand(n_samples) < flip_y y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum()) # Randomly shift and scale if shift is None: shift = (2 * generator.rand(n_features) - 1) * class_sep X += shift if scale is None: scale = 1 + 100 * generator.rand(n_features) X *= scale if shuffle: # Randomly permute samples X, y = util_shuffle(X, y, random_state=generator) # Randomly permute features indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator=False, return_distributions=False, random_state=None): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. n_classes : int, optional (default=5) The number of classes of the classification problem. n_labels : int, optional (default=2) The average number of labels per instance. More precisely, the number of labels per sample is drawn from a Poisson distribution with ``n_labels`` as its expected value, but samples are bounded (using rejection sampling) by ``n_classes``, and must be nonzero if ``allow_unlabeled`` is False. length : int, optional (default=50) The sum of the features (number of words if documents) is drawn from a Poisson distribution with this expected value. allow_unlabeled : bool, optional (default=True) If ``True``, some instances might not belong to any class. sparse : bool, optional (default=False) If ``True``, return a sparse feature matrix return_indicator : bool, optional (default=False), If ``True``, return ``Y`` in the binary indicator format, else return a tuple of lists of labels. return_distributions : bool, optional (default=False) If ``True``, return the prior class probability and conditional probabilities of features given classes, from which the data was drawn. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array or sparse CSR matrix of shape [n_samples, n_features] The generated samples. Y : tuple of lists or array of shape [n_samples, n_classes] The label sets. p_c : array, shape [n_classes] The probability of each class being drawn. Only returned if ``return_distributions=True``. p_w_c : array, shape [n_features, n_classes] The probability of each feature being drawn given each class. Only returned if ``return_distributions=True``. """ generator = check_random_state(random_state) p_c = generator.rand(n_classes) p_c /= p_c.sum() cumulative_p_c = np.cumsum(p_c) p_w_c = generator.rand(n_features, n_classes) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling y_size = n_classes + 1 while (not allow_unlabeled and y_size == 0) or y_size > n_classes: y_size = generator.poisson(n_labels) # pick n classes y = set() while len(y) != y_size: # pick a class with probability P(c) c = np.searchsorted(cumulative_p_c, generator.rand(y_size - len(y))) y.update(c) y = list(y) # pick a non-zero document length by rejection sampling n_words = 0 while n_words == 0: n_words = generator.poisson(length) # generate a document of length n_words if len(y) == 0: # if sample does not belong to any class, generate noise word words = generator.randint(n_features, size=n_words) return words, y # sample words with replacement from selected classes cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum() cumulative_p_w_sample /= cumulative_p_w_sample[-1] words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words)) return words, y X_indices = array.array('i') X_indptr = array.array('i', [0]) Y = [] for i in range(n_samples): words, y = sample_example() X_indices.extend(words) X_indptr.append(len(X_indices)) Y.append(y) X_data = np.ones(len(X_indices), dtype=np.float64) X = sp.csr_matrix((X_data, X_indices, X_indptr), shape=(n_samples, n_features)) X.sum_duplicates() if not sparse: X = X.toarray() if return_indicator: lb = MultiLabelBinarizer() Y = lb.fit([range(n_classes)]).transform(Y) else: warnings.warn('Support for the sequence of sequences multilabel ' 'representation is being deprecated and replaced with ' 'a sparse indicator matrix. ' 'return_indicator will default to True from version ' '0.17.', DeprecationWarning) if return_distributions: return X, Y, p_c, p_w_c return X, Y def make_hastie_10_2(n_samples=12000, random_state=None): """Generates data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1 Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=12000) The number of samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 10] The input samples. 
y : array of shape [n_samples] The output values. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. See also -------- make_gaussian_quantiles: a generalization of this dataset approach """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64) y[y == 0.0] = -1.0 return X, y def make_regression(n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. 
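A minimal shape-only sketch, assuming the remaining keyword arguments keep
their defaults:

>>> from sklearn.datasets.samples_generator import make_regression
>>> X, y, w = make_regression(n_samples=10, n_features=4, n_informative=2, coef=True, random_state=0)
>>> X.shape, y.shape, w.shape
((10, 4), (10,), (4,))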
""" n_informative = min(n_features, n_informative) generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.randn(n_samples, n_features) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.rand(n_informative, n_targets) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = np.arange(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None, factor=.8): """Make a large circle containing a smaller circle in 2d. A simple toy dataset to visualize clustering and classification algorithms. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle: bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. factor : double < 1 (default=.8) Scale factor between inner and outer circle. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. """ if factor > 1 or factor < 0: raise ValueError("'factor' has to be between 0 and 1.") generator = check_random_state(random_state) # so as not to have the first point = last point, we add one and then # remove it. linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1] outer_circ_x = np.cos(linspace) outer_circ_y = np.sin(linspace) inner_circ_x = outer_circ_x * factor inner_circ_y = outer_circ_y * factor X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp), np.ones(n_samples // 2, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles A simple toy dataset to visualize clustering and classification algorithms. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle : bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. Read more in the :ref:`User Guide <sample_generators>`. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" n_samples_out = n_samples // 2 n_samples_in = n_samples - n_samples_out generator = check_random_state(random_state) outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5 X = np.vstack((np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples_in, dtype=np.intp), np.ones(n_samples_out, dtype=np.intp)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if noise is not None: X += generator.normal(scale=noise, size=X.shape) return X, y def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None): """Generate isotropic Gaussian blobs for clustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The total number of points equally divided among clusters. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=3) The number of centers to generate, or the fixed center locations. cluster_std: float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box: pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. Examples -------- >>> from sklearn.datasets.samples_generator import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print(X.shape) (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) See also -------- make_classification: a more intricate variant """ generator = check_random_state(random_state) if isinstance(centers, numbers.Integral): centers = generator.uniform(center_box[0], center_box[1], size=(centers, n_features)) else: centers = check_array(centers) n_features = centers.shape[1] if isinstance(cluster_std, numbers.Real): cluster_std = np.ones(len(centers)) * cluster_std X = [] y = [] n_centers = centers.shape[0] n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in range(n_samples % n_centers): n_samples_per_center[i] += 1 for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)): X.append(centers[i] + generator.normal(scale=std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: indices = np.arange(n_samples) generator.shuffle(indices) X = X[indices] y = y[indices] return X, y def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None): """Generate the "Friedman \#1" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are independent features uniformly distributed on the interval [0, 1]. 
The output `y` is created according to the formula:: y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). Out of the `n_features` features, only 5 are actually used to compute `y`. The remaining features are independent of `y`. The number of features has to be >= 5. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. Should be at least 5. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ if n_features < 5: raise ValueError("n_features must be at least five.") generator = check_random_state(random_state) X = generator.rand(n_samples, n_features) y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples) return X, y def make_friedman2(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#2" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \ + noise * generator.randn(n_samples) return X, y def make_friedman3(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman \#3" regression problem This dataset is described in Friedman [1] and Breiman [2]. 
Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ / X[:, 0]) + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \ + noise * generator.randn(n_samples) return X, y def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10, tail_strength=0.5, random_state=None): """Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. effective_rank : int, optional (default=10) The approximate number of singular vectors required to explain most of the data by linear combinations. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The matrix. 
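A minimal shape-only sketch with the default ``tail_strength``:

>>> from sklearn.datasets.samples_generator import make_low_rank_matrix
>>> X = make_low_rank_matrix(n_samples=50, n_features=20, effective_rank=5, random_state=0)
>>> X.shape
(50, 20)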
""" generator = check_random_state(random_state) n = min(n_samples, n_features) # Random (ortho normal) vectors u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic') v, _ = linalg.qr(generator.randn(n_features, n), mode='economic') # Index of the singular values singular_ind = np.arange(n, dtype=np.float64) # Build the singular profile by assembling signal and noise components low_rank = ((1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)) tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) s = np.identity(n) * (low_rank + tail) return np.dot(np.dot(u, s), v.T) def make_sparse_coded_signal(n_samples, n_components, n_features, n_nonzero_coefs, random_state=None): """Generate a signal as a sparse combination of dictionary elements. Returns a matrix Y = DX, such as D is (n_features, n_components), X is (n_components, n_samples) and each column of X has exactly n_nonzero_coefs non-zero elements. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int number of samples to generate n_components: int, number of components in the dictionary n_features : int number of features of the dataset to generate n_nonzero_coefs : int number of active (non-zero) coefficients in each sample random_state: int or RandomState instance, optional (default=None) seed used by the pseudo random number generator Returns ------- data: array of shape [n_features, n_samples] The encoded signal (Y). dictionary: array of shape [n_features, n_components] The dictionary with normalized components (D). code: array of shape [n_components, n_samples] The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). """ generator = check_random_state(random_state) # generate dictionary D = generator.randn(n_features, n_components) D /= np.sqrt(np.sum((D ** 2), axis=0)) # generate code X = np.zeros((n_components, n_samples)) for i in range(n_samples): idx = np.arange(n_components) generator.shuffle(idx) idx = idx[:n_nonzero_coefs] X[idx, i] = generator.randn(n_nonzero_coefs) # encode signal Y = np.dot(D, X) return map(np.squeeze, (Y, D, X)) def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None): """Generate a random regression problem with sparse uncorrelated design This dataset is described in Celeux et al [1]. as:: X ~ N(0, 1) y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] Only the first 4 features are informative. The remaining features are useless. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, "Regularization in regression: comparing Bayesian and frequentist methods in a poorly informative situation", 2009. 
""" generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), scale=np.ones(n_samples)) return X, y def make_spd_matrix(n_dim, random_state=None): """Generate a random symmetric, positive-definite matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_dim : int The matrix dimension. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_dim, n_dim] The random symmetric, positive-definite matrix. See also -------- make_sparse_spd_matrix """ generator = check_random_state(random_state) A = generator.rand(n_dim, n_dim) U, s, V = linalg.svd(np.dot(A.T, A)) X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V) return X def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False, smallest_coef=.1, largest_coef=.9, random_state=None): """Generate a sparse symmetric definite positive matrix. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- dim: integer, optional (default=1) The size of the random matrix to generate. alpha: float between 0 and 1, optional (default=0.95) The probability that a coefficient is non zero (see notes). random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. largest_coef : float between 0 and 1, optional (default=0.9) The value of the largest coefficient. smallest_coef : float between 0 and 1, optional (default=0.1) The value of the smallest coefficient. norm_diag : boolean, optional (default=False) Whether to normalize the output matrix to make the leading diagonal elements all 1 Returns ------- prec : sparse matrix of shape (dim, dim) The generated matrix. Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. See also -------- make_spd_matrix """ random_state = check_random_state(random_state) chol = -np.eye(dim) aux = random_state.rand(dim, dim) aux[aux < alpha] = 0 aux[aux > alpha] = (smallest_coef + (largest_coef - smallest_coef) * random_state.rand(np.sum(aux > alpha))) aux = np.tril(aux, k=-1) # Permute the lines: we don't want to have asymmetries in the final # SPD matrix permutation = random_state.permutation(dim) aux = aux[permutation].T[permutation] chol += aux prec = np.dot(chol.T, chol) if norm_diag: # Form the diagonal vector into a row matrix d = np.diag(prec).reshape(1, prec.shape[0]) d = 1. / np.sqrt(d) prec *= d prec *= d.T return prec def make_swiss_roll(n_samples=100, noise=0.0, random_state=None): """Generate a swiss roll dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. Notes ----- The algorithm is from Marsland [1]. References ---------- .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective", Chapter 10, 2009. http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py """ generator = check_random_state(random_state) t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples)) x = t * np.cos(t) y = 21 * generator.rand(1, n_samples) z = t * np.sin(t) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_s_curve(n_samples=100, noise=0.0, random_state=None): """Generate an S curve dataset. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. """ generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_gaussian_quantiles(mean=None, cov=1., n_samples=100, n_features=2, n_classes=3, shuffle=True, random_state=None): """Generate isotropic Gaussian and label samples by quantile This classification dataset is constructed by taking a multi-dimensional standard normal distribution and defining classes separated by nested concentric multi-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- mean : array of shape [n_features], optional (default=None) The mean of the multi-dimensional normal distribution. If None then use the origin (0, 0, ...). cov : float, optional (default=1.) The covariance matrix will be this value times the unit matrix. This dataset only produces symmetric normal distributions. n_samples : int, optional (default=100) The total number of points equally divided among classes. n_features : int, optional (default=2) The number of features for each sample. n_classes : int, optional (default=3) The number of classes shuffle : boolean, optional (default=True) Shuffle the samples. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for quantile membership of each sample. Notes ----- The dataset is from Zhu et al [1]. References ---------- .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ if n_samples < n_classes: raise ValueError("n_samples must be at least n_classes") generator = check_random_state(random_state) if mean is None: mean = np.zeros(n_features) else: mean = np.array(mean) # Build multivariate normal distribution X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,)) # Sort by distance from origin idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1)) X = X[idx, :] # Label by quantile step = n_samples // n_classes y = np.hstack([np.repeat(np.arange(n_classes), step), np.repeat(n_classes - 1, n_samples - step * n_classes)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) return X, y def _shuffle(data, random_state=None): generator = check_random_state(random_state) n_rows, n_cols = data.shape row_idx = generator.permutation(n_rows) col_idx = generator.permutation(n_cols) result = data[row_idx][:, col_idx] return result, row_idx, col_idx def make_biclusters(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with constant block diagonal structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer The number of biclusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and words using bipartite spectral graph partitioning. In Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 269-274). ACM. 
See also -------- make_checkerboard """ generator = check_random_state(random_state) n_rows, n_cols = shape consts = generator.uniform(minval, maxval, n_clusters) # row and column clusters of approximately equal sizes row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_clusters, n_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_clusters, n_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_clusters): selector = np.outer(row_labels == i, col_labels == i) result[selector] += consts[i] if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == c for c in range(n_clusters)) cols = np.vstack(col_labels == c for c in range(n_clusters)) return result, rows, cols def make_checkerboard(shape, n_clusters, noise=0.0, minval=10, maxval=100, shuffle=True, random_state=None): """Generate an array with block checkerboard structure for biclustering. Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- shape : iterable (n_rows, n_cols) The shape of the result. n_clusters : integer or iterable (n_row_clusters, n_column_clusters) The number of row and column clusters. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. minval : int, optional (default=10) Minimum value of a bicluster. maxval : int, optional (default=100) Maximum value of a bicluster. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape `shape` The generated array. rows : array of shape (n_clusters, X.shape[0],) The indicators for cluster membership of each row. cols : array of shape (n_clusters, X.shape[1],) The indicators for cluster membership of each column. References ---------- .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003). Spectral biclustering of microarray data: coclustering genes and conditions. Genome research, 13(4), 703-716. 
See also -------- make_biclusters """ generator = check_random_state(random_state) if hasattr(n_clusters, "__len__"): n_row_clusters, n_col_clusters = n_clusters else: n_row_clusters = n_col_clusters = n_clusters # row and column clusters of approximately equal sizes n_rows, n_cols = shape row_sizes = generator.multinomial(n_rows, np.repeat(1.0 / n_row_clusters, n_row_clusters)) col_sizes = generator.multinomial(n_cols, np.repeat(1.0 / n_col_clusters, n_col_clusters)) row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_row_clusters), row_sizes))) col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in zip(range(n_col_clusters), col_sizes))) result = np.zeros(shape, dtype=np.float64) for i in range(n_row_clusters): for j in range(n_col_clusters): selector = np.outer(row_labels == i, col_labels == j) result[selector] += generator.uniform(minval, maxval) if noise > 0: result += generator.normal(scale=noise, size=result.shape) if shuffle: result, row_idx, col_idx = _shuffle(result, random_state) row_labels = row_labels[row_idx] col_labels = col_labels[col_idx] rows = np.vstack(row_labels == label for label in range(n_row_clusters) for _ in range(n_col_clusters)) cols = np.vstack(col_labels == label for _ in range(n_row_clusters) for label in range(n_col_clusters)) return result, rows, cols
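# A minimal usage sketch for the regression/classification generators defined
# above. It assumes this module is exposed through the ``sklearn.datasets``
# namespace (that import path is an assumption of this sketch); the parameter
# values below are purely illustrative.
import numpy as np
from sklearn.datasets import (make_friedman1, make_friedman2,
                              make_gaussian_quantiles)

# Friedman #1: only the first 5 of the requested features influence y.
X1, y1 = make_friedman1(n_samples=200, n_features=10, noise=0.1,
                        random_state=0)

# Friedman #2: 4 features on fixed intervals, combined non-linearly.
X2, y2 = make_friedman2(n_samples=200, noise=0.1, random_state=0)

# Isotropic Gaussian, labelled into 3 concentric classes by chi^2 quantiles.
Xq, yq = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                 random_state=0)

print(X1.shape, y1.shape)   # (200, 10) (200,)
print(np.bincount(yq))      # close to 100 samples per class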
bsd-3-clause
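# A further sketch for the matrix-oriented generators in the scikit-learn
# module above (make_low_rank_matrix, make_sparse_coded_signal); the
# sklearn.datasets import path is again an assumption of this sketch.
import numpy as np
from sklearn.datasets import make_low_rank_matrix, make_sparse_coded_signal

# Bell-shaped singular-value profile: most variance in ~effective_rank directions.
X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                         tail_strength=0.1, random_state=0)
s = np.linalg.svd(X, compute_uv=False)
print(s[:5].sum() / s.sum())            # leading singular values dominate

# Y = D X with exactly n_nonzero_coefs active atoms per column of X.
Y, D, code = make_sparse_coded_signal(n_samples=20, n_components=15,
                                      n_features=10, n_nonzero_coefs=3,
                                      random_state=0)
print(np.count_nonzero(code, axis=0))   # 3 non-zeros in every column
print(np.allclose(Y, np.dot(D, code)))  # True by construction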
rbooth200/DiscEvolution
DiscEvolution/internal_photo.py
1
30463
# internal_photo.py # # Author: A. Sellek # Date: 12 - Aug - 2020 # # Implementation of Photoevaporation Models ################################################################################ import numpy as np import argparse import json import matplotlib.pyplot as plt from DiscEvolution.constants import * from DiscEvolution.star import PhotoStar from scipy.signal import argrelmin class NotHoleError(Exception): """Raised if finds an outer edge, not a hole""" pass class PhotoBase(): def __init__(self, disc, Regime=None, Type=None): # Basic mass loss properties self._regime = Regime # EUV or X-ray self._type = Type # 'Primordial' or 'InnerHole' self._Sigmadot = np.zeros_like(disc.R) self.mdot_XE(disc.star) # Evolutionary state flags self._Hole = False # Has the hole started to open? self._reset = False # Have we needed to reset a decoy hole? self._empty = False # When no longer a valid hole radius or all below density threshold self._Thin = False # Is the hole exposed (ie low column density to star)? # Parameters of hole self._R_hole = None self._N_hole = None # The column density threshold below which the inner disc is "Thin" if self._regime=='X-ray': self._N_crit = 1e22 elif self._regime=='EUV': self._N_crit = 1e18 else: self._N_crit = 0.0 # (if 0, can never switch) # Outer radius self._R_out = max(disc.R_edge) def mdot_XE(self, star, Mdot=0): # Generic wrapper for initiating X-ray or EUV mass loss # Without prescription, mass loss is 0 self._Mdot = Mdot self._Mdot_true = Mdot def Sigma_dot(self, R, star): if self._type=='Primordial': self.Sigma_dot_Primordial(R, star) elif self._type=='InnerHole': self.Sigma_dot_InnerHole(R, star) def Sigma_dot_Primordial(self, R, star, ret=False): # Without prescription, mass loss is 0 if ret: return np.zeros(len(R)+1) else: self._Sigmadot = np.zeros_like(R) def Sigma_dot_InnerHole(self, R, star, ret=False): # Without prescription, mass loss is 0 if ret: return np.zeros(len(R)+1) else: self._Sigmadot = np.zeros_like(R) def scaled_R(self, R, star): # Prescriptions may rescale the radius variable # Without prescription, radius is unscaled return R def R_inner(self, star): # Innermost mass loss return 0 def check_dt(self, disc, dt): # Work out the timescale to clear cell where_photoevap = (self.dSigmadt > 0) t_w = np.full_like(disc.R,np.inf) t_w[where_photoevap] = disc.Sigma_G[where_photoevap] / self.dSigmadt[where_photoevap] # Return minimum value for cells inside outer edge indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region. try: imin = argrelmin(t_w[indisc])[0][0] # Find local minima in clearing time, neglecting outer edge where tails off. Take first to avoid solutions due to noise in dusty outskirts except IndexError: # If no local minimum, try to find hole as wherever the min is. 
imin = np.argmin(t_w[indisc]) # Check against timestep and report if (dt > t_w[where_photoevap][imin]): # If an entire cell can deplete #if not self._Hole: # print("Alert - hole can open after this timestep at {:.2f} AU".format(disc.R[imin])) # print("Outer radius is currently {:.2f} AU".format(self._R_out)) self._Hole = True # Set hole flag return t_w[where_photoevap][imin] def remove_mass(self, disc, dt, external_photo=None): # Find disc "outer edge" so we can apply mass loss only inside if external_photo: self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence else: self._R_out = disc.Rout(thresh=1e-10) if disc.Rout()==0.0: print("Disc everywhere below density threshold. Declare Empty.") self._empty = True # Check whether hole can open if not self._Hole: #self._type=='Primordial': self.check_dt(disc, dt) # Determine mass loss dSigma = np.minimum(self.dSigmadt * dt, disc.Sigma_G) # Limit mass loss to density of cell dSigma *= (disc.R < self._R_out) # Only apply mass loss inside disc outer edge # Apply, preserving the dust mass if hasattr(disc, 'Sigma_D'): Sigma_D = disc.Sigma_D # Save the dust density disc._Sigma -= dSigma if hasattr(disc, 'Sigma_D'): dusty = Sigma_D.sum(0)>0 disc.dust_frac[:,dusty] = np.fmin(Sigma_D[:,dusty]/disc.Sigma[dusty],disc.dust_frac[:,dusty]/disc.dust_frac.sum(0)[dusty]) disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0) # Renormalise to 1 if it exceeds # Calculate actual mass loss given limit if dt>0: dM = 2*np.pi * disc.R * dSigma self._Mdot_true = np.trapz(dM,disc.R) / dt * AU**2 / Msun def get_Rhole(self, disc, external_photo=None): """Deal with calls when there is no hole""" if not self._Hole: print("No hole for which to get radius. Ignoring command and returning nans.") return np.nan, np.nan """Otherwise continue on to find hole First find outer edge of disc - hole must be inside this""" if external_photo: self._R_out = external_photo._Rot # If external photoevaporation is present, only consider radii inside its influence else: self._R_out = disc.Rout(thresh=1e-10) where_photoevap = (self.dSigmadt > 0) indisc = (disc.R < self._R_out) * where_photoevap # Prohibit hole outside of mass loss region. empty_indisc = (disc.Sigma_G <= 1e-10) * indisc # Consider empty if below 10^-10 g/cm^2 try: if np.sum(empty_indisc) == 0: # If none in disc are empty minima = argrelmin(disc.Sigma_G) if len(minima[0]) > 0: i_hole_out = minima[0][0] # Position of hole is minimum density else: # No empty cells anymore - disc has cleared to outside raise NotHoleError else: # First find the inner edge of the innermost hole i_hole_in = np.nonzero(empty_indisc)[0][0] # The hole cell is defined as the one inside the first non-empty cell outside the inner edge of the hole outer_disc = ~empty_indisc * (disc.R>disc.R_edge[i_hole_in]) if np.sum(outer_disc) > 0: i_hole_out = np.nonzero(outer_disc)[0][0] - 1 else: # No non-empty cells outside this - this is not a hole, but an outer edge. 
raise NotHoleError if i_hole_out == np.nonzero(indisc)[0][-1]: # This is not a hole, but the outermost photoevaporating cell raise NotHoleError """If hole position drops by an order of magnitude, it is likely that the previous was really the clearing of low surface density material in the outer disc, so reset""" if self._R_hole: R_old = self._R_hole if disc.R_edge[i_hole_out+1]/R_old<0.1: self._reset = True """If everything worked, update hole properties""" if not self._R_hole: print("Hole opened at {:.2f} AU".format(disc.R_edge[i_hole_out+1])) self._R_hole = disc.R_edge[i_hole_out+1] self._N_hole = disc.column_density[i_hole_out] # Test whether Thin if (self._N_hole < self._N_crit): self._Thin = True except NotHoleError: """Potential hole isn't a hole but an outer edge""" if self._type == 'Primordial': self._Hole = False self._reset = True if self._R_hole: print("No hole found") print("Last known location {} AU".format(self._R_hole)) return 0, 0 elif self._type == 'InnerHole': if not self._empty: print("Transition Disc has cleared to outside") self._empty = True # Proceed as usual to report but without update # Save state if tracking return self._R_hole, self._N_hole @property def Mdot(self): return self._Mdot @property def dSigmadt(self): return self._Sigmadot def __call__(self, disc, dt, external_photo=None): # For inner hole discs, need to update the hole radius and then the mass-loss as the normalisation changes based on R, not just x~R-Rhole. if self._type=='InnerHole': self.get_Rhole(disc) self.Sigma_dot(disc.R_edge, disc.star) # Remove the mass self.remove_mass(disc,dt, external_photo) # Check for new holes if self._Hole and not self._Thin: # If there is a hole but the inner disc is not already optically thin, update its properties R_hole, N_hole = self.get_Rhole(disc, external_photo) # Check if hole is now large enough that inner disc optically thin, switch internal photoevaporation to direct field if so if self._Thin: print("Column density to hole has fallen to N = {} < {} g cm^-2".format(N_hole,self._N_crit)) self._type = 'InnerHole' # Run the mass loss rates to update the table self.mdot_XE(disc.star) self.Sigma_dot(disc.R_edge, disc.star) # Report print("At initiation of InnerHole Type, M_D = {} M_J, Mdot = {}, t_clear ~ {} yr".format(disc.Mtot()/Mjup, self._Mdot, disc.Mtot()/Msun/self._Mdot)) def ASCII_header(self): return ("# InternalEvaporation, Type: {}, Mdot: {}" "".format(self._type+self.__class__.__name__,self._Mdot)) def HDF5_attributes(self): header = {} header['Type'] = self._type+"/"+self._regime header['Mdot'] = '{}'.format(self._Mdot) return self.__class__.__name__, header ################################################################################# """"""""" X-ray dominated photoevaporation -Following prescription of Owen, Ercolano and Clarke (2012) -Following prescription of Picogna, Ercolano, Owen and Weber (2019) """"""""" ################################################################################# """Owen, Ercolano and Clarke (2012)""" class XrayDiscOwen(PhotoBase): def __init__(self, disc, Type='Primordial', R_hole=None): super().__init__(disc, Regime='X-ray', Type=Type) # Parameters for Primordial mass loss profile self._a1 = 0.15138 self._b1 = -1.2182 self._c1 = 3.4046 self._d1 = -3.5717 self._e1 = -0.32762 self._f1 = 3.6064 self._g1 = -2.4918 # Parameters for Inner Hole mass loss profile self._a2 = -0.438226 self._b2 = -0.10658387 self._c2 = 0.5699464 self._d2 = 0.010732277 self._e2 = -0.131809597 self._f2 = -1.32285709 # If initiating with an 
Inner Hole disc, need to update properties if self._type == 'InnerHole': self._Hole = True self._R_hole = R_hole #self.get_Rhole(disc) # Run the mass loss rates to update the table self.Sigma_dot(disc.R_edge, disc.star) def mdot_XE(self, star, Mdot=None): # In Msun/yr if Mdot is not None: self._Mdot = Mdot elif self._type=='Primordial': self._Mdot = 6.25e-9 * star.M**(-0.068) * (star.L_X / 1e30)**(1.14) # Equation B1 elif self._type=='InnerHole': self._Mdot = 4.8e-9 * star.M**(-0.148) * (star.L_X / 1e30)**(1.14) # Equation B4 else: raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified") self._Mdot_true = self._Mdot def scaled_R(self, R, star): # Where R in AU x = 0.85 * R / star.M # Equation B3 if self._Hole: y = 0.95 * (R-self._R_hole) / star.M # Equation B6 else: y = R return x, y def R_inner(self, star): # Innermost mass loss return 0.7 / 0.85 * star.M def Sigma_dot_Primordial(self, R, star, ret=False): # Equation B2 Sigmadot = np.zeros_like(R) x, y = self.scaled_R(R,star) where_photoevap = (x >= 0.7) * (x<=99) # No mass loss close to star, mass loss prescription becomes negative at log10(x)=1.996 logx = np.log(x[where_photoevap]) log10 = np.log(10) log10x = logx/log10 # First term exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1 t1 = 10**exponent # Second term terms = 6*self._a1*logx**5/log10**7 + 5*self._b1*logx**4/log10**6 + 4*self._c1*logx**3/log10**5 + 3*self._d1*logx**2/log10**4 + 2*self._e1*logx/log10**3 + self._f1/log10**2 t2 = terms/x[where_photoevap]**2 # Third term t3 = np.exp(-(x[where_photoevap]/100)**10) # Combine terms Sigmadot[where_photoevap] = t1 * t2 * t3 # Work out total mass loss rate for normalisation M_dot = 2*np.pi * R * Sigmadot total = np.trapz(M_dot,R) # Normalise, convert to cgs Sigmadot = np.maximum(Sigmadot,0) Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 def Sigma_dot_InnerHole(self, R, star, ret=False): # Equation B5 Sigmadot = np.zeros_like(R) x, y = self.scaled_R(R,star) where_photoevap = (y >= 0.0) # No mass loss inside hole use_y = y[where_photoevap] # Exponent of second term exp2 = -(use_y/57)**10 # Numerator terms = self._a2*self._b2 * np.exp(self._b2*use_y+exp2) + self._c2*self._d2 * np.exp(self._d2*use_y+exp2) + self._e2*self._f2 * np.exp(self._f2*use_y+exp2) # Divide by Denominator Sigmadot[where_photoevap] = terms/R[where_photoevap] # Work out total mass loss rate for normalisation M_dot = 2*np.pi * R * Sigmadot total = np.trapz(M_dot,R) # Normalise, convert to cgs Sigmadot = np.maximum(Sigmadot,0) Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr # Mopping up in the gap mop_up = (x >= 0.7) * (y < 0.0) Sigmadot[mop_up] = np.inf if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 """Picogna, Ercolano, Owen and Weber (2019)""" class XrayDiscPicogna(PhotoBase): def __init__(self, disc, Type='Primordial', R_hole=None): super().__init__(disc, Regime='X-ray', Type=Type) # Parameters for Primordial mass loss profile self._a1 = -0.5885 self._b1 = 4.3130 self._c1 = -12.1214 self._d1 = 16.3587 self._e1 = -11.4721 self._f1 = 5.7248 self._g1 = -2.8562 # Parameters for 
Inner Hole mass loss profile self._a2 = 0.11843 self._b2 = 0.99695 self._c2 = 0.48835 # If initiating with an Inner Hole disc, need to update properties if self._type == 'InnerHole': self._Hole = True self._R_hole = R_hole #self.get_Rhole(disc) # Run the mass loss rates to update the table self.Sigma_dot(disc.R_edge, disc.star) def mdot_XE(self, star, Mdot=None): # In Msun/yr if Mdot is not None: self._Mdot = Mdot elif self._type=='Primordial': logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # Equation 5 self._Mdot = 10**logMd elif self._type=='InnerHole': logMd = -2.7326 * np.exp((np.log(np.log(star.L_X)/np.log(10))-3.3307)**2/-2.9868e-3) - 7.2580 # 1.12 * Equation 5 self._Mdot = 1.12 * (10**logMd) else: raise NotImplementedError("Disc is of unrecognised type, and no mass-loss rate has been manually specified") self._Mdot_true = self._Mdot def scaled_R(self, R, star): # Where R in AU # All are divided by stellar mass normalised to 0.7 Msun (value used by Picogna+19) to represent rescaling by gravitational radius x = R / (star.M/0.7) if self._Hole: y = (R-self._R_hole) / (star.M/0.7) # Equation B6 else: y = R / (star.M/0.7) return x, y def R_inner(self, star): # Innermost mass loss if self._type=='Primordial': return 0 # Mass loss possible throughout elif self._type=='InnerHole': return self._R_hole # Mass loss profile applies outside hole else: return 0 # If unspecified, assume mass loss possible throughout def Sigma_dot_Primordial(self, R, star, ret=False): # Equation B2 Sigmadot = np.zeros_like(R) x, y = self.scaled_R(R,star) where_photoevap = (x<=137) # Mass loss prescription becomes negative at x=1.3785 logx = np.log(x[where_photoevap]) log10 = np.log(10) log10x = logx/log10 # First term exponent = self._a1 * log10x**6 + self._b1 * log10x**5 + self._c1 * log10x**4 + self._d1 * log10x**3 + self._e1 * log10x**2 + self._f1 * log10x + self._g1 t1 = 10**exponent # Second term terms = 6*self._a1*log10x**5 + 5*self._b1*log10x**4 + 4*self._c1*log10x**3 + 3*self._d1*log10x**2 + 2*self._e1*log10x + self._f1 t2 = terms/(2*np.pi*x[where_photoevap]**2) # Combine terms Sigmadot[where_photoevap] = t1 * t2 # Work out total mass loss rate for normalisation M_dot = 2*np.pi * R * Sigmadot total = np.trapz(M_dot,R) # Normalise, convert to cgs Sigmadot = np.maximum(Sigmadot,0) Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 def Sigma_dot_InnerHole(self, R, star, ret=False): # Equation B5 Sigmadot = np.zeros_like(R) x, y = self.scaled_R(R,star) where_photoevap = (y > 0.0) * (y < -self._c2/np.log(self._b2)) # No mass loss inside hole, becomes negative at x=-c/ln(b) use_y = y[where_photoevap] # Numerator terms = self._a2 * np.power(self._b2,use_y) * np.power(use_y,self._c2-1) * (use_y * np.log(self._b2) + self._c2) # Divide by Denominator Sigmadot[where_photoevap] = terms/(2*np.pi*R[where_photoevap]) # Work out total mass loss rate for normalisation M_dot = 2*np.pi * R * Sigmadot total = np.trapz(M_dot,R) # Normalise, convert to cgs Sigmadot = np.maximum(Sigmadot,0) Sigmadot *= self.Mdot / total * Msun / AU**2 # in g cm^-2 / yr # Mopping up in the gap - assume usual primordial rates there. 
Sigmadot[(y<=0.0) * (x<=137)] = self.Sigma_dot_Primordial(R, star, ret=True)[(y<=0.0)*(x<=137)]/1.12 # divide by 1.12 so that normalise to correct mass loss rate mop_up = (x > 137) * (y < 0.0) Sigmadot[mop_up] = np.inf # Avoid having discontinuous mass-loss by filling in the rest if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 ################################################################################# """"""""" EUV dominated photoevaporation -Following prescription given in Alexander and Armitage (2007) and based on Font, McCarthy, Johnstone and Ballantyne (2004) for Primordial Discs and based on Alexander, Clarke and Pringle (2006) for Inner Hole Discs """"""""" ################################################################################# class EUVDiscAlexander(PhotoBase): def __init__(self, disc, Type='Primordial', R_hole=None): super().__init__(disc, Regime='EUV', Type=Type) # Parameters for mass loss profiles self._cs = 10 # Sound speed in km s^-1 self._RG = disc.star.M / (self._cs*1e5 /Omega0/AU)**2 # Gravitational Radius in AU self._mu = 1.35 self._aB = 2.6e-13 # Case B Recombination coeff. in cm^3 s^-1 self._C1 = 0.14 self._A = 0.3423 self._B = -0.3612 self._D = 0.2457 self._C2 = 0.235 self._a = 2.42 h = disc.H/disc.R he = np.empty_like(disc.R_edge) he[1:-1] = 0.5*(h[1:] + h[-1:]) he[0] = 1.5*h[0] - 0.5*h[1] he[-1] = 1.5*h[-1] - 0.5*h[-2] self._h = he # If initiating with an Inner Hole disc, need to update properties if self._type == 'InnerHole': self._Hole = True self._R_hole = R_hole #self.get_Rhole(disc) # Run the mass loss rates to update the table self.Sigma_dot(disc.R_edge, disc.star) def mdot_XE(self, star, Mdot=0): # Store Mdot calculated from profile self._Mdot = Mdot # In Msun/yr self._Mdot_true = self._Mdot def scaled_R(self, R, star): if self._type=='Primordial': return R / self._RG # Normalise to RG elif self._type=='InnerHole': return R / self.R_inner() # Normalise to inner edge else: return R # If unspecified, don't modify def R_inner(self): # Innermost mass loss if self._type=='Primordial': return 0.1 * self._RG # Mass loss profile is only positive for >0.1 RG elif self._type=='InnerHole': return self._R_hole # Mass loss profile applies outside hole else: return 0 # If unspecified, assume mass-loss possible throughout def Sigma_dot_Primordial(self, R, star, ret=False): Sigmadot = np.zeros_like(R) x = self.scaled_R(R,star) where_photoevap = (x >= 0.1) # No mass loss close to star # Equation A3 nG = self._C1 * (3 * star.Phi / (4*np.pi * (self._RG*AU)**3 * self._aB))**(1/2) # cm^-3 # Equation A2 n0 = nG * (2 / (x**7.5 + x**12.5))**(1/5) # Equation A4 u1 = self._cs*1e5*yr/Omega0 * self._A * np.exp(self._B * (x-0.1)) * (x-0.1)**self._D # cm yr^-1 # Combine terms (Equation A1) Sigmadot[where_photoevap] = 2 * self._mu * m_H * (n0 * u1)[where_photoevap] # g cm^-2 /yr Sigmadot = np.maximum(Sigmadot,0) # Work out total mass loss rate dMdot = 2*np.pi * R * Sigmadot Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2 # Normalise, convert to cgs Mdot = Mdot * AU**2/Msun # g yr^-1 # Store result self.mdot_XE(star, Mdot=Mdot) if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 def Sigma_dot_InnerHole(self, R, star, ret=False): Sigmadot = np.zeros_like(R) x = self.scaled_R(R,star) where_photoevap = (x > 1) # No mass loss 
inside hole # Combine terms (Equation A5) Sigmadot[where_photoevap] = (2 * self._mu * m_H * self._C2 * self._cs*1e5*yr/Omega0 * (star.Phi / (4*np.pi * (self.R_inner()*AU)**3 * self._aB * self._h))**(1/2) * x**(-self._a))[where_photoevap] # g cm^-2 /yr Sigmadot = np.maximum(Sigmadot,0) # Work out total mass loss rate dMdot = 2*np.pi * R * Sigmadot Mdot = np.trapz(dMdot,R) # g yr^-1 (AU/cm)^2 # Normalise, convert to cgs Mdot = Mdot * AU**2/Msun # g yr^-1 # Store result self.mdot_XE(star, Mdot=Mdot) # Mopping up in the gap mop_up = (R >= 0.1 * self._RG) * (x <= 1.0) Sigmadot[mop_up] = np.inf if ret: # Return unaveraged values at cell edges return Sigmadot else: # Store values as average of mass loss rate at cell edges self._Sigmadot = (Sigmadot[1:] + Sigmadot[:-1]) / 2 ################################################################################# """"""""" Functions for running as main Designed for plotting to test things out """"""""" ################################################################################# class DummyDisc(object): def __init__(self, R, star, MD=10, RC=100): self._M = MD * Mjup self.Rc = RC self.R_edge = R self.R = 0.5*(self.R_edge[1:]+self.R_edge[:-1]) self._Sigma = self._M / (2 * np.pi * self.Rc * self.R * AU**2) * np.exp(-self.R/self.Rc) self.star = star def Rout(self, thresh=None): return max(self.R_edge) @property def Sigma(self): return self._Sigma @property def Sigma_G(self): return self._Sigma def main(): Sigma_dot_plot() Test_Removal() def Test_Removal(): """Removes gas fom a power law disc in regular timesteps without viscous evolution etc""" star1 = PhotoStar(LX=1e30, M=1.0, R=2.5, T_eff=4000) R = np.linspace(0.1,200,2000) disc1 = DummyDisc(R, star1, RC=10) internal_photo = XrayDiscPicogna(disc1) plt.figure() for t in np.linspace(0,2e3,6): internal_photo(disc1, 2e3) plt.loglog(0.5*(R[1:]+R[:-1]), disc1.Sigma, label='{}'.format(t)) plt.xlabel("R / AU") plt.ylabel("$\Sigma_G~/~\mathrm{g~cm^{-2}}$") plt.legend(title='Time / yr') plt.show() def Sigma_dot_plot(): """Plot a comparison of the mass loss rate prescriptions""" from control_scripts import run_model # Set up dummy model parser = argparse.ArgumentParser() parser.add_argument("--model", "-m", type=str, default=DefaultModel) args = parser.parse_args() model = json.load(open(args.model, 'r')) plt.figure(figsize=(6,6)) starX = PhotoStar(LX=1e30, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff']) starE = PhotoStar(Phi=1e42, M=model['star']['mass'], R=model['star']['radius'], T_eff=model['star']['T_eff']) disc = run_model.setup_disc(model) R = disc.R # Calculate EUV rates disc._star = starE internal_photo_E = EUVDiscAlexander(disc) Sigma_dot_E = internal_photo_E.dSigmadt photoevaporating_E = (Sigma_dot_E>0) t_w_E = disc.Sigma[photoevaporating_E] / Sigma_dot_E[photoevaporating_E] print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_E)])) print("Time minimum at R = {} AU".format(R[photoevaporating_E][np.argmin(t_w_E)])) plt.loglog(R, Sigma_dot_E, label='EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--') # Calculate X-ray rates disc._star = starX internal_photo_X = XrayDiscOwen(disc) Sigma_dot_X = internal_photo_X.dSigmadt photoevaporating_X = (Sigma_dot_X>0) t_w_X = disc.Sigma[photoevaporating_X] / Sigma_dot_X[photoevaporating_X] print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X)])) print("Time minimum at R = {} AU".format(R[photoevaporating_X][np.argmin(t_w_X)])) plt.loglog(R, Sigma_dot_X, label='X-ray (OEC12), 
$L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30)) # Calculate X-ray rates disc._star = starX internal_photo_X2 = XrayDiscPicogna(disc) Sigma_dot_X2 = internal_photo_X2.dSigmadt photoevaporating_X2 = (Sigma_dot_X2>0) t_w_X2 = disc.Sigma[photoevaporating_X2] / Sigma_dot_X2[photoevaporating_X2] print("Mdot maximum at R = {} AU".format(R[np.argmax(Sigma_dot_X2)])) print("Time minimum at R = {} AU".format(R[photoevaporating_X2][np.argmin(t_w_X2)])) plt.loglog(R, Sigma_dot_X2, label='X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30)) # Plot mass loss rates plt.xlabel("R / AU") plt.ylabel("$\dot{\Sigma}_{\\rm w}$ / g cm$^{-2}$ yr$^{-1}$") plt.xlim([0.1,1000]) plt.ylim([1e-8,1e-2]) plt.legend() plt.show() # Plot depletion time plt.figure(figsize=(6,6)) plt.loglog(R[photoevaporating_E], t_w_E, label='EUV (AA07), $\Phi={}~\mathrm{{s^{{-1}}}}$'.format(1e42), linestyle='--') plt.loglog(R[photoevaporating_X], t_w_X, label='X-ray (OEC12), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30)) plt.loglog(R[photoevaporating_X2], t_w_X2, label='X-ray (PEOW19), $L_X={}~\mathrm{{erg~s^{{-1}}}}$'.format(1e30)) plt.xlabel("R / AU") plt.ylabel("$t_w / \mathrm{yr}$") plt.xlim([0.1,1000]) plt.ylim([1e4,1e12]) plt.legend() plt.show() if __name__ == "__main__": # Set extra things DefaultModel = "../control_scripts/DiscConfig_default.json" plt.rcParams['text.usetex'] = "True" plt.rcParams['font.family'] = "serif" main()
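# A minimal driver sketch for the internal photoevaporation classes above,
# mirroring the module's own Test_Removal()/Sigma_dot_plot() helpers. It
# assumes the package layout shown in this record (DiscEvolution.internal_photo,
# DiscEvolution.star); the stellar and disc parameters are illustrative only.
import numpy as np
from DiscEvolution.star import PhotoStar
from DiscEvolution.internal_photo import (DummyDisc, XrayDiscOwen,
                                          XrayDiscPicogna)

star = PhotoStar(LX=1e30, M=1.0, R=2.5, T_eff=4000)
R_edge = np.linspace(0.1, 200, 2000)          # cell edges in AU
disc = DummyDisc(R_edge, star, MD=10, RC=10)  # 10 M_Jup power-law dummy disc

# Primordial X-ray winds following Owen et al. (2012) and Picogna et al. (2019).
wind_owen = XrayDiscOwen(disc)
wind_picogna = XrayDiscPicogna(disc)
print("Owen Mdot    = {:.2e} Msun/yr".format(wind_owen.Mdot))
print("Picogna Mdot = {:.2e} Msun/yr".format(wind_picogna.Mdot))

# Radius of peak surface mass-loss rate for each prescription.
print("Peak at R = {:.1f} AU (Owen), {:.1f} AU (Picogna)".format(
    disc.R[np.argmax(wind_owen.dSigmadt)],
    disc.R[np.argmax(wind_picogna.dSigmadt)]))

# Time integration would call the object directly, e.g. wind_picogna(disc, dt),
# as in Test_Removal(); hole opening and the InnerHole switch are handled there.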
gpl-3.0
zfrenchee/pandas
doc/sphinxext/ipython_sphinxext/ipython_directive.py
1
37812
# -*- coding: utf-8 -*- """ Sphinx directive to support embedded IPython code. This directive allows pasting of entire interactive IPython sessions, prompts and all, and their code will actually get re-executed at doc build time, with all prompts renumbered sequentially. It also allows you to input code as a pure python input by giving the argument python to the directive. The output looks like an interactive ipython section. To enable this directive, simply list it in your Sphinx ``conf.py`` file (making sure the directory where you placed it is visible to sphinx, as is needed for all Sphinx directives). For example, to enable syntax highlighting and the IPython directive:: extensions = ['IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive'] The IPython directive outputs code-blocks with the language 'ipython'. So if you do not have the syntax highlighting extension enabled as well, then all rendered code-blocks will be uncolored. By default this directive assumes that your prompts are unchanged IPython ones, but this can be customized. The configurable options that can be placed in conf.py are: ipython_savefig_dir: The directory in which to save the figures. This is relative to the Sphinx source directory. The default is `html_static_path`. ipython_rgxin: The compiled regular expression to denote the start of IPython input lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_rgxout: The compiled regular expression to denote the start of IPython output lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_promptin: The string to represent the IPython input prompt in the generated ReST. The default is 'In [%d]:'. This expects that the line numbers are used in the prompt. ipython_promptout: The string to represent the IPython prompt in the generated ReST. The default is 'Out [%d]:'. This expects that the line numbers are used in the prompt. ipython_mplbackend: The string which specifies if the embedded Sphinx shell should import Matplotlib and set the backend. The value specifies a backend that is passed to `matplotlib.use()` before any lines in `ipython_execlines` are executed. If not specified in conf.py, then the default value of 'agg' is used. To use the IPython directive without matplotlib as a dependency, set the value to `None`. It may end up that matplotlib is still imported if the user specifies so in `ipython_execlines` or makes use of the @savefig pseudo decorator. ipython_execlines: A list of strings to be exec'd in the embedded Sphinx shell. Typical usage is to make certain packages always available. Set this to an empty list if you wish to have no imports always available. If specified in conf.py as `None`, then it has the effect of making no imports available. If omitted from conf.py altogether, then the default value of ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. ipython_holdcount When the @suppress pseudo-decorator is used, the execution count can be incremented or not. The default behavior is to hold the execution count, corresponding to a value of `True`. Set this to `False` to increment the execution count after each suppressed command. As an example, to use the IPython directive when `matplotlib` is not available, one sets the backend to `None`:: ipython_mplbackend = None An example usage of the directive is: .. code-block:: rst .. 
ipython:: In [1]: x = 1 In [2]: y = x**2 In [3]: print(y) See http://matplotlib.org/sampledoc/ipython_directive.html for additional documentation. ToDo ---- - Turn the ad-hoc test() function into a real test suite. - Break up ipython-specific functionality from matplotlib stuff into better separated code. Authors ------- - John D Hunter: original author. - Fernando Perez: refactoring, documentation, cleanups, port to 0.11. - VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. - Skipper Seabold, refactoring, cleanups, pure python addition """ from __future__ import print_function from __future__ import unicode_literals #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Stdlib import os import re import sys import tempfile import ast from pandas.compat import zip, range, map, lmap, u, text_type, cStringIO as StringIO import warnings # To keep compatibility with various python versions try: from hashlib import md5 except ImportError: from md5 import md5 # Third-party import sphinx from docutils.parsers.rst import directives from docutils import nodes from sphinx.util.compat import Directive # Our own try: from traitlets.config import Config except ImportError: from IPython import Config from IPython import InteractiveShell from IPython.core.profiledir import ProfileDir from IPython.utils import io from IPython.utils.py3compat import PY3 if PY3: from io import StringIO else: from StringIO import StringIO #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- # for tokenizing blocks COMMENT, INPUT, OUTPUT = range(3) #----------------------------------------------------------------------------- # Functions and class declarations #----------------------------------------------------------------------------- def block_parser(part, rgxin, rgxout, fmtin, fmtout): """ part is a string of ipython text, comprised of at most one input, one output, comments, and blank lines. The block parser parses the text into a list of:: blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and data is, depending on the type of token:: COMMENT : the comment string INPUT: the (DECORATOR, INPUT_LINE, REST) where DECORATOR: the input decorator (or None) INPUT_LINE: the input as string (possibly multi-line) REST : any stdout generated by the input line (not OUTPUT) OUTPUT: the output string, possibly multi-line """ block = [] lines = part.split('\n') N = len(lines) i = 0 decorator = None while 1: if i==N: # nothing left to parse -- the last line break line = lines[i] i += 1 line_stripped = line.strip() if line_stripped.startswith('#'): block.append((COMMENT, line)) continue if line_stripped.startswith('@'): # we're assuming at most one decorator -- may need to # rethink decorator = line_stripped continue # does this look like an input line? matchin = rgxin.match(line) if matchin: lineno, inputline = int(matchin.group(1)), matchin.group(2) # the ....: continuation string continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) Nc = len(continuation) # input lines can continue on for more than one line, if # we have a '\' line continuation char or a function call # echo line 'print'. 
The input line can only be # terminated by the end of the block or an output line, so # we parse out the rest of the input line if it is # multiline as well as any echo text rest = [] while i<N: # look ahead; if the next line is blank, or a comment, or # an output line, we're done nextline = lines[i] matchout = rgxout.match(nextline) #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation)) if matchout or nextline.startswith('#'): break elif nextline.startswith(continuation): nextline = nextline[Nc:] if nextline and nextline[0] == ' ': nextline = nextline[1:] inputline += '\n' + nextline else: rest.append(nextline) i+= 1 block.append((INPUT, (decorator, inputline, '\n'.join(rest)))) continue # if it looks like an output line grab all the text to the end # of the block matchout = rgxout.match(line) if matchout: lineno, output = int(matchout.group(1)), matchout.group(2) if i<N-1: output = '\n'.join([output] + lines[i:]) block.append((OUTPUT, output)) break return block class DecodingStringIO(StringIO, object): def __init__(self,buf='',encodings=('utf8',), *args, **kwds): super(DecodingStringIO, self).__init__(buf, *args, **kwds) self.set_encodings(encodings) def set_encodings(self, encodings): self.encodings = encodings def write(self,data): if isinstance(data, text_type): return super(DecodingStringIO, self).write(data) else: for enc in self.encodings: try: data = data.decode(enc) return super(DecodingStringIO, self).write(data) except : pass # default to brute utf8 if no encoding succeeded return super(DecodingStringIO, self).write(data.decode('utf8', 'replace')) class EmbeddedSphinxShell(object): """An embedded IPython instance to run inside Sphinx""" def __init__(self, exec_lines=None,state=None): self.cout = DecodingStringIO(u'') if exec_lines is None: exec_lines = [] self.state = state # Create config object for IPython config = Config() config.InteractiveShell.autocall = False config.InteractiveShell.autoindent = False config.InteractiveShell.colors = 'NoColor' # create a profile so instance history isn't saved tmp_profile_dir = tempfile.mkdtemp(prefix='profile_') profname = 'auto_profile_sphinx_build' pdir = os.path.join(tmp_profile_dir,profname) profile = ProfileDir.create_profile_dir(pdir) # Create and initialize global ipython, but don't start its mainloop. # This will persist across different EmbededSphinxShell instances. IP = InteractiveShell.instance(config=config, profile_dir=profile) # io.stdout redirect must be done after instantiating InteractiveShell io.stdout = self.cout io.stderr = self.cout # For debugging, so we can see normal output, use this: #from IPython.utils.io import Tee #io.stdout = Tee(self.cout, channel='stdout') # dbg #io.stderr = Tee(self.cout, channel='stderr') # dbg # Store a few parts of IPython we'll need. self.IP = IP self.user_ns = self.IP.user_ns self.user_global_ns = self.IP.user_global_ns self.input = '' self.output = '' self.is_verbatim = False self.is_doctest = False self.is_suppress = False # Optionally, provide more detailed information to shell. self.directive = None # on the first call to the savefig decorator, we'll import # pyplot as plt so we can make a call to the plt.gcf().savefig self._pyplot_imported = False # Prepopulate the namespace. 
for line in exec_lines: self.process_input_line(line, store_history=False) def clear_cout(self): self.cout.seek(0) self.cout.truncate(0) def process_input_line(self, line, store_history=True): """process the input, capturing stdout""" stdout = sys.stdout splitter = self.IP.input_splitter try: sys.stdout = self.cout splitter.push(line) more = splitter.push_accepts_more() if not more: try: source_raw = splitter.source_raw_reset()[1] except: # recent ipython #4504 source_raw = splitter.raw_reset() self.IP.run_cell(source_raw, store_history=store_history) finally: sys.stdout = stdout def process_image(self, decorator): """ # build out an image directive like # .. image:: somefile.png # :width 4in # # from an input like # savefig somefile.png width=4in """ savefig_dir = self.savefig_dir source_dir = self.source_dir saveargs = decorator.split(' ') filename = saveargs[1] # insert relative path to image file in source outfile = os.path.relpath(os.path.join(savefig_dir,filename), source_dir) imagerows = ['.. image:: %s'%outfile] for kwarg in saveargs[2:]: arg, val = kwarg.split('=') arg = arg.strip() val = val.strip() imagerows.append(' :%s: %s'%(arg, val)) image_file = os.path.basename(outfile) # only return file name image_directive = '\n'.join(imagerows) return image_file, image_directive # Callbacks for each type of token def process_input(self, data, input_prompt, lineno): """ Process data block for INPUT token. """ decorator, input, rest = data image_file = None image_directive = None is_verbatim = decorator=='@verbatim' or self.is_verbatim is_doctest = (decorator is not None and \ decorator.startswith('@doctest')) or self.is_doctest is_suppress = decorator=='@suppress' or self.is_suppress is_okexcept = decorator=='@okexcept' or self.is_okexcept is_okwarning = decorator=='@okwarning' or self.is_okwarning is_savefig = decorator is not None and \ decorator.startswith('@savefig') # set the encodings to be used by DecodingStringIO # to convert the execution output into unicode if # needed. this attrib is set by IpythonDirective.run() # based on the specified block options, defaulting to ['ut self.cout.set_encodings(self.output_encoding) input_lines = input.split('\n') if len(input_lines) > 1: if input_lines[-1] != "": input_lines.append('') # make sure there's a blank line # so splitter buffer gets reset continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) if is_savefig: image_file, image_directive = self.process_image(decorator) ret = [] is_semicolon = False # Hold the execution count, if requested to do so. 
if is_suppress and self.hold_count: store_history = False else: store_history = True # Note: catch_warnings is not thread safe with warnings.catch_warnings(record=True) as ws: for i, line in enumerate(input_lines): if line.endswith(';'): is_semicolon = True if i == 0: # process the first input line if is_verbatim: self.process_input_line('') self.IP.execution_count += 1 # increment it anyway else: # only submit the line in non-verbatim mode self.process_input_line(line, store_history=store_history) formatted_line = '%s %s'%(input_prompt, line) else: # process a continuation line if not is_verbatim: self.process_input_line(line, store_history=store_history) formatted_line = '%s %s'%(continuation, line) if not is_suppress: ret.append(formatted_line) if not is_suppress and len(rest.strip()) and is_verbatim: # the "rest" is the standard output of the # input, which needs to be added in # verbatim mode ret.append(rest) self.cout.seek(0) output = self.cout.read() if not is_suppress and not is_semicolon: ret.append(output) elif is_semicolon: # get spacing right ret.append('') # context information filename = self.state.document.current_source lineno = self.state.document.current_line # output any exceptions raised during execution to stdout # unless :okexcept: has been specified. if not is_okexcept and "Traceback" in output: s = "\nException in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) sys.stdout.write(output) sys.stdout.write('<<<' + ('-' * 73) + '\n\n') # output any warning raised during execution to stdout # unless :okwarning: has been specified. if not is_okwarning: for w in ws: s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) sys.stdout.write('-' * 76 + '\n') s=warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line) sys.stdout.write(s) sys.stdout.write('<<<' + ('-' * 73) + '\n') self.cout.truncate(0) return (ret, input_lines, output, is_doctest, decorator, image_file, image_directive) def process_output(self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file): """ Process data block for OUTPUT token. """ TAB = ' ' * 4 if is_doctest and output is not None: found = output found = found.strip() submitted = data.strip() if self.directive is None: source = 'Unavailable' content = 'Unavailable' else: source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. content = '\n'.join(TAB + line for line in content) # Make sure the output contains the output prompt. ind = found.find(output_prompt) if ind < 0: e = ('output does not contain output prompt\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'Input line(s):\n{TAB}{2}\n\n' 'Output line(s):\n{TAB}{3}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), TAB=TAB) raise RuntimeError(e) found = found[len(output_prompt):].strip() # Handle the actual doctest comparison. 
if decorator.strip() == '@doctest': # Standard doctest if found != submitted: e = ('doctest failure\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'On input line(s):\n{TAB}{2}\n\n' 'we found output:\n{TAB}{3}\n\n' 'instead of the expected:\n{TAB}{4}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), repr(submitted), TAB=TAB) raise RuntimeError(e) else: self.custom_doctest(decorator, input_lines, found, submitted) def process_comment(self, data): """Process data block for COMMENT token.""" if not self.is_suppress: return [data] def save_image(self, image_file): """ Saves the image file to disk. """ self.ensure_pyplot() command = ('plt.gcf().savefig("%s", bbox_inches="tight", ' 'dpi=100)' % image_file) #print 'SAVEFIG', command # dbg self.process_input_line('bookmark ipy_thisdir', store_history=False) self.process_input_line('cd -b ipy_savedir', store_history=False) self.process_input_line(command, store_history=False) self.process_input_line('cd -b ipy_thisdir', store_history=False) self.process_input_line('bookmark -d ipy_thisdir', store_history=False) self.clear_cout() def process_block(self, block): """ process block from the block_parser and return a list of processed lines """ ret = [] output = None input_lines = None lineno = self.IP.execution_count input_prompt = self.promptin % lineno output_prompt = self.promptout % lineno image_file = None image_directive = None for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: (out_data, input_lines, output, is_doctest, decorator, image_file, image_directive) = \ self.process_input(data, input_prompt, lineno) elif token == OUTPUT: out_data = \ self.process_output(data, output_prompt, input_lines, output, is_doctest, decorator, image_file) if out_data: ret.extend(out_data) # save the image files if image_file is not None: self.save_image(image_file) return ret, image_directive def ensure_pyplot(self): """ Ensures that pyplot has been imported into the embedded IPython shell. Also, makes sure to set the backend appropriately if not set already. """ # We are here if the @figure pseudo decorator was used. Thus, it's # possible that we could be here even if python_mplbackend were set to # `None`. That's also strange and perhaps worthy of raising an # exception, but for now, we just set the backend to 'agg'. if not self._pyplot_imported: if 'matplotlib.backends' not in sys.modules: # Then ipython_matplotlib was set to None but there was a # call to the @figure decorator (and ipython_execlines did # not set a backend). #raise Exception("No backend was set, but @figure was used!") import matplotlib matplotlib.use('agg') # Always import pyplot into embedded shell. self.process_input_line('import matplotlib.pyplot as plt', store_history=False) self._pyplot_imported = True def process_pure_python(self, content): """ content is a list of strings.
it is unedited directive content This runs it line by line in the InteractiveShell, prepends prompts as needed capturing stderr and stdout, then returns the content as a list as if it were ipython code """ output = [] savefig = False # keep up with this to clear figure multiline = False # to handle line continuation multiline_start = None fmtin = self.promptin ct = 0 for lineno, line in enumerate(content): line_stripped = line.strip() if not len(line): output.append(line) continue # handle decorators if line_stripped.startswith('@'): output.extend([line]) if 'savefig' in line: savefig = True # and need to clear figure continue # handle comments if line_stripped.startswith('#'): output.extend([line]) continue # deal with lines checking for multiline continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2)) if not multiline: modified = u"%s %s" % (fmtin % ct, line_stripped) output.append(modified) ct += 1 try: ast.parse(line_stripped) output.append(u'') except Exception: # on a multiline multiline = True multiline_start = lineno else: # still on a multiline modified = u'%s %s' % (continuation, line) output.append(modified) # if the next line is indented, it should be part of multiline if len(content) > lineno + 1: nextline = content[lineno + 1] if len(nextline) - len(nextline.lstrip()) > 3: continue try: mod = ast.parse( '\n'.join(content[multiline_start:lineno+1])) if isinstance(mod.body[0], ast.FunctionDef): # check to see if we have the whole function for element in mod.body[0].body: if isinstance(element, ast.Return): multiline = False else: output.append(u'') multiline = False except Exception: pass if savefig: # clear figure if plotted self.ensure_pyplot() self.process_input_line('plt.clf()', store_history=False) self.clear_cout() savefig = False return output def custom_doctest(self, decorator, input_lines, found, submitted): """ Perform a specialized doctest. """ from .custom_doctests import doctests args = decorator.split() doctest_type = args[1] if doctest_type in doctests: doctests[doctest_type](self, args, input_lines, found, submitted) else: e = "Invalid option to @doctest: {0}".format(doctest_type) raise Exception(e) class IPythonDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 4 # python, suppress, verbatim, doctest final_argumuent_whitespace = True option_spec = { 'python': directives.unchanged, 'suppress' : directives.flag, 'verbatim' : directives.flag, 'doctest' : directives.flag, 'okexcept': directives.flag, 'okwarning': directives.flag, 'output_encoding': directives.unchanged_required } shell = None seen_docs = set() def get_config_options(self): # contains sphinx configuration variables config = self.state.document.settings.env.config # get config variables to set figure output directory confdir = self.state.document.settings.env.app.confdir savefig_dir = config.ipython_savefig_dir source_dir = os.path.dirname(self.state.document.current_source) if savefig_dir is None: savefig_dir = config.html_static_path if isinstance(savefig_dir, list): savefig_dir = savefig_dir[0] # safe to assume only one path? 
savefig_dir = os.path.join(confdir, savefig_dir) # get regex and prompt stuff rgxin = config.ipython_rgxin rgxout = config.ipython_rgxout promptin = config.ipython_promptin promptout = config.ipython_promptout mplbackend = config.ipython_mplbackend exec_lines = config.ipython_execlines hold_count = config.ipython_holdcount return (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count) def setup(self): # Get configuration values. (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count) = self.get_config_options() if self.shell is None: # We will be here many times. However, when the # EmbeddedSphinxShell is created, its interactive shell member # is the same for each instance. if mplbackend and 'matplotlib.backends' not in sys.modules: import matplotlib # Repeated calls to use() will not hurt us since `mplbackend` # is the same each time. matplotlib.use(mplbackend) # Must be called after (potentially) importing matplotlib and # setting its backend since exec_lines might import pylab. self.shell = EmbeddedSphinxShell(exec_lines, self.state) # Store IPython directive to enable better error messages self.shell.directive = self # reset the execution count if we haven't processed this doc #NOTE: this may be borked if there are multiple seen_doc tmp files #check time stamp? if self.state.document.current_source not in self.seen_docs: self.shell.IP.history_manager.reset() self.shell.IP.execution_count = 1 try: self.shell.IP.prompt_manager.width = 0 except AttributeError: # GH14003: class promptManager has removed after IPython 5.x pass self.seen_docs.add(self.state.document.current_source) # and attach to shell so we don't have to pass them around self.shell.rgxin = rgxin self.shell.rgxout = rgxout self.shell.promptin = promptin self.shell.promptout = promptout self.shell.savefig_dir = savefig_dir self.shell.source_dir = source_dir self.shell.hold_count = hold_count # setup bookmark for saving figures directory self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir, store_history=False) self.shell.clear_cout() return rgxin, rgxout, promptin, promptout def teardown(self): # delete last bookmark self.shell.process_input_line('bookmark -d ipy_savedir', store_history=False) self.shell.clear_cout() def run(self): debug = False #TODO, any reason block_parser can't be a method of embeddable shell # then we wouldn't have to carry these around rgxin, rgxout, promptin, promptout = self.setup() options = self.options self.shell.is_suppress = 'suppress' in options self.shell.is_doctest = 'doctest' in options self.shell.is_verbatim = 'verbatim' in options self.shell.is_okexcept = 'okexcept' in options self.shell.is_okwarning = 'okwarning' in options self.shell.output_encoding = [options.get('output_encoding', 'utf8')] # handle pure python code if 'python' in self.arguments: content = self.content self.content = self.shell.process_pure_python(content) parts = '\n'.join(self.content).split('\n\n') lines = ['.. code-block:: ipython', ''] figures = [] for part in parts: block = block_parser(part, rgxin, rgxout, promptin, promptout) if len(block): rows, figure = self.shell.process_block(block) for row in rows: lines.extend([' %s'%line for line in row.split('\n')]) if figure is not None: figures.append(figure) for figure in figures: lines.append('') lines.extend(figure.split('\n')) lines.append('') if len(lines)>2: if debug: print('\n'.join(lines)) else: # This has to do with input, not output. 
But if we comment # these lines out, then no IPython code will appear in the # final output. self.state_machine.insert_input( lines, self.state_machine.input_lines.source(0)) # cleanup self.teardown() return [] # Enable as a proper Sphinx directive def setup(app): setup.app = app app.add_directive('ipython', IPythonDirective) app.add_config_value('ipython_savefig_dir', None, 'env') app.add_config_value('ipython_rgxin', re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env') app.add_config_value('ipython_rgxout', re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env') app.add_config_value('ipython_promptin', 'In [%d]:', 'env') app.add_config_value('ipython_promptout', 'Out[%d]:', 'env') # We could just let matplotlib pick whatever is specified as the default # backend in the matplotlibrc file, but this would cause issues if the # backend didn't work in headless environments. For this reason, 'agg' # is a good default backend choice. app.add_config_value('ipython_mplbackend', 'agg', 'env') # If the user sets this config value to `None`, then EmbeddedSphinxShell's # __init__ method will treat it as []. execlines = ['import numpy as np', 'import matplotlib.pyplot as plt'] app.add_config_value('ipython_execlines', execlines, 'env') app.add_config_value('ipython_holdcount', True, 'env') # Simple smoke test, needs to be converted to a proper automatic test. def test(): examples = [ r""" In [9]: pwd Out[9]: '/home/jdhunter/py4science/book' In [10]: cd bookdata/ /home/jdhunter/py4science/book/bookdata In [2]: from pylab import * In [2]: ion() In [3]: im = imread('stinkbug.png') @savefig mystinkbug.png width=4in In [4]: imshow(im) Out[4]: <matplotlib.image.AxesImage object at 0x39ea850> """, r""" In [1]: x = 'hello world' # string methods can be # used to alter the string @doctest In [2]: x.upper() Out[2]: 'HELLO WORLD' @verbatim In [3]: x.st<TAB> x.startswith x.strip """, r""" In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' In [131]: print url.split('&') ['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] In [60]: import urllib """, r"""\ In [133]: import numpy.random @suppress In [134]: numpy.random.seed(2358) @doctest In [135]: numpy.random.rand(10,2) Out[135]: array([[ 0.64524308, 0.59943846], [ 0.47102322, 0.8715456 ], [ 0.29370834, 0.74776844], [ 0.99539577, 0.1313423 ], [ 0.16250302, 0.21103583], [ 0.81626524, 0.1312433 ], [ 0.67338089, 0.72302393], [ 0.7566368 , 0.07033696], [ 0.22591016, 0.77731835], [ 0.0072729 , 0.34273127]]) """, r""" In [106]: print x jdh In [109]: for i in range(10): .....: print i .....: .....: 0 1 2 3 4 5 6 7 8 9 """, r""" In [144]: from pylab import * In [145]: ion() # use a semicolon to suppress the output @savefig test_hist.png width=4in In [151]: hist(np.random.randn(10000), 100); @savefig test_plot.png width=4in In [151]: plot(np.random.randn(10000), 'o'); """, r""" # use a semicolon to suppress the output In [151]: plt.clf() @savefig plot_simple.png width=4in In [151]: plot([1,2,3]) @savefig hist_simple.png width=4in In [151]: hist(np.random.randn(10000), 100); """, r""" # update the current fig In [151]: ylabel('number') In [152]: title('normal distribution') @savefig hist_with_text.png In [153]: grid(True) @doctest float In [154]: 0.1 + 0.2 Out[154]: 0.3 @doctest float In [155]: np.arange(16).reshape(4,4) Out[155]: array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) In [1]: x = np.arange(16, 
dtype=float).reshape(4,4) In [2]: x[0,0] = np.inf In [3]: x[0,1] = np.nan @doctest float In [4]: x Out[4]: array([[ inf, nan, 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) """, ] # skip local-file depending first example: examples = examples[1:] #ipython_directive.DEBUG = True # dbg #options = dict(suppress=True) # dbg options = dict() for example in examples: content = example.split('\n') IPythonDirective('debug', arguments=None, options=options, content=content, lineno=0, content_offset=None, block_text=None, state=None, state_machine=None, ) # Run test suite as a script if __name__=='__main__': if not os.path.isdir('_static'): os.mkdir('_static') test() print('All OK? Check figures in _static/')
bsd-3-clause
lamastex/scalable-data-science
db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 4: Text Analysis and Entity Resolution Lab Solutions.py
2
73278
# Databricks notebook source exported at Mon, 14 Mar 2016 03:33:29 UTC # MAGIC %md # MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand), and *supported by* [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/databricks_logoTM_200px.png)](https://databricks.com/) # MAGIC and # MAGIC [![](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/images/AWS_logoTM_200px.png)](https://www.awseducate.com/microsite/CommunitiesEngageHome). # COMMAND ---------- # MAGIC %md # MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>. # COMMAND ---------- # MAGIC %md # MAGIC #![Spark Logo](http://spark-mooc.github.io/web-assets/images/ta_Spark-logo-small.png) + ![Python Logo](http://spark-mooc.github.io/web-assets/images/python-logo-master-v3-TM-flattened_small.png) # MAGIC # **Text Analysis and Entity Resolution** # MAGIC Entity resolution is a common, yet difficult problem in data cleaning and integration. This lab will demonstrate how we can use Apache Spark to apply powerful and scalable text analysis techniques and perform entity resolution across two datasets of commercial products. # COMMAND ---------- # MAGIC %md # MAGIC Entity Resolution, or "[Record linkage][wiki]", is the term used by statisticians, epidemiologists, and historians, among others, to describe the process of joining records from one data source with another that describe the same entity. Other terms with the same meaning include "entity disambiguation/linking", "duplicate detection", "deduplication", "record matching", "(reference) reconciliation", "object identification", "data/information integration", and "conflation". # MAGIC # MAGIC Entity Resolution (ER) refers to the task of finding records in a dataset that refer to the same entity across different data sources (e.g., data files, books, websites, databases). ER is necessary when joining datasets based on entities that may or may not share a common identifier (e.g., database key, URI, National identification number), as may be the case due to differences in record shape, storage location, and/or curator style or preference. A dataset that has undergone ER may be referred to as being cross-linked. # MAGIC [wiki]: https://en.wikipedia.org/wiki/Record_linkage # COMMAND ---------- labVersion = 'cs100.1x-lab3-1.0.4' # COMMAND ---------- # MAGIC %md # MAGIC #### Code # MAGIC This assignment can be completed using basic Python, pySpark Transformations and actions, and the plotting library matplotlib. Other libraries are not allowed.
# MAGIC # MAGIC #### Files # MAGIC Data files for this assignment are from the [metric-learning](https://code.google.com/p/metric-learning/) project and can be found at: # MAGIC `cs100/lab3` # MAGIC # MAGIC The directory contains the following files: # MAGIC * **Google.csv**, the Google Products dataset # MAGIC * **Amazon.csv**, the Amazon dataset # MAGIC * **Google_small.csv**, 200 records sampled from the Google data # MAGIC * **Amazon_small.csv**, 200 records sampled from the Amazon data # MAGIC * **Amazon_Google_perfectMapping.csv**, the "gold standard" mapping # MAGIC * **stopwords.txt**, a list of common English words # MAGIC # MAGIC Besides the complete data files, there are "sample" data files for each dataset - we will use these for **Part 1**. In addition, there is a "gold standard" file that contains all of the true mappings between entities in the two datasets. Every row in the gold standard file has a pair of record IDs (one Google, one Amazon) that belong to two records that describe the same thing in the real world. We will use the gold standard to evaluate our algorithms. # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 0: Preliminaries** # MAGIC We read in each of the files and create an RDD consisting of lines. # MAGIC For each of the data files ("Google.csv", "Amazon.csv", and the samples), we want to parse the IDs out of each record. The IDs are the first column of the file (they are URLs for Google, and alphanumeric strings for Amazon). Omitting the headers, we load these data files into pair RDDs where the *mapping ID* is the key, and the value is a string consisting of the name/title, description, and manufacturer from the record. # MAGIC # MAGIC The file format of an Amazon line is: # MAGIC # MAGIC `"id","title","description","manufacturer","price"` # MAGIC # MAGIC The file format of a Google line is: # MAGIC # MAGIC `"id","name","description","manufacturer","price"` # COMMAND ---------- import re DATAFILE_PATTERN = '^(.+),"(.+)",(.*),(.*),(.*)' def removeQuotes(s): """ Remove quotation marks from an input string Args: s (str): input string that might have the quote "" characters Returns: str: a string without the quote characters """ return ''.join(i for i in s if i!='"') def parseDatafileLine(datafileLine): """ Parse a line of the data file using the specified regular expression pattern Args: datafileLine (str): input string that is a line from the data file Returns: str: a string parsed using the given regular expression and without the quote characters """ match = re.search(DATAFILE_PATTERN, datafileLine) if match is None: print 'Invalid datafile line: %s' % datafileLine return (datafileLine, -1) elif match.group(1) == '"id"': print 'Header datafile line: %s' % datafileLine return (datafileLine, 0) else: product = '%s %s %s' % (match.group(2), match.group(3), match.group(4)) return ((removeQuotes(match.group(1)), product), 1) # COMMAND ---------- display(dbutils.fs.ls('/databricks-datasets/cs100/lab3/data-001/')) # COMMAND ---------- # MAGIC %md **WARNING:** If *test_helper*, required in the cell below, is not installed, follow the instructions [here](https://databricks-staging-cloudfront.staging.cloud.databricks.com/public/c65da9a2fa40e45a2028cddebe45b54c/8637560089690848/4187311313936645/6977722904629137/05f3c2ecc3.html).
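# COMMAND ----------

# MAGIC %md
# MAGIC > **Illustration (not part of the graded lab):** the parser above tags every line with a flag that the loading code later filters on: `-1` for a line the regular expression cannot parse, `0` for the header line, and `1` for a successfully parsed record. The sample lines below are invented for this sketch; they are not taken from the Google or Amazon files.

# COMMAND ----------

# A quick, self-contained check of parseDatafileLine on made-up lines:
# a header line (flag 0), a malformed line (flag -1), and a parsed record (flag 1).
sampleLines = ['"id","title","description","manufacturer","price"',
               'a line with no quoted fields at all',
               '"x123","some product","a short description","acme","9.99"']
for sampleLine in sampleLines:
    print parseDatafileLine(sampleLine)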
# COMMAND ---------- import sys import os from test_helper import Test baseDir = os.path.join('databricks-datasets') inputPath = os.path.join('cs100', 'lab3', 'data-001') GOOGLE_PATH = 'Google.csv' GOOGLE_SMALL_PATH = 'Google_small.csv' AMAZON_PATH = 'Amazon.csv' AMAZON_SMALL_PATH = 'Amazon_small.csv' GOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv' STOPWORDS_PATH = 'stopwords.txt' def parseData(filename): """ Parse a data file Args: filename (str): input file name of the data file Returns: RDD: a RDD of parsed lines """ return (sc .textFile(filename, 4, 0) .map(parseDatafileLine)) def loadData(path): """ Load a data file Args: path (str): input file name of the data file Returns: RDD: a RDD of parsed valid lines """ filename = os.path.join(baseDir, inputPath, path) raw = parseData(filename).cache() failed = (raw .filter(lambda s: s[1] == -1) .map(lambda s: s[0])) for line in failed.take(10): print '%s - Invalid datafile line: %s' % (path, line) valid = (raw .filter(lambda s: s[1] == 1) .map(lambda s: s[0]) .cache()) print '%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path, raw.count(), valid.count(), failed.count()) assert failed.count() == 0 assert raw.count() == (valid.count() + 1) return valid googleSmall = loadData(GOOGLE_SMALL_PATH) google = loadData(GOOGLE_PATH) amazonSmall = loadData(AMAZON_SMALL_PATH) amazon = loadData(AMAZON_PATH) # COMMAND ---------- # MAGIC %md # MAGIC Let's examine the lines that were just loaded in the two subset (small) files - one from Google and one from Amazon # COMMAND ---------- for line in googleSmall.take(3): print 'google: %s: %s\n' % (line[0], line[1]) for line in amazonSmall.take(3): print 'amazon: %s: %s\n' % (line[0], line[1]) # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 1: ER as Text Similarity - Bags of Words** # MAGIC # MAGIC A simple approach to entity resolution is to treat all records as strings and compute their similarity with a string distance function. In this part, we will build some components for performing bag-of-words text-analysis, and then use them to compute record similarity. # MAGIC [Bag-of-words][bag-of-words] is a conceptually simple yet powerful approach to text analysis. # MAGIC # MAGIC The idea is to treat strings, a.k.a. **documents**, as *unordered collections* of words, or **tokens**, i.e., as bags of words. # MAGIC > **Note on terminology**: a "token" is the result of parsing the document down to the elements we consider "atomic" for the task at hand. Tokens can be things like words, numbers, acronyms, or other exotica like word-roots or fixed-length character strings. # MAGIC > Bag of words techniques all apply to any sort of token, so when we say "bag-of-words" we really mean "bag-of-tokens," strictly speaking. # MAGIC Tokens become the atomic unit of text comparison. If we want to compare two documents, we count how many tokens they share in common. If we want to search for documents with keyword queries (this is what Google does), then we turn the keywords into tokens and find documents that contain them. The power of this approach is that it makes string comparisons insensitive to small differences that probably do not affect meaning much, for example, punctuation and word order. # MAGIC [bag-of-words]: https://en.wikipedia.org/wiki/Bag-of-words_model # COMMAND ---------- # MAGIC %md # MAGIC #### **1(a) Tokenize a String** # MAGIC Implement the function `simpleTokenize(string)` that takes a string and returns a list of non-empty tokens in the string. 
`simpleTokenize` should split strings using the provided regular expression. Since we want to make token-matching case insensitive, make sure all tokens are turned lower-case. Give an interpretation, in natural language, of what the regular expression, `split_regex`, matches. # MAGIC If you need help with Regular Expressions, try the site [regex101](https://regex101.com/) where you can interactively explore the results of applying different regular expressions to strings. *Note that \W includes the "_" character*. You should use [re.split()](https://docs.python.org/2/library/re.html#re.split) to perform the string split. Also, make sure you remove any empty tokens. # COMMAND ---------- # ANSWER quickbrownfox = 'A quick brown fox jumps over the lazy dog.' split_regex = r'\W+' def simpleTokenize(string): """ A simple implementation of input string tokenization Args: string (str): input string Returns: list: a list of tokens """ return [t for t in re.split(split_regex, string.lower()) if len(t)] print simpleTokenize(quickbrownfox) # Should give ['a', 'quick', 'brown', ... ] # COMMAND ---------- # TEST Tokenize a String (1a) Test.assertEquals(simpleTokenize(quickbrownfox), ['a','quick','brown','fox','jumps','over','the','lazy','dog'], 'simpleTokenize should handle sample text') Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string') Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'], 'simpleTokenize should handle punctuations and lowercase result') Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'], 'simpleTokenize should not remove duplicates') # COMMAND ---------- # PRIVATE_TEST Tokenize a String (1a) Test.assertEquals(simpleTokenize(quickbrownfox), ['a','quick','brown','fox','jumps','over','the','lazy','dog'], 'simpleTokenize should handle sample text') Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string') Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'], 'simpleTokenize should handle puntuations and lowercase result') Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'], 'simpleTokenize should not remove duplicates') # COMMAND ---------- # MAGIC %md # MAGIC #### **(1b) Removing stopwords** # MAGIC *[Stopwords][stopwords]* are common (English) words that do not contribute much to the content or meaning of a document (e.g., "the", "a", "is", "to", etc.). Stopwords add noise to bag-of-words comparisons, so they are usually excluded. # MAGIC Using the included file "stopwords.txt", implement `tokenize`, an improved tokenizer that does not emit stopwords. # MAGIC [stopwords]: https://en.wikipedia.org/wiki/Stop_words # COMMAND ---------- # ANSWER stopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH) stopwords = set(sc.textFile(stopfile).collect()) print 'These are the stopwords: %s' % stopwords def tokenize(string): """ An implementation of input string tokenization that excludes stopwords Args: string (str): input string Returns: list: a list of tokens without stopwords """ return [t for t in simpleTokenize(string) if t not in stopwords] print tokenize(quickbrownfox) # Should give ['quick', 'brown', ... 
] # COMMAND ---------- # TEST Removing stopwords (1b) Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords') Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords') Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'], 'tokenize should handle sample text') # COMMAND ---------- # PRIVATE_TEST Removing stopwords (1b) Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords') Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords') Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'], 'tokenize should handle sample text') # COMMAND ---------- # MAGIC %md # MAGIC #### **(1c) Tokenizing the small datasets** # MAGIC Now let's tokenize the two *small* datasets. For each ID in a dataset, `tokenize` the values, and then count the total number of tokens. # MAGIC How many tokens, total, are there in the two datasets? # COMMAND ---------- # ANSWER amazonRecToToken = amazonSmall.map(lambda s: (s[0], tokenize(s[1]))) googleRecToToken = googleSmall.map(lambda s: (s[0], tokenize(s[1]))) def countTokens(vendorRDD): """ Count and return the number of tokens Args: vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output Returns: count: count of all tokens """ recordCount = vendorRDD.map(lambda s: len(s[1])) recordSum = recordCount.reduce(lambda a, b : a + b) return recordSum totalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken) print 'There are %s tokens in the combined datasets' % totalTokens # COMMAND ---------- # TEST Tokenizing the small datasets (1c) Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens') # COMMAND ---------- # PRIVATE_TEST Tokenizing the small datasets (1c) Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens') Test.assertEquals(countTokens(amazonRecToToken), 16707, 'incorrect token count for Amazon records') # COMMAND ---------- # MAGIC %md # MAGIC #### **(1d) Amazon record with the most tokens** # MAGIC Which Amazon record has the biggest number of tokens? # MAGIC In other words, you want to sort the records and get the one with the largest count of tokens. 
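# COMMAND ----------

# MAGIC %md
# MAGIC > **Hint (illustrative only, not part of the graded lab):** one way to "sort and take the top record" without sorting the whole RDD is `takeOrdered` with a key function; negating the token count returns the record with the most tokens first. The toy RDD below is invented for this sketch.

# COMMAND ----------

# Toy illustration of takeOrdered with a negated key: returns the pair whose
# token list is longest.
exampleRDD = sc.parallelize([('a', ['x', 'y']), ('b', ['x', 'y', 'z']), ('c', ['x'])])
print exampleRDD.takeOrdered(1, key=lambda s: -1 * len(s[1]))  # [('b', ['x', 'y', 'z'])]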
# COMMAND ---------- # ANSWER def findBiggestRecord(vendorRDD): """ Find and return the record with the largest number of tokens Args: vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens Returns: list: a list of 1 Pair Tuple of record ID and tokens """ return(vendorRDD.takeOrdered(1, lambda s: -1 * len(s[1]))) biggestRecordAmazon = findBiggestRecord(amazonRecToToken) print 'The Amazon record with ID "%s" has the most tokens (%s)' % (biggestRecordAmazon[0][0], len(biggestRecordAmazon[0][1])) # COMMAND ---------- # TEST Amazon record with the most tokens (1d) Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon') Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon') # COMMAND ---------- # PRIVATE_TEST Amazon record with the most tokens (1d) Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon') Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon') # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 2: ER as Text Similarity - Weighted Bag-of-Words using TF-IDF** # MAGIC Bag-of-words comparisons are not very good when all tokens are treated the same: some tokens are more important than others. Weights give us a way to specify which tokens to favor. With weights, when we compare documents, instead of counting common tokens, we sum up the weights of common tokens. A good heuristic for assigning weights is called "Term-Frequency/Inverse-Document-Frequency," or [TF-IDF][tfidf] for short. # MAGIC # MAGIC **TF** # MAGIC # MAGIC TF rewards tokens that appear many times in the same document. It is computed as the frequency of a token in a document, that is, if document *d* contains 100 tokens and token *t* appears in *d* 5 times, then the TF weight of *t* in *d* is *5/100 = 1/20*. The intuition for TF is that if a word occurs often in a document, then it is more important to the meaning of the document. # MAGIC # MAGIC **IDF** # MAGIC # MAGIC IDF rewards tokens that are rare overall in a dataset. The intuition is that it is more significant if two documents share a rare word than a common one. IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows: # MAGIC * Let *N* be the total number of documents in *U* # MAGIC * Find *n(t)*, the number of documents in *U* that contain *t* # MAGIC * Then *IDF(t) = N/n(t)*. # MAGIC # MAGIC Note that *n(t)/N* is the frequency of *t* in *U*, and *N/n(t)* is the inverse frequency. # MAGIC # MAGIC > **Note on terminology**: Sometimes token weights depend on the document the token belongs to, that is, the same token may have a different weight when it's found in different documents. We call these weights *local* weights. TF is an example of a local weight, because it depends on the length of the source. On the other hand, some token weights only depend on the token, and are the same everywhere that token is found. We call these weights *global*, and IDF is one such weight. # MAGIC # MAGIC **TF-IDF** # MAGIC # MAGIC Finally, to bring it all together, the total TF-IDF weight for a token in a document is the product of its TF and IDF weights. 
# MAGIC [tfidf]: https://en.wikipedia.org/wiki/Tf%E2%80%93idf # COMMAND ---------- # MAGIC %md # MAGIC #### **(2a) Implement a TF function** # MAGIC # MAGIC Implement `tf(tokens)` that takes a list of tokens and returns a Python [dictionary](https://docs.python.org/2/tutorial/datastructures.html#dictionaries) mapping tokens to TF weights. # MAGIC # MAGIC The steps your function should perform are: # MAGIC * Create an empty Python dictionary # MAGIC * For each of the tokens in the input `tokens` list, count 1 for each occurrence and add the token to the dictionary # MAGIC * For each of the tokens in the dictionary, divide the token's count by the total number of tokens in the input `tokens` list # COMMAND ---------- # ANSWER def tf(tokens): """ Compute TF Args: tokens (list of str): input list of tokens from tokenize Returns: dictionary: a dictionary of tokens to their TF values """ counts = {} length = len(tokens) for t in tokens: counts.setdefault(t, 0.0) counts[t] += 1 return { t: counts[t] / length for t in counts } print tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... } # COMMAND ---------- # TEST Implement a TF function (2a) tf_test = tf(tokenize(quickbrownfox)) Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666, 'jumps': 0.16666666666666666, 'fox': 0.16666666666666666, 'dog': 0.16666666666666666, 'quick': 0.16666666666666666}, 'incorrect result for tf on sample text') tf_test2 = tf(tokenize('one_ one_ two!')) Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333}, 'incorrect result for tf test') # COMMAND ---------- # PRIVATE_TEST Implement a TF function (2a) tf_test = tf(tokenize(quickbrownfox)) Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666, 'jumps': 0.16666666666666666, 'fox': 0.16666666666666666, 'dog': 0.16666666666666666, 'quick': 0.16666666666666666}, 'incorrect result for tf on sample text') tf_test2 = tf(tokenize('one_ one_ two!')) Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333}, 'incorrect result for tf test') # COMMAND ---------- # MAGIC %md # MAGIC #### **(2b) Create a corpus** # MAGIC Create a pair RDD called `corpusRDD`, consisting of a combination of the two small datasets, `amazonRecToToken` and `googleRecToToken`. Each element of the `corpusRDD` should be a pair consisting of a key from one of the small datasets (ID or URL) and the associated value for that key from the small datasets. # COMMAND ---------- # ANSWER corpusRDD = amazonRecToToken.union(googleRecToToken) # COMMAND ---------- # TEST Create a corpus (2b) Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()') # COMMAND ---------- # PRIVATE_TEST Create a corpus (2b) Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **(2c) Implement an IDFs function** # MAGIC Implement `idfs` that assigns an IDF weight to every unique token in an RDD called `corpus`. The function should return a pair RDD where the `key` is the unique token and the value is the IDF weight for the token. # MAGIC # MAGIC Recall that the IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows: # MAGIC * Let *N* be the total number of documents in *U*. # MAGIC * Find *n(t)*, the number of documents in *U* that contain *t*. # MAGIC * Then *IDF(t) = N/n(t)*. # MAGIC # MAGIC The steps your function should perform are: # MAGIC * Calculate *N*.
Think about how you can calculate *N* from the input RDD. # MAGIC * Create an RDD (*not a pair RDD*) containing the unique tokens from each document in the input `corpus`. For each document, you should only include a token once, *even if it appears multiple times in that document.* # MAGIC * For each of the unique tokens, count how many documents in `corpus` contain it, and then compute the IDF for that token: *N/n(t)* # MAGIC # MAGIC Use your `idfs` to compute the IDF weights for all tokens in `corpusRDD` (the combined small datasets). # MAGIC How many unique tokens are there? # COMMAND ---------- # ANSWER def idfs(corpus): """ Compute IDF Args: corpus (RDD): input corpus Returns: RDD: a RDD of (token, IDF value) """ uniqueTokens = corpus.flatMap(lambda s: list(set(s[1]))) tokenCountPairTuple = uniqueTokens.map(lambda token: (token, 1)) tokenSumPairTuple = tokenCountPairTuple.reduceByKey(lambda a, b : a + b) N = float(corpus.count()) return (tokenSumPairTuple.map(lambda s: (s[0], float(N/s[1])))) idfsSmall = idfs(amazonRecToToken.union(googleRecToToken)) uniqueTokenCount = idfsSmall.count() print 'There are %s unique tokens in the small datasets.' % uniqueTokenCount # COMMAND ---------- # TEST Implement an IDFs function (2c) Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount') tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0] Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token') Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001, 'incorrect smallest IDF value') # COMMAND ---------- # PRIVATE_TEST Implement an IDFs function (2c) Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount') tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0] Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token') Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001, 'incorrect smallest IDF value') firstElevenTokens = set(idfsSmall.takeOrdered(11, lambda s: s[1])) Test.assertEquals(len(firstElevenTokens - set([('software', 4.25531914893617),('new', 6.896551724137931),('features', 6.896551724137931),('use', 7.017543859649122),('complete', 7.2727272727272725),('easy', 7.6923076923076925),('create', 8.333333333333334),('system', 8.333333333333334),('cd', 8.333333333333334),('1', 8.51063829787234), ('windows', 8.51063829787234)])), 0, 'incorrect firstTenTokens') # COMMAND ---------- # MAGIC %md # MAGIC #### **(2d) Tokens with the smallest IDF** # MAGIC Print out the 11 tokens with the smallest IDF in the combined small dataset. # COMMAND ---------- smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1]) print smallIDFTokens # COMMAND ---------- # ANSWER #*answer*: The 10 smallest IDFs are for: (1) software, (2) new, (3) features, (4) use, (5) complete, (6) easy, (7 tie) cd, (7 tie) system, (7 tie) create, (10 tie) windows, (10 tie) 1. #These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories. # COMMAND ---------- # ANSWER # Quiz question: # For part (2d), do you think the terms are useful for entity resolution? # ( ) Yes # (*) No # # Why or why not? # ( ) These terms are useful for entity resolution because they describe distinguishing tokens in product descriptions # ( ) These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories. # COMMAND ---------- # MAGIC %md # MAGIC #### **(2e) IDF Histogram** # MAGIC Plot a histogram of IDF values.
Be sure to use appropriate scaling and bucketing for the data. # MAGIC First plot the histogram using `matplotlib` # COMMAND ---------- import matplotlib.pyplot as plt small_idf_values = idfsSmall.map(lambda s: s[1]).collect() fig = plt.figure(figsize=(8,3)) plt.hist(small_idf_values, 50, log=True) display(fig) pass # COMMAND ---------- from pyspark.sql import Row # Create a DataFrame and visualize using display() idfsToCountRow = idfsSmall.map(lambda (x, y): Row(token=x, value=y)) idfsToCountDF = sqlContext.createDataFrame(idfsToCountRow) display(idfsToCountDF) # COMMAND ---------- # ANSWER # Quiz question: # Using the plot in (2e), what conclusions can you draw from the distribution of weights? # # *ANSWER:* There is a long tail of rare words in the corpus (these have large IDF values). # [explanation] # There are gaps between IDF values because IDF is a function of a discrete variable, i.e., a document count. # [explanation] # COMMAND ---------- # MAGIC %md # MAGIC #### **(2f) Implement a TF-IDF function** # MAGIC Use your `tf` function to implement a `tfidf(tokens, idfs)` function that takes a list of tokens from a document and a Python dictionary of IDF weights and returns a Python dictionary mapping individual tokens to total TF-IDF weights. # MAGIC # MAGIC The steps your function should perform are: # MAGIC * Calculate the token frequencies (TF) for `tokens` # MAGIC * Create a Python dictionary where each token maps to the token's frequency times the token's IDF weight # MAGIC # MAGIC Use your `tfidf` function to compute the weights of Amazon product record 'b000hkgj8k'. To do this, we need to extract the record for the token from the tokenized small Amazon dataset and we need to convert the IDFs for the small dataset into a Python dictionary. We can do the first part, by using a `filter()` transformation to extract the matching record and a `collect()` action to return the value to the driver. # MAGIC # MAGIC For the second part, we use the [`collectAsMap()` action](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap) to return the IDFs to the driver as a Python dictionary. 
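# COMMAND ----------

# MAGIC %md
# MAGIC > **Worked example (illustrative only, not part of the graded lab):** for the token list `['cat', 'cat', 'dog']` the TF weights are `{'cat': 2/3, 'dog': 1/3}`; with made-up IDF weights `{'cat': 3.0, 'dog': 6.0}` the TF-IDF weights are `{'cat': 2.0, 'dog': 2.0}`. The cell below checks this by hand with the `tf` function from (2a); the tokens and IDF values are invented for this sketch.

# COMMAND ----------

# Tiny TF-IDF sanity check on made-up tokens and IDF weights.
# Both tokens should come out with a weight of about 2.0.
toyTokens = ['cat', 'cat', 'dog']
toyIdfs = {'cat': 3.0, 'dog': 6.0}
toyTfs = tf(toyTokens)
print dict((t, toyTfs[t] * toyIdfs[t]) for t in toyTfs)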
# COMMAND ---------- # ANSWER def tfidf(tokens, idfs): """ Compute TF-IDF Args: tokens (list of str): input list of tokens from tokenize idfs (dictionary): record to IDF value Returns: dictionary: a dictionary of records to TF-IDF values """ tfs = tf(tokens) return { t: tfs[t] * idfs[t] for t in tfs } rec_b000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1] idfsSmallWeights = idfsSmall.collectAsMap() rec_b000hkgj8k_weights = tfidf(rec_b000hkgj8k, idfsSmallWeights) print 'Amazon record "b000hkgj8k" has tokens and weights:\n%s' % rec_b000hkgj8k_weights # COMMAND ---------- # TEST Implement a TF-IDF function (2f) Test.assertEquals(rec_b000hkgj8k_weights, {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights') # COMMAND ---------- # PRIVATE_TEST Implement a TF-IDF function (2f) Test.assertEquals(rec_b000hkgj8k_weights, {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights') # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 3: ER as Text Similarity - Cosine Similarity** # MAGIC Now we are ready to do text comparisons in a formal way. The metric of string distance we will use is called **[cosine similarity][cosine]**. We will treat each document as a vector in some high dimensional space. Then, to compare two documents we compute the cosine of the angle between their two document vectors. This is *much* easier than it sounds. # MAGIC # MAGIC The first question to answer is how do we represent documents as vectors? The answer is familiar: bag-of-words! We treat each unique token as a dimension, and treat token weights as magnitudes in their respective token dimensions. For example, suppose we use simple counts as weights, and we want to interpret the string "Hello, world! Goodbye, world!" as a vector. Then in the "hello" and "goodbye" dimensions the vector has value 1, in the "world" dimension it has value 2, and it is zero in all other dimensions. # MAGIC # MAGIC The next question is: given two vectors how do we find the cosine of the angle between them? Recall the formula for the dot product of two vectors: # MAGIC \\[ a \cdot b = \| a \| \| b \| \cos \theta \\] # MAGIC Here \\( a \cdot b = \sum a_i b_i \\) is the ordinary dot product of two vectors, and \\( \|a\| = \sqrt{ \sum a_i^2 } \\) is the norm of \\( a \\). # MAGIC # MAGIC We can rearrange terms and solve for the cosine to find it is simply the normalized dot product of the vectors. With our vector model, the dot product and norm computations are simple functions of the bag-of-words document representations, so we now have a formal way to compute similarity: # MAGIC \\[ similarity = \cos \theta = \frac{a \cdot b}{\|a\| \|b\|} = \frac{\sum a_i b_i}{\sqrt{\sum a_i^2} \sqrt{\sum b_i^2}} \\] # MAGIC # MAGIC Setting aside the algebra, the geometric interpretation is more intuitive. The angle between two document vectors is small if they share many tokens in common, because they are pointing in roughly the same direction. For that case, the cosine of the angle will be large. Otherwise, if the angle is large (and they have few words in common), the cosine is small. 
Therefore, cosine similarity scales proportionally with our intuitive sense of similarity. # MAGIC [cosine]: https://en.wikipedia.org/wiki/Cosine_similarity # COMMAND ---------- # MAGIC %md # MAGIC #### **(3a) Implement the components of a `cosineSimilarity` function** # MAGIC Implement the components of a `cosineSimilarity` function. # MAGIC Use the `tokenize` and `tfidf` functions, and the IDF weights from Part 2 for extracting tokens and assigning them weights. # MAGIC The steps you should perform are: # MAGIC * Define a function `dotprod` that takes two Python dictionaries and produces the dot product of them, where the dot product is defined as the sum of the product of values for tokens that appear in *both* dictionaries # MAGIC * Define a function `norm` that returns the square root of the dot product of a dictionary and itself # MAGIC * Define a function `cossim` that returns the dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary # COMMAND ---------- # ANSWER import math def dotprod(a, b): return sum([a[t] * b[t] for t in a if t in b]) def norm(a): return math.sqrt(dotprod(a, a)) def cossim(a, b): return dotprod(a, b) / norm(a) / norm(b) testVec1 = {'foo': 2, 'bar': 3, 'baz': 5 } testVec2 = {'foo': 1, 'bar': 0, 'baz': 20 } dp = dotprod(testVec1, testVec2) nm = norm(testVec1) print dp, nm # COMMAND ---------- # TEST Implement the components of a cosineSimilarity function (3a) Test.assertEquals(dp, 102, 'incorrect dp') Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrrect nm') # COMMAND ---------- # PRIVATE_TEST Implement the components of a cosineSimilarity function (3a) Test.assertEquals(dp, 102, 'incorrect dp') Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrrect nm') # COMMAND ---------- # MAGIC %md # MAGIC #### **(3b) Implement a `cosineSimilarity` function** # MAGIC Implement a `cosineSimilarity(string1, string2, idfsDictionary)` function that takes two strings and a dictionary of IDF weights, and computes their cosine similarity in the context of some global IDF weights. # MAGIC # MAGIC The steps you should perform are: # MAGIC * Apply your `tfidf` function to the tokenized first and second strings, using the dictionary of IDF weights # MAGIC * Compute and return your `cossim` function applied to the results of the two `tfidf` functions # COMMAND ---------- # ANSWER def cosineSimilarity(string1, string2, idfsDictionary): """ Compute cosine similarity between two strings Args: string1 (str): first string string2 (str): second string idfsDictionary (dictionary): a dictionary of IDF values Returns: cossim: cosine similarity value """ w1 = tfidf(tokenize(string1), idfsDictionary) w2 = tfidf(tokenize(string2), idfsDictionary) return cossim(w1, w2) cossimAdobe = cosineSimilarity('Adobe Photoshop', 'Adobe Illustrator', idfsSmallWeights) print cossimAdobe # COMMAND ---------- # TEST Implement a cosineSimilarity function (3b) Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe') # COMMAND ---------- # PRIVATE_TEST Implement a cosineSimilarity function (3b) Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe') # COMMAND ---------- # MAGIC %md # MAGIC #### **(3c) Perform Entity Resolution** # MAGIC Now we can finally do some entity resolution! # MAGIC For *every* product record in the small Google dataset, use your `cosineSimilarity` function to compute its similarity to every record in the small Amazon dataset. 
Then, build a dictionary mapping `(Google URL, Amazon ID)` tuples to similarity scores between 0 and 1. # MAGIC We'll do this computation two different ways: first we'll do it without a broadcast variable, and then we'll use a broadcast variable. # MAGIC # MAGIC The steps you should perform are: # MAGIC * Create an RDD that is a combination of the small Google and small Amazon datasets that has as elements all pairs of elements (a, b) where a is in self and b is in other. The result will be an RDD of the form: `[ ((Google URL1, Google String1), (Amazon ID1, Amazon String1)), ((Google URL1, Google String1), (Amazon ID2, Amazon String2)), ((Google URL2, Google String2), (Amazon ID1, Amazon String1)), ... ]` # MAGIC * Define a worker function that, given an element from the combination RDD, computes the cosine similarity for the two records in the element # MAGIC * Apply the worker function to every element in the RDD # MAGIC # MAGIC Now, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`. # COMMAND ---------- # ANSWER crossSmall = (googleSmall .cartesian(amazonSmall) .cache()) def computeSimilarity(record): """ Compute similarity on a combination record Args: record: a pair, (google record, amazon record) Returns: pair: a pair, (google URL, amazon ID, cosine similarity value) """ googleRec = record[0] amazonRec = record[1] googleURL = googleRec[0] amazonID = amazonRec[0] googleValue = googleRec[1] amazonValue = amazonRec[1] cs = cosineSimilarity(googleValue, amazonValue, idfsSmallWeights) return (googleURL, amazonID, cs) similarities = (crossSmall .map(computeSimilarity) .cache()) def similar(amazonID, googleURL): """ Return similarity value Args: amazonID: amazon ID googleURL: google URL Returns: similar: cosine similarity value """ return (similarities .filter(lambda record: (record[0] == googleURL and record[1] == amazonID)) .collect()[0][2]) similarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561') print 'Requested similarity is %s.' % similarityAmazonGoogle # COMMAND ---------- # TEST Perform Entity Resolution (3c) Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') # COMMAND ---------- # PRIVATE_TEST Perform Entity Resolution (3c) Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') similarityAnother = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680') Test.assertTrue(abs(similarityAnother - 0.093899589276) < 0.0000001, 'incorrect another similarity test') # COMMAND ---------- # MAGIC %md # MAGIC #### **(3d) Perform Entity Resolution with Broadcast Variables** # MAGIC The solution in (3c) works well for small datasets, but it requires Spark to (automatically) send the `idfsSmallWeights` variable to all the workers. If we didn't `cache()` similarities, then it might have to be recreated if we run `similar()` multiple times. This would cause Spark to send `idfsSmallWeights` every time. # MAGIC # MAGIC Instead, we can use a broadcast variable - we define the broadcast variable in the driver and then we can refer to it in each worker. Spark saves the broadcast variable at each worker, so it is only sent once.
# MAGIC # MAGIC The steps you should perform are: # MAGIC * Define a `computeSimilarityBroadcast` function that, given an element from the combination RDD, computes the cosine similarity for the two records in the element. This will be the same as the worker function `computeSimilarity` in (3c) except that it uses a broadcast variable. # MAGIC * Apply the worker function to every element in the RDD # MAGIC # MAGIC Again, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`. # COMMAND ---------- # ANSWER def computeSimilarityBroadcast(record): """ Compute similarity on a combination record, using Broadcast variable Args: record: a pair, (google record, amazon record) Returns: pair: a pair, (google URL, amazon ID, cosine similarity value) """ googleRec = record[0] amazonRec = record[1] googleURL = googleRec[0] amazonID = amazonRec[0] googleValue = googleRec[1] amazonValue = amazonRec[1] cs = cosineSimilarity(googleValue, amazonValue, idfsSmallBroadcast.value) return (googleURL, amazonID, cs) idfsSmallBroadcast = sc.broadcast(idfsSmallWeights) similaritiesBroadcast = (crossSmall .map(computeSimilarityBroadcast) .cache()) def similarBroadcast(amazonID, googleURL): """ Return similarity value, computed using Broadcast variable Args: amazonID: amazon ID googleURL: google URL Returns: similar: cosine similarity value """ return (similaritiesBroadcast .filter(lambda record: (record[0] == googleURL and record[1] == amazonID)) .collect()[0][2]) similarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561') print 'Requested similarity is %s.' % similarityAmazonGoogleBroadcast # COMMAND ---------- # TEST Perform Entity Resolution with Broadcast Variables (3d) from pyspark import Broadcast Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast') Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value') Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') # COMMAND ---------- # PRIVATE_TEST Perform Entity Resolution with Broadcast Variables (3d) from pyspark import Broadcast Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast') Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value') Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001, 'incorrect similarityAmazonGoogle') similarityAnotherBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680') Test.assertTrue(abs(similarityAnotherBroadcast - 0.093899589276) < 0.0000001, 'incorrect another similarity test') # COMMAND ---------- # MAGIC %md # MAGIC #### **(3e) Perform a Gold Standard evaluation** # MAGIC # MAGIC First, we'll load the "gold standard" data and use it to answer several questions. We read and parse the Gold Standard data, where the format of each line is "Amazon Product ID","Google URL".
The resulting RDD has elements of the form ("AmazonID GoogleURL", 'gold') # COMMAND ---------- GOLDFILE_PATTERN = '^(.+),(.+)' # Parse each line of a data file using the specified regular expression pattern def parse_goldfile_line(goldfile_line): """ Parse a line from the 'gold standard' data file Args: goldfile_line: a line of data Returns: pair: ((key, 'gold'), 1) if parsing succeeds, or (goldfile_line, 0 or -1) otherwise """ match = re.search(GOLDFILE_PATTERN, goldfile_line) if match is None: print 'Invalid goldfile line: %s' % goldfile_line return (goldfile_line, -1) elif match.group(1) == '"idAmazon"': print 'Header datafile line: %s' % goldfile_line return (goldfile_line, 0) else: key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2))) return ((key, 'gold'), 1) goldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH) gsRaw = (sc .textFile(goldfile) .map(parse_goldfile_line) .cache()) gsFailed = (gsRaw .filter(lambda s: s[1] == -1) .map(lambda s: s[0])) for line in gsFailed.take(10): print 'Invalid goldfile line: %s' % line goldStandard = (gsRaw .filter(lambda s: s[1] == 1) .map(lambda s: s[0]) .cache()) print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(), goldStandard.count(), gsFailed.count()) assert (gsFailed.count() == 0) assert (gsRaw.count() == (goldStandard.count() + 1)) # COMMAND ---------- # MAGIC %md # MAGIC #### Using the "gold standard" data we can answer the following questions: # MAGIC # MAGIC * How many true duplicate pairs are there in the small datasets? # MAGIC * What is the average similarity score for true duplicates? # MAGIC * What about for non-duplicates? # MAGIC The steps you should perform are: # MAGIC * Create a new `sims` RDD from the `similaritiesBroadcast` RDD, where each element consists of a pair of the form ("AmazonID GoogleURL", cosineSimilarityScore). An example entry from `sims` is: ('b000bi7uqs http://www.google.com/base/feeds/snippets/18403148885652932189', 0.40202896125621296) # MAGIC * Combine the `sims` RDD with the `goldStandard` RDD by creating a new `trueDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs that appear in both the `sims` RDD and `goldStandard` RDD. Hint: you can do this using the join() transformation. # MAGIC * Count the number of true duplicate pairs in the `trueDupsRDD` dataset # MAGIC * Compute the average similarity score for true duplicates in the `trueDupsRDD` dataset. Remember to use `float` for calculation # MAGIC * Create a new `nonDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs from the `similaritiesBroadcast` RDD that **do not** appear in both the *sims* RDD and gold standard RDD. # MAGIC * Compute the average similarity score for non-duplicates in the last dataset. Remember to use `float` for calculation # COMMAND ---------- # ANSWER sims = similaritiesBroadcast.map(lambda x: ("%s %s" % (x[1], x[0]), x[2])) trueDupsRDD = (sims .join(goldStandard) .map(lambda a: a[1][0])) trueDupsCount = trueDupsRDD.count() avgSimDups = float(trueDupsRDD.reduce(lambda a, b: a + b)) / float(trueDupsCount) nonDupsRDD = (sims .leftOuterJoin(goldStandard) .filter(lambda x: (x[1][1] is None)) .map(lambda a: a[1][0])) avgSimNon = float(nonDupsRDD.reduce(lambda a, b: a + b)) / float(sims.count() - trueDupsCount) print 'There are %s true duplicates.' % trueDupsCount print 'The average similarity of true duplicates is %s.' % avgSimDups print 'And for non duplicates, it is %s.'
% avgSimNon # COMMAND ---------- # TEST Perform a Gold Standard evaluation (3e) Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount') Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups') Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon') # COMMAND ---------- # PRIVATE_TEST Perform a Gold Standard evaluation (3e) Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount') Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups') Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon') # COMMAND ---------- # ANSWER # Quiz question: # Based on the answers to the questions in part (3e), is cosine similarity doing a good job, qualitatively speaking, of identifying duplicates? # (*) Yes # ( ) No # *answer*: Cosine similarity looks useful, because duplicates on average are 250X more similar than non-duplicates. As long as variance isn't too high, that's a good signal. # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 4: Scalable ER** # MAGIC In the previous parts, we built a text similarity function and used it for small scale entity resolution. Our implementation is limited by its quadratic run time complexity, and is not practical for even modestly sized datasets. In this part, we will implement a more scalable algorithm and use it to do entity resolution on the full dataset. # MAGIC # MAGIC #### Inverted Indices # MAGIC To improve our ER algorithm from the earlier parts, we should begin by analyzing its running time. In particular, the algorithm above is quadratic in two ways. First, we did a lot of redundant computation of tokens and weights, since each record was reprocessed every time it was compared. Second, we made quadratically many token comparisons between records. # MAGIC # MAGIC The first source of quadratic overhead can be eliminated with precomputation and look-up tables, but the second source is a little more tricky. In the worst case, every token in every record in one dataset exists in every record in the other dataset, and therefore every token makes a non-zero contribution to the cosine similarity. In this case, token comparison is unavoidably quadratic. # MAGIC # MAGIC But in reality most records have nothing (or very little) in common. Moreover, it is typical for a record in one dataset to have at most one duplicate record in the other dataset (this is the case assuming each dataset has been de-duplicated against itself). In this case, the output is linear in the size of the input and we can hope to achieve linear running time. # MAGIC # MAGIC An [**inverted index**](https://en.wikipedia.org/wiki/Inverted_index) is a data structure that will allow us to avoid making quadratically many token comparisons. It maps each token in the dataset to the list of documents that contain the token. So, instead of comparing, record by record, each token to every other token to see if they match, we will use inverted indices to *look up* records that match on a particular token. # MAGIC # MAGIC > **Note on terminology**: In text search, a *forward* index maps documents in a dataset to the tokens they contain. An *inverted* index supports the inverse mapping. # MAGIC # MAGIC > **Note**: For this section, use the complete Google and Amazon datasets, not the samples # COMMAND ---------- # MAGIC %md # MAGIC #### **(4a) Tokenize the full dataset** # MAGIC Tokenize each of the two full datasets for Google and Amazon. 
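# COMMAND ----------

# Illustrative aside (plain Python, not part of the graded answer): a tiny sketch of the
# inverted-index idea described in the Part 4 introduction above, built from a made-up
# forward index of three toy documents.
toyForwardIndex = {'doc1': ['adobe', 'photoshop'], 'doc2': ['adobe', 'acrobat'], 'doc3': ['acrobat']}
toyInvertedIndex = {}
for docID, tokens in sorted(toyForwardIndex.iteritems()):
    for token in tokens:
        toyInvertedIndex.setdefault(token, []).append(docID)
print toyInvertedIndex['adobe']  # ['doc1', 'doc2'] -- records that match on a token are found by look-up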
# COMMAND ---------- # ANSWER amazonFullRecToToken = amazon.map(lambda s: (s[0], tokenize(s[1]))) googleFullRecToToken = google.map(lambda s: (s[0], tokenize(s[1]))) print 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(), googleFullRecToToken.count()) # COMMAND ---------- # TEST Tokenize the full dataset (4a) Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()') Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()') # COMMAND ---------- # PRIVATE_TEST Tokenize the full dataset (4a) Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()') Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **(4b) Compute IDFs and TF-IDFs for the full datasets** # MAGIC # MAGIC We will reuse your code from above to compute IDF weights for the complete combined datasets. # MAGIC The steps you should perform are: # MAGIC * Create a new `fullCorpusRDD` that contains the tokens from the full Amazon and Google datasets. # MAGIC * Apply your `idfs` function to the `fullCorpusRDD` # MAGIC * Create a broadcast variable containing a dictionary of the IDF weights for the full dataset. # MAGIC * For each of the Amazon and Google full datasets, create weight RDDs that map IDs/URLs to TF-IDF weighted token vectors. # COMMAND ---------- # ANSWER fullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken) idfsFull = idfs(fullCorpusRDD) idfsFullCount = idfsFull.count() print 'There are %s unique tokens in the full datasets.' % idfsFullCount # Recompute IDFs for full dataset idfsFullWeights = idfsFull.collectAsMap() idfsFullBroadcast = sc.broadcast(idfsFullWeights) # Pre-compute TF-IDF weights. Build mappings from record ID weight vector. amazonWeightsRDD = amazonFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value))) googleWeightsRDD = googleFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value))) print 'There are %s Amazon weights and %s Google weights.' % (amazonWeightsRDD.count(), googleWeightsRDD.count()) # COMMAND ---------- # TEST Compute IDFs and TF-IDFs for the full datasets (4b) Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount') Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()') Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()') # COMMAND ---------- # PRIVATE_TEST Compute IDFs and TF-IDFs for the full datasets (4b) Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount') Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()') Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **(4c) Compute Norms for the weights from the full datasets** # MAGIC # MAGIC We will reuse your code from above to compute norms of the IDF weights for the complete combined dataset. # MAGIC The steps you should perform are: # MAGIC * Create two collections, one for each of the full Amazon and Google datasets, where IDs/URLs map to the norm of the associated TF-IDF weighted token vectors. 
# MAGIC * Convert each collection into a broadcast variable, containing a dictionary of the norm of IDF weights for the full dataset # COMMAND ---------- # ANSWER amazonNorms = amazonWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap() amazonNormsBroadcast = sc.broadcast(amazonNorms) googleNorms = googleWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap() googleNormsBroadcast = sc.broadcast(googleNorms) print 'There are %s Amazon norms and %s Google norms.' % (len(amazonNorms), len(googleNorms)) # COMMAND ---------- # TEST Compute Norms for the weights from the full datasets (4c) Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast') Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value') Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast') Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value') # COMMAND ---------- # PRIVATE_TEST Compute Norms for the weights from the full datasets (4c) Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast') Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value') Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast') Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value') # COMMAND ---------- # MAGIC %md # MAGIC #### **(4d) Create inverted indicies from the full datasets** # MAGIC # MAGIC Build inverted indices of both data sources. # MAGIC The steps you should perform are: # MAGIC * Create an invert function that given a pair of (ID/URL, TF-IDF weighted token vector), returns a list of pairs of (token, ID/URL). Recall that the TF-IDF weighted token vector is a Python dictionary with keys that are tokens and values that are weights. # MAGIC * Use your invert function to convert the full Amazon and Google TF-IDF weighted token vector datasets into two RDDs where each element is a pair of a token and an ID/URL that contain that token. These are inverted indicies. # COMMAND ---------- # ANSWER def invert(record): """ Invert (ID, tokens) to a list of (token, ID) Args: record: a pair, (ID, token vector) Returns: pairs: a list of pairs of token to ID """ value = record[0] keys = record[1].keys() pairs = [] for key in keys: pairs.append((key, value)) return (pairs) amazonInvPairsRDD = (amazonWeightsRDD .flatMap(invert) .cache()) googleInvPairsRDD = (googleWeightsRDD .flatMap(invert) .cache()) print 'There are %s Amazon inverted pairs and %s Google inverted pairs.' 
% (amazonInvPairsRDD.count(), googleInvPairsRDD.count()) # COMMAND ---------- # TEST Create inverted indicies from the full datasets (4d) invertedPair = invert((1, {'foo': 2})) Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result') Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()') Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()') # COMMAND ---------- # PRIVATE_TEST Create inverted indicies from the full datasets (4d) invertedPair = invert((1, {'foo': 2})) Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result') Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()') Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **(4e) Identify common tokens from the full dataset** # MAGIC # MAGIC We are now in position to efficiently perform ER on the full datasets. Implement the following algorithm to build an RDD that maps a pair of (ID, URL) to a list of tokens they share in common: # MAGIC * Using the two inverted indicies (RDDs where each element is a pair of a token and an ID or URL that contains that token), create a new RDD that contains only tokens that appear in both datasets. This will yield an RDD of pairs of (token, iterable(ID, URL)). # MAGIC * We need a mapping from (ID, URL) to token, so create a function that will swap the elements of the RDD you just created to create this new RDD consisting of ((ID, URL), token) pairs. # MAGIC * Finally, create an RDD consisting of pairs mapping (ID, URL) to all the tokens the pair shares in common # COMMAND ---------- # ANSWER def swap(record): """ Swap (token, (ID, URL)) to ((ID, URL), token) Args: record: a pair, (token, (ID, URL)) Returns: pair: ((ID, URL), token) """ token = record[0] keys = (record[1][0], record[1][1]) return (keys, token) commonTokens = (amazonInvPairsRDD.join(googleInvPairsRDD) .map(swap) .groupByKey() .map(lambda rec: (rec[0], list(rec[1]))) .cache()) print 'Found %d common tokens' % commonTokens.count() # COMMAND ---------- # TEST Identify common tokens from the full dataset (4e) Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()') # COMMAND ---------- # PRIVATE_TEST Identify common tokens from the full dataset (4e) Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **(4f) Identify common tokens from the full dataset** # MAGIC # MAGIC Use the data structures from parts **(4a)** and **(4e)** to build a dictionary to map record pairs to cosine similarity scores. # MAGIC The steps you should perform are: # MAGIC * Create two broadcast dictionaries from the amazonWeights and googleWeights RDDs # MAGIC * Create a `fastCosinesSimilarity` function that takes in a record consisting of the pair ((Amazon ID, Google URL), tokens list) and computes the sum for each of the tokens in the token list of the products of the Amazon weight for the token times the Google weight for the token. The sum should then be divided by the norm for the Google URL and then divided by the norm for the Amazon ID. The function should return this value in a pair with the key being the (Amazon ID, Google URL). 
*Make sure you use broadcast variables you created for both the weights and norms* # MAGIC * Apply your `fastCosinesSimilarity` function to the common tokens from the full dataset # COMMAND ---------- # ANSWER amazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap()) googleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap()) def fastCosineSimilarity(record): """ Compute Cosine Similarity using Broadcast variables Args: record: ((ID, URL), token) Returns: pair: ((ID, URL), cosine similarity value) """ amazonRec = record[0][0] googleRec = record[0][1] tokens = record[1] s = sum([amazonWeightsBroadcast.value[amazonRec][t] * googleWeightsBroadcast.value[googleRec][t] for t in tokens]) value = s / googleNormsBroadcast.value[googleRec] / amazonNormsBroadcast.value[amazonRec] key = (amazonRec, googleRec) return (key, value) similaritiesFullRDD = (commonTokens .map(fastCosineSimilarity) .cache()) print similaritiesFullRDD.count() # COMMAND ---------- # TEST Identify common tokens from the full dataset (4f) similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect() Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)') Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity') Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()') # COMMAND ---------- # PRIVATE_TEST Identify common tokens from the full dataset (4f) similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect() Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)') Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity') Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()') # COMMAND ---------- # MAGIC %md # MAGIC #### **Part 5: Analysis** # MAGIC # MAGIC Now we have an authoritative list of record-pair similarities, but we need a way to use those similarities to decide if two records are duplicates or not. The simplest approach is to pick a **threshold**. Pairs whose similarity is above the threshold are declared duplicates, and pairs below the threshold are declared distinct. # MAGIC # MAGIC To decide where to set the threshold we need to understand what kind of errors result at different levels. If we set the threshold too low, we get more **false positives**, that is, record-pairs we say are duplicates that in reality are not. If we set the threshold too high, we get more **false negatives**, that is, record-pairs that really are duplicates but that we miss. # MAGIC # MAGIC ER algorithms are evaluated by the common metrics of information retrieval and search called **precision** and **recall**. Precision asks of all the record-pairs marked duplicates, what fraction are true duplicates? Recall asks of all the true duplicates in the data, what fraction did we successfully find? As with false positives and false negatives, there is a trade-off between precision and recall. 
A third metric, called **F-measure**, takes the harmonic mean of precision and recall to measure overall goodness in a single value: # MAGIC \\[ Fmeasure = 2 \frac{precision * recall}{precision + recall} \\] # MAGIC # MAGIC > **Note**: In this part, we use the "gold standard" mapping from the included file to look up true duplicates, and the results of Part 4. # MAGIC # MAGIC > **Note**: In this part, you will not be writing any code. We've written all of the code for you. Run each cell and then answer the quiz questions on Studio. # COMMAND ---------- # MAGIC %md # MAGIC #### **(5a) Counting True Positives, False Positives, and False Negatives** # MAGIC # MAGIC We need functions that count True Positives (true duplicates above the threshold), False Positives, and False Negatives: # MAGIC * We start by creating the `simsFullRDD` from our `similaritiesFullRDD`, which consists of pairs of ((Amazon ID, Google URL), similarity score) # MAGIC * From this RDD, we create an RDD consisting of only the similarity scores # MAGIC * To look up the similarity scores for true duplicates, we perform a left outer join of the `goldStandard` RDD with `simsFullRDD` and extract the similarity scores, using 0 for gold-standard pairs that do not appear in `simsFullRDD` # COMMAND ---------- # Create an RDD of ((Amazon ID, Google URL), similarity score) simsFullRDD = similaritiesFullRDD.map(lambda x: ("%s %s" % (x[0][0], x[0][1]), x[1])) assert (simsFullRDD.count() == 2441100) # Create an RDD of just the similarity scores simsFullValuesRDD = (simsFullRDD .map(lambda x: x[1]) .cache()) assert (simsFullValuesRDD.count() == 2441100) # Look up all similarity scores for true duplicates # This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True Positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives). def gs_value(record): if (record[1][1] is None): return 0 else: return record[1][1] # Join the gold standard and simsFullRDD, and then extract the similarity scores using the helper function trueDupSimsRDD = (goldStandard .leftOuterJoin(simsFullRDD) .map(gs_value) .cache()) print 'There are %s true duplicates.' % trueDupSimsRDD.count() assert(trueDupSimsRDD.count() == 1300) # COMMAND ---------- # MAGIC %md # MAGIC The next step is to pick a threshold between 0 and 1 for the count of True Positives (true duplicates above the threshold). However, we would like to explore many different thresholds. # MAGIC # MAGIC To do this, we divide the space of thresholds into 100 bins, and take the following actions: # MAGIC * We use Spark Accumulators to implement our counting function. We define a custom accumulator type, `VectorAccumulatorParam`, along with functions to initialize the accumulator's vector to zero, and to add two vectors. Note that we have to use the += operator because you can only add to an accumulator. # MAGIC * We create a helper function to create a list with one entry (bit) set to a value and all others set to 0. # MAGIC * We create 101 bins for the 100 threshold values between 0 and 1. # MAGIC * Now, for each similarity score, we can compute the false positives. We do this by adding a count of one to the appropriate bin of the vector for each similarity score. Then we remove the true positives from the vector by using the gold standard data. # MAGIC * We define functions for computing the false positives, false negatives, and true positives for a given threshold (a small illustrative sketch of this binning follows in the next cell).
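# COMMAND ----------

# Illustrative sketch (toy values, not part of the lab code): how the per-bin counting
# described above behaves, using a plain Python list in place of a Spark Accumulator.
# The binning mirrors the bin() function in the next cell with nthresholds = 100.
exampleScores = [0.02, 0.431, 0.437, 0.99]
exampleCounts = [0] * 101
for score in exampleScores:
    exampleCounts[int(score * 100)] += 1
print exampleCounts[43]  # prints 2: both 0.431 and 0.437 fall into bin 43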
# COMMAND ---------- from pyspark.accumulators import AccumulatorParam class VectorAccumulatorParam(AccumulatorParam): # Initialize the VectorAccumulator to 0 def zero(self, value): return [0] * len(value) # Add two VectorAccumulator variables def addInPlace(self, val1, val2): for i in xrange(len(val1)): val1[i] += val2[i] return val1 # Return a list with entry x set to value and all other entries set to 0 def set_bit(x, value, length): bits = [] for y in xrange(length): if (x == y): bits.append(value) else: bits.append(0) return bits # Pre-bin counts of false positives for different threshold ranges BINS = 101 nthresholds = 100 def bin(similarity): return int(similarity * nthresholds) # fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i zeros = [0] * BINS fpCounts = sc.accumulator(zeros, VectorAccumulatorParam()) def add_element(score): global fpCounts b = bin(score) fpCounts += set_bit(b, 1, BINS) simsFullValuesRDD.foreach(add_element) # Remove true positives from FP counts def sub_element(score): global fpCounts b = bin(score) fpCounts += set_bit(b, -1, BINS) trueDupSimsRDD.foreach(sub_element) def falsepos(threshold): fpList = fpCounts.value return sum([fpList[b] for b in range(0, BINS) if float(b) / nthresholds >= threshold]) def falseneg(threshold): return trueDupSimsRDD.filter(lambda x: x < threshold).count() def truepos(threshold): return trueDupSimsRDD.count() - falsenegDict[threshold] # COMMAND ---------- # MAGIC %md # MAGIC #### **(5b) Precision, Recall, and F-measures** # MAGIC We define functions so that we can compute the [Precision](https://en.wikipedia.org/wiki/Precision_and_recall), [Recall](https://en.wikipedia.org/wiki/Precision_and_recall), and [F-measure](https://en.wikipedia.org/wiki/Precision_and_recall#F-measure) as a function of threshold value: # MAGIC * Precision = true-positives / (true-positives + false-positives) # MAGIC * Recall = true-positives / (true-positives + false-negatives) # MAGIC * F-measure = 2 x Recall x Precision / (Recall + Precision) # COMMAND ---------- # Precision = true-positives / (true-positives + false-positives) # Recall = true-positives / (true-positives + false-negatives) # F-measure = 2 x Recall x Precision / (Recall + Precision) def precision(threshold): tp = trueposDict[threshold] return float(tp) / (tp + falseposDict[threshold]) def recall(threshold): tp = trueposDict[threshold] return float(tp) / (tp + falsenegDict[threshold]) def fmeasure(threshold): r = recall(threshold) p = precision(threshold) return 2 * r * p / (r + p) # COMMAND ---------- # MAGIC %md # MAGIC #### **(5c) Line Plots** # MAGIC We can make line plots of precision, recall, and F-measure as a function of threshold value, for thresholds between 0.0 and 1.0. You can change `nthresholds` (above in part **(5a)**) to change the threshold values to plot. 
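# COMMAND ----------

# Illustrative aside (not required by the lab): once the falsepos/falseneg/truepos
# dictionaries in the next cell have been built, a reasonable operating threshold could be
# read off as the one maximizing the F-measure, e.g.:
#   bestThreshold = max(thresholds, key=fmeasure)
#   print 'Best threshold: %s (F-measure: %s)' % (bestThreshold, fmeasure(bestThreshold))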
# COMMAND ---------- thresholds = [float(n) / nthresholds for n in range(0, nthresholds)] falseposDict = dict([(t, falsepos(t)) for t in thresholds]) falsenegDict = dict([(t, falseneg(t)) for t in thresholds]) trueposDict = dict([(t, truepos(t)) for t in thresholds]) precisions = [precision(t) for t in thresholds] recalls = [recall(t) for t in thresholds] fmeasures = [fmeasure(t) for t in thresholds] print precisions[0], fmeasures[0] assert (abs(precisions[0] - 0.000532546802671) < 0.0000001) assert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001) fig = plt.figure() plt.plot(thresholds, precisions) plt.plot(thresholds, recalls) plt.plot(thresholds, fmeasures) plt.legend(['Precision', 'Recall', 'F-measure']) display(fig) pass # COMMAND ---------- # Create a DataFrame and visualize using display() graph = [(t, precision(t), recall(t),fmeasure(t)) for t in thresholds] graphRDD = sc.parallelize(graph) graphRow = graphRDD.map(lambda (t, x, y, z): Row(threshold=t, precision=x, recall=y, fmeasure=z)) graphDF = sqlContext.createDataFrame(graphRow) display(graphDF) # COMMAND ---------- # MAGIC %md # MAGIC #### Discussion # MAGIC # MAGIC State-of-the-art tools can get an F-measure of about 60% on this dataset. In this lab exercise, our best F-measure is closer to 40%. Look at some examples of errors (both False Positives and False Negatives) and think about what went wrong. # MAGIC # MAGIC #### There are several ways we might improve our simple classifier, including: # MAGIC * Using additional attributes # MAGIC * Performing better featurization of our textual data (e.g., stemming, n-grams, etc.) # MAGIC * Using different similarity functions
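# COMMAND ----------

# Illustrative sketch (not part of the lab): one of the featurization improvements
# suggested above -- appending word bigrams to the unigram token stream. `tokenize` is
# the function defined earlier in this lab; `tokenizeWithBigrams` is a hypothetical helper.
def tokenizeWithBigrams(string):
    unigrams = tokenize(string)
    bigrams = ['%s %s' % (a, b) for (a, b) in zip(unigrams, unigrams[1:])]
    return unigrams + bigrams

print tokenizeWithBigrams('adobe photoshop cs3')
# e.g. ['adobe', 'photoshop', 'cs3', 'adobe photoshop', 'photoshop cs3'],
# with the exact unigrams depending on tokenize's stopword handling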
unlicense
iarroyof/distributionalSemanticStabilityThesis
mklObj.py
2
55729
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys __author__ = 'Ignacio Arroyo-Fernandez' from modshogun import * from tools.load import LoadMatrix from sklearn.metrics import r2_score import random from math import sqrt import numpy from os import getcwd from sys import stderr from pdb import set_trace as st def open_configuration_file(fileName): """ Loads the input data configuration file. Lines which start with '#' are ignored. No lines other than configuration ones (not even blank ones) are allowed at the top. The number of configuration lines at the top must be either three or five (see below for the allowed contents). The first line specifies the train data file in sparse market matrix format. The second line specifies the test data file in sparse market matrix format. The third line specifies the train labels file. One scalar per line must be associated as the label of a vector in the training data. The fourth line specifies the test labels file. One scalar per line must be associated as the label of a vector in the test data. The fifth line indicates options for the MKL object: First character : Problem type : valid_options = {r: regression, b: binary, m: multiclass} Second character: Machine mode : valid_options = {l: learning_mode, p: pattern_recognition_mode} Any other characters, or a different number of them, will be ignored or caught as errors. No other kind of content is allowed on configuration lines (e.g. inline comments). Training data (and its labels) is optional. Whenever five configuration lines are not detected in this file, the first line will be considered as the test data file name, the second line as the test labels file and the third line as the MKL options. An error exception will be raised otherwise (e.g. neither three nor five configuration lines). """ with open(fileName) as f: configuration_lines = f.read().splitlines() problem_modes = {'r':'regression', 'b':'binary', 'm':'multiclass'} machine_modes = {'l':'learning', 'p':'pattern_recognition'} cls = 0 # Counted number of configuration lines from top. ncls = 5 # Number of configuration lines allowed. for line in configuration_lines: if not line.startswith('#'): cls += 1 else: break if cls == ncls: mode = configuration_lines[4] configuration = {} if len(mode) == 2: try: configuration['problem_mode'] = problem_modes[mode[0]] configuration['machine_mode'] = machine_modes[mode[1]] except KeyError: sys.stderr.write('\nERROR: Incorrect configuration file. Invalid machine mode. See help for mklObj.open_configuration_file().') else: sys.stderr.write('\nERROR: Incorrect configuration file. Invalid number of lines. See help for mklObj.open_configuration_file().') exit() Null = ncls # Null index if configuration['machine_mode'] == 'learning': # According to the availability of training files, indexes are set. trf = 0; tsf = 1; trlf = 2 # training_file, test_file, training_labels_file, test_labels_file, mode tslf = 3; mf = Null configuration_lines[ncls] = None del(configuration_lines[ncls+1:]) # All from the first '#' onwards is ignored.
elif configuration['machine_mode'] == 'pattern_recognition': trf = 0; tsf = 1; trlf = Null # training_file, test_file, test_labels_file, mode, model_file tslf = 2; mf = 3 configuration_lines[ncls] = None del(configuration_lines[ncls+1:]) configuration['training_file'] = configuration_lines[trf] configuration['test_file'] = configuration_lines[tsf] configuration['training_labels_file'] = configuration_lines[trlf] configuration['test_labels_file'] = configuration_lines[tslf] configuration['model_file'] = configuration_lines[mf] return configuration # Loading toy multiclass data from files def load_multiclassToy(dataRoute, fileTrain, fileLabels): """ :returns: [RealFeatures(training_data), RealFeatures(test_data), MulticlassLabels(train_labels), MulticlassLabels(test_labels)]. It is a set of Shogun training objects for raising a 10-class classification problem. This function is a modified version from http://www.shogun-toolbox.org/static/notebook/current/MKL.html Pay attention to input parameters because their documentations is valid for acquiring data for any multiclass problem with Shogun. :param dataRoute: The allocation directory of plain text file containing the train and test data. :param fileTrain: The name of the text file containing the train and test data. Each row of the file contains a sample vector and each column is a dimension of such a sample vector. :param fileLabels: The name of the text file containing the train and test labels. Each row must to correspond to each sample in fileTrain. It must be at the same directory specified by dataRoute. """ lm = LoadMatrix() dataSet = lm.load_numbers(dataRoute + fileTrain) labels = lm.load_labels(dataRoute + fileLabels) return (RealFeatures(dataSet.T[0:3 * len(dataSet.T) / 4].T), # Return the training set, 3/4 * dataSet RealFeatures(dataSet.T[(3 * len(dataSet.T) / 4):].T), # Return the test set, 1/4 * dataSet MulticlassLabels(labels[0:3 * len(labels) / 4]), # Return corresponding train and test labels MulticlassLabels(labels[(3 * len(labels) / 4):])) # 2D Toy data generator def generate_binToy(file_data = None, file_labels = None): """:return: [RealFeatures(train_data),RealFeatures(train_data),BinaryLabels(train_labels),BinaryLabels(test_labels)] This method generates random 2D training and test data for binary classification. The labels are {-1, 1} vectors. 
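    Two of the four GMM components generate the negative class and the other two the positive
    class. When 'file_data' and 'file_labels' are given, the generated samples and labels are
    shuffled and written to those files (nothing is returned in that case).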
""" num = 30 num_components = 4 means = numpy.zeros((num_components, 2)) means[0] = [-1, 1] means[1] = [2, -1.5] means[2] = [-1, -3] means[3] = [2, 1] covs = numpy.array([[1.0, 0.0], [0.0, 1.0]]) gmm = GMM(num_components) [gmm.set_nth_mean(means[i], i) for i in range(num_components)] [gmm.set_nth_cov(covs, i) for i in range(num_components)] gmm.set_coef(numpy.array([1.0, 0.0, 0.0, 0.0])) xntr = numpy.array([gmm.sample() for i in xrange(num)]).T xnte = numpy.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(numpy.array([0.0, 1.0, 0.0, 0.0])) xntr1 = numpy.array([gmm.sample() for i in xrange(num)]).T xnte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(numpy.array([0.0, 0.0, 1.0, 0.0])) xptr = numpy.array([gmm.sample() for i in xrange(num)]).T xpte = numpy.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(numpy.array([0.0, 0.0, 0.0, 1.0])) xptr1 = numpy.array([gmm.sample() for i in xrange(num)]).T xpte1 = numpy.array([gmm.sample() for i in xrange(5000)]).T if not file_data: return (RealFeatures(numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1)), # Train Data RealFeatures(numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), # Test Data BinaryLabels(numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num)))), # Train Labels BinaryLabels(numpy.concatenate((-numpy.ones(10000), numpy.ones(10000))))) # Test Labels else: data_set = numpy.concatenate((numpy.concatenate((xntr, xntr1, xptr, xptr1), axis=1), numpy.concatenate((xnte, xnte1, xpte, xpte1), axis=1)), axis = 1).T labels = numpy.concatenate((numpy.concatenate((-numpy.ones(2 * num), numpy.ones(2 * num))), numpy.concatenate((-numpy.ones(10000), numpy.ones(10000)))), axis = 1).astype(int) indexes = range(len(data_set)) numpy.random.shuffle(indexes) fd = open(file_data, 'w') fl = open(file_labels, 'w') for i in indexes: fd.write('%f %f\n' % (data_set[i][0],data_set[i][1])) fl.write(str(labels[i])+'\n') fd.close() fl.close() #numpy.savetxt(file_data, data_set, fmt='%f') #numpy.savetxt(file_labels, labels, fmt='%d') def load_binData(tr_ts_portion = None, fileTrain = None, fileLabels = None, dataRoute = None): if not dataRoute: dataRoute = getcwd()+'/' assert fileTrain and fileLabels # One (or both) of the input files are not given. assert (tr_ts_portion > 0.0 and tr_ts_portion <= 1.0) # The proportion of dividing the data set into train and test is in (0, 1] lm = LoadMatrix() dataSet = lm.load_numbers(dataRoute + fileTrain) labels = lm.load_labels(dataRoute + fileLabels) return (RealFeatures(dataSet.T[0:tr_ts_portion * len(dataSet.T)].T), # Return the training set, 3/4 * dataSet RealFeatures(dataSet.T[tr_ts_portion * len(dataSet.T):].T), # Return the test set, 1/4 * dataSet BinaryLabels(labels[0:tr_ts_portion * len(labels)]), # Return corresponding train and test labels BinaryLabels(labels[tr_ts_portion * len(labels):])) def load_regression_data(fileTrain = None, fileTest = None, fileLabelsTr = None, fileLabelsTs = None, sparse=False): """ This method loads data from sparse mtx file format ('CSR' preferably. See Python sci.sparse matrix format, also referred to as matrix market read and write methods). Label files should contain a column of these labels, e.g. see the contents of a three labels file: 1.23 -102.45 2.2998438943 Loading uniquely test labels is allowed (training labels are optional). In pattern_recognition mode no training labels are required. None is returned out for corresponding Shogun label object. 
Feature list returned: [features_tr, features_ts, labels_tr, labels_ts] Returned data is float type (dtype='float64'). This is the minimum data length allowed by Shogun given the sparse distance functions does not allow other ones, e.g. short (float32). """ assert fileTrain and fileTest and fileLabelsTs # Necessary test labels as well as test and train data sets specification. from scipy.io import mmread lm = LoadMatrix() if sparse: sci_data_tr = mmread(fileTrain).asformat('csr').astype('float64').T features_tr = SparseRealFeatures(sci_data_tr) # Reformated as CSR and 'float64' type for sci_data_ts = mmread(fileTest).asformat('csr').astype('float64').T # compatibility with SparseRealFeatures features_ts = SparseRealFeatures(sci_data_ts) else: features_tr = RealFeatures(lm.load_numbers(fileTrain).astype('float64')) features_ts = RealFeatures(lm.load_numbers(fileTest).astype('float64')) labels_ts = RegressionLabels(lm.load_labels(fileLabelsTs)) if fileTrain and fileLabelsTr: # sci_data_x: Any sparse data type in the file. labels_tr = RegressionLabels(lm.load_labels(fileLabelsTr)) else: labels_tr = None return features_tr, features_ts, labels_tr, labels_ts # Exception handling: class customException(Exception): """ This exception prevents training inconsistencies. It could be edited for accepting a complete dictionary of exceptions if desired. """ def __init__(self, message): self.parameter = message def __str__(self): return repr(self.parameter) # Basis kernel parameter generation: def sigmaGen(self, hyperDistribution, size, rango, parameters): """ :return: list of float This module generates the pseudorandom vector of widths for basis Gaussian kernels according to a distribution, i.e. hyperDistribution = {'linear', 'quadratic', 'loggauss'*, 'gaussian'*, 'triangular', # parameters[0] is the median of the distribution. parameters[1] has not effect. 'pareto', 'beta'*, 'gamma', 'weibull'}. Names marked with * require parameters, e.g. for 'gaussian', parameters = [mean, width]. The input 'size' is the amount of segments the distribution domain will be discretized out. The 'rango' input are the minimum and maximum values of the obtained distributed values. The 'parameters' of these weight vector distributions are set to common values of each distribution by default, but they can be modified. :param hyperDistribution: string :param size: It is the number of basis kernels for the MKL object. :param rango: It is the range to which the basis kernel parameters will pertain. For some basis kernels families this input parameter has not effect. :param parameters: It is a list of parameters of the distribution of the random weights, e.g. for a gaussian distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel families this input parameter has not effect: {'linear', 'quadratic', 'triangular', 'pareto', 'gamma', 'weilbull', } .. seealso: fit_kernel() function documentation. """ # Validating th inputs assert (isinstance(size, int) and size > 0) assert (rango[0] < rango[1] and len(rango) == 2) # .. todo: Revise the other linespaces of the other distributions. They must be equally consistent than the # .. todo: Gaussian one. Change 'is' when verifying equality between strings (PEP008 recommendation). 
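    # Illustrative call (assumed values): sigmaGen(self, 'gaussian', size=5, rango=[1, 50], parameters=[25.0, 10.0])
    # draws five widths from a Gaussian with mean 25 and std. deviation 10, rejecting draws that fall outside [1, 50].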
sig = [] if hyperDistribution == 'linear': line = numpy.linspace(rango[0], rango[1], size*2) sig = random.sample(line, size) return sig elif hyperDistribution == 'quadratic': sig = numpy.square(random.sample(numpy.linspace(int(sqrt(rango[0])), int(sqrt(rango[1]))), size)) return sig elif hyperDistribution == 'gaussian': assert parameters[1] > 0 # The width is greater than zero? i = 0 while i < size: numero = random.gauss(parameters[0], parameters[1]) if rango[0] <= numero <= rango[1]: # Validate the initial point of sig.append(numero) # 'range'. If not met, loop does i += 1 # not end, but resets # If met, the number is appended return sig # to 'sig' width list. elif hyperDistribution == 'triangular': assert rango[0] <= parameters[0] <= rango[1] # The median is in the range? sig = numpy.random.triangular(rango[0], parameters[0], rango[1], size) return sig elif hyperDistribution == 'beta': assert (parameters[0] >= 0 and parameters[1] >= 0) # Alpha and Beta parameters are non-negative? sig = numpy.random.beta(parameters[0], parameters[1], size) * (rango[1] - rango[0]) + rango[0] return sig elif hyperDistribution == 'pareto': return numpy.random.pareto(5, size=size) * (rango[1] - rango[0]) + rango[0] elif hyperDistribution == 'gamma': return numpy.random.gamma(shape=1, size=size) * (rango[1] - rango[0]) + rango[0] elif hyperDistribution == 'weibull': return numpy.random.weibull(2, size=size) * (rango[1] - rango[0]) + rango[0] elif hyperDistribution == 'loggauss': assert parameters[1] > 0 # The width is greater than zero? i = 0 while i < size: numero = random.lognormvariate(parameters[0], parameters[1]) if numero > rango[0] and numero < rango[1]: sig.append(numero) i += 1 return sig else: print 'The entered hyperparameter distribution is not allowed: '+hyperDistribution #pdb.set_trace() # Combining kernels def genKer(self, featsL, featsR, basisFam, widths=[5.0, 4.0, 3.0, 2.0, 1.0], sparse = False): """:return: Shogun CombinedKernel object. This module generates a list of basis kernels. These kernels are tuned according to the vector ''widths''. Input parameters ''featsL'' and ''featsR'' are Shogun feature objects. In the case of a learnt RKHS, these both objects should be derived from the training SLM vectors, by means of the Shogun constructor realFeatures(). This module also appends basis kernels to a Shogun combinedKernel object. The kernels to be append are left in ''combKer'' object (see code), which is returned. We have analyzed some basis families available in Shogun, so possible string values of 'basisFam' are: basisFam = ['gaussian', 'inverseQuadratic', 'polynomial', 'power', 'rationalQuadratic', 'spherical', 'tstudent', 'wave', 'wavelet', 'cauchy', 'exponential'] """ allowed_sparse = ['gaussian', 'polynomial'] # Change this assertion list and function if different kernels are needed. assert not (featsL.get_feature_class() == featsR.get_feature_class() == 'C_SPARSE') or basisFam in allowed_sparse # Sparse type is not compatible with specified kernel or feature types are different. 
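    # One sub-kernel is built per entry of 'widths'. The distance-based families below
    # (inverseQuadratic, power, rationalQuadratic, spherical, tstudent, wave, cauchy and
    # exponential) first need a MinkowskiMetric (dense) or SparseEuclideanDistance (sparse)
    # object; every sub-kernel built here is finally appended to a single CombinedKernel.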
kernels = [] if basisFam == 'gaussian': for w in widths: k=GaussianKernel() #k.init(featsL, featsR) #st() kernels.append(k) kernels[-1].set_width(w) kernels[-1].init(featsL, featsR) #st() elif basisFam == 'inverseQuadratic': # For this (and others below) kernel it is necessary fitting the if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # distance matrix at this moment k = 2 is for l_2 norm else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(InverseMultiQuadricKernel(0, w, dst)) elif basisFam == 'polynomial': for w in widths: kernels.append(PolyKernel(0, w, False)) elif basisFam == 'power': # At least for images, the used norm does not make differences in performace if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(PowerKernel(0, w, dst)) elif basisFam == 'rationalQuadratic': # At least for images, using 3-norm make differences if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) # in performance else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(RationalQuadraticKernel(0, w, dst)) elif basisFam == 'spherical': # At least for images, the used norm does not make differences in performace if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(SphericalKernel(0, w, dst)) elif basisFam == 'tstudent': # At least for images, the used norm does not make differences in performace if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(TStudentKernel(0, w, dst)) elif basisFam == 'wave': # At least for images, the used norm does not make differences in performace if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(WaveKernel(0, w, dst)) elif basisFam == 'wavelet' and not sparse: # At least for images it is very low the performance with this kernel. for w in widths: # It remains pending, for now, analysing its parameters. kernels.append(WaveletKernel(0, w, 0)) elif basisFam == 'cauchy': if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(CauchyKernel(0, w, dst)) elif basisFam == 'exponential': # For this kernel it is necessary specifying features at the constructor if not sparse: dst = MinkowskiMetric(l=featsL, r=featsR, k=2) else: dst = SparseEuclideanDistance(l=featsL, r=featsR) for w in widths: kernels.append(ExponentialKernel(featsL, featsR, w, dst, 0)) elif basisFam == 'anova' and not sparse: # This kernel presents a warning in training: """RuntimeWarning: [WARN] In file /home/iarroyof/shogun/src/shogun/classifier/mkl/MKLMulticlass.cpp line 198: CMKLMulticlass::evaluatefinishcriterion(...): deltanew<=0.Switching back to weight norsm difference as criterion. 
""" for w in widths: kernels.append(ANOVAKernel(0, w)) else: raise NameError('Unknown Kernel family name!!!') combKer = CombinedKernel() #features_tr = CombinedFeatures() for k in kernels: combKer.append_kernel(k) #features_tr.append_feature_obj(featsL) #combKer.init(features_tr, features_tr) #combKer.init(featsL,featsR) return combKer#, features_tr # Defining the compounding kernel object class mklObj(object): """Default self definition of the Multiple Kernel Learning object. This object uses previously defined methods for generating a linear combination of basis kernels that can be constituted from different families. See at fit_kernel() function documentation for details. This function trains the kernel weights. The object has other member functions offering utilities. See the next instantiation and using example: import mklObj as mk kernel = mk.mklObj(weightRegNorm = 2, mklC = 2, # This is the Cparameter of the underlaying SVM. SVMepsilon = 1e-5, threads = 2, MKLepsilon = 0.001, probome = 'Multiclass', verbose = False) # IMPORTANT: Don't use this feature (True) if you are working in pipe mode. # The object will print undesired outputs to the stdout. The above values are the defaults, so if they are suitable for you it is possible instantiating the object by simply stating: kernel = mk.mklObj(). Even it is possible modifying a subset of input parameters (keeping others as default): kernel = mk.mklObj(weightRegNorm = 1, mklC = 10, SVMepsilon = 1e-2). See the documentation of each setter below for allowed setting parameters without new instantiations. Now, once main parameters has been setted, fit the kernel: kernel.fit_kernel(featsTr = feats_train, targetsTr = labelsTr, featsTs = feats_test, targetsTs = labelsTs, kernelFamily = 'gaussian', randomRange = [50, 200], # For homogeneous poly kernels these two parameter randomParams = [50, 20], # sets have not effect. No basis kernel parameters hyper = 'linear', # Also with not effect when kernel family is polynomial pKers = 3) # and some other powering forms. Once the MKL object has been fitted, you can get what you need from it. See getters documentation listed below. """ def __init__(self, weightRegNorm=2.0, mklC=2.0, SVMepsilon=0.01, model_file = None, threads=4, MKLepsilon=0.01, problem='regression', verbose=False, mode = 'learning', sparse = False): """Object initialization. This procedure is regardless of the input data, basis kernels and corresponding hyperparameters (kernel fitting). """ mkl_problem_object = {'regression':(MKLRegression, [mklC, mklC]), 'binary': (MKLClassification, [mklC, mklC]), 'multiclass': (MKLMulticlass, mklC)} self.mode = mode self.sparse = sparse assert not model_file and mode != 'pattern_recognition' or ( model_file and mode == 'pattern_recognition')# Model file or pattern_recognition mode must be specified. self.__problem = problem self.verbose = verbose # inner training process verbose flag self.Matrx = False # Kind of returned learned kernel object. See getter documentation of these self.expansion = False # object configuration parameters for details. Only modifiable by setter. 
self.__testerr = 0 if mode == 'learning': try: self.mkl = mkl_problem_object[problem][0]() self.mklC = mkl_problem_object[problem][1] except KeyError: sys.stderr.write('Error: Given problem type is not valid.') exit() #################<<<<<<<<<<<>>>>>>>>>> self.mkl.set_C_mkl(5.0) # This is the regularization parameter for the MKL weights regularizer (NOT the SVM C) self.weightRegNorm = weightRegNorm # Setting the basis' weight vector norm self.SVMepsilon = SVMepsilon # setting the transducer stop (convergence) criterion self.MKLepsilon = MKLepsilon # setting the MKL stop criterion. The value suggested by # Shogun examples is 0.001. See setter docs for details elif mode == 'pattern_recognition': [self.mkl, self.mkl_model] = self.load_mkl_model(file_name = model_file, model_type = problem) self.sigmas = self.mkl_model['widths'] self.threads = threads # setting number of training threads. Verify functionality!! def fit_pretrained(self, featsTr, featsTs): """ This method sets up a MKL machine by using parameters from self.mkl_model preloaded dictionary which contains preptrained model paremeters, e.g. weights and widths. """ self.ker = genKer(self, featsTr, featsTs, sparse = self.sparse, basisFam = self.family_translation[self.mkl_model['family']], widths = self.sigmas) self.ker.set_subkernel_weights(self.mkl_model['weights']) # Setting up pretrained weights to the self.ker.init(featsTr, featsTs) # new kernel # Self Function for kernel generation def fit_kernel(self, featsTr, targetsTr, featsTs, targetsTs, randomRange=[1, 50], randomParams=[1, 1], hyper='linear', kernelFamily='gaussian', pKers=3): """ :return: CombinedKernel Shogun object. This method is used for training the desired compound kernel. See documentation of the 'mklObj' object for using example. 'featsTr' and 'featsTs' are the training and test data respectively. 'targetsTr' and 'targetsTs' are the training and test labels, respectively. All they must be Shogun 'RealFeatures' and 'MulticlassLabels' objects respectively. The 'randomRange' parameter defines the range of numbers from which the basis kernel parameters will be drawn, e.g. Gaussian random widths between 1 and 50 (the default). The 'randomParams' input parameter states the parameters of the pseudorandom distribution of the basis kernel parameters to be drawn, e.g. Gaussian-pseudorandom-generated weights with std. deviation equal to 1 and mean equal to 1 (the default). The 'hyper' input parameter defines the distribution of the pseudorandom-generated weights. See documentation of the sigmaGen() method of the 'mklObj' object to see a list of possible basis kernel parameter distributions. The 'kernelFamily' input parameter is the basis kernel family to be append to the desired compound kernel if you select, e.g., the default 'gaussian' family, all elements of the learned linear combination will be gaussians (each differently weighted and parametrized). See documentation of the genKer() method of the 'mklObj' object to see a list of allowed basis kernel families. The 'pKers' input parameter defines the size of the learned kernel linear combination, i.e. how many basis kernels to be weighted in the training and therefore, how many coefficients will have the Fourier series of data (the default is 3). .. note:: In the cases of kernelFamily = {'polynomial' or 'power' or 'tstudent' or 'anova'}, the input parameters {'randomRange', 'randomParams', 'hyper'} have not effect, because these kernel families do not require basis kernel parameters. 
:param featsTr: RealFeatures Shogun object conflating the training data. :param targetsTr: MulticlassLabels Shogun object conflating the training labels. :param featsTr: RealFeatures Shogun object conflating the test data. :param targetsTr: MulticlassLabels Shogun object conflating the test labels. :param randomRange: It is the range to which the basis kernel parameters will pertain. For some basis kernels families this input parameter has not effect. :param randomParams: It is a list of parameters of the distribution of the random weights, e.g. for a gaussian distribution with mean zero and variance 1, parameters = [0, 1]. For some basis kernel families this input parameter has not effect. :param hyper: string which specifies the name of the basis kernel parameter distribution. See documentation for sigmaGen() function for viewing allowed strings (names). :param kernelFamily: string which specifies the name of the basis kernel family. See documentation for genKer() function for viewing allowed strings (names). :param pKers: This is the number of basis kernels for the MKL object (linear combination). """ # Inner variable copying: self._featsTr = featsTr self._targetsTr = targetsTr self._hyper = hyper self._pkers = pKers self.basisFamily = kernelFamily if self.verbose: # Printing the training progress print '\nNacho, multiple <' + kernelFamily + '> Kernels have been initialized...' print "\nInput main parameters: " print "\nHyperarameter distribution: ", self._hyper, "\nLinear combination size: ", pKers, \ '\nWeight regularization norm: ', self.weightRegNorm, \ 'Weight regularization parameter: ',self.mklC if self.__problem == 'multiclass': print "Classes: ", targetsTr.get_num_classes() elif self.__problem == 'binary': print "Classes: Binary" elif self.__problem == 'regression': print 'Regression problem' # Generating the list of subkernels. Creating the compound kernel. For monomial-nonhomogeneous (polynomial) # kernels the hyperparameters are uniquely the degree of each monomial, in the form of a sequence. MKL finds the # coefficient (weight) for each monomial in order to find a compound polynomial. if kernelFamily == 'polynomial' or kernelFamily == 'power' or \ kernelFamily == 'tstudent' or kernelFamily == 'anova': self.sigmas = range(1, pKers+1) self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse) else: # We have called 'sigmas' to any basis kernel parameter, regardless if the kernel is Gaussian or not. So # let's generate the widths: self.sigmas = sorted(sigmaGen(self, hyperDistribution=hyper, size=pKers, rango=randomRange, parameters=randomParams)) try: z = self.sigmas.index(0) self.sigmas[z] = 0.1 except ValueError: pass try: # Verifying if number of kernels is greater or equal to 2 if pKers <= 1 or len(self.sigmas) < 2: raise customException('Senseless MKLClassification use!!!') except customException, (instance): print 'Caugth: ' + instance.parameter print "-----------------------------------------------------" print """The multikernel learning object is meaningless for less than 2 basis kernels, i.e. 
pKers <= 1, so 'mklObj' couldn't be instantiated.""" print "-----------------------------------------------------" self.ker = genKer(self, self._featsTr, self._featsTr, basisFam=kernelFamily, widths=self.sigmas, sparse = self.sparse) if self.verbose: print 'Widths: ', self.sigmas # Initializing the compound kernel # combf_tr = CombinedFeatures() # combf_tr.append_feature_obj(self._featsTr) # self.ker.init(combf_tr, combf_tr) try: # Verifying if number of kernels was greater or equal to 2 after training if self.ker.get_num_subkernels() < 2: raise customException( 'Multikernel coefficients were less than 2 after training. Revise object settings!!!') except customException, (instance): print 'Caugth: ' + instance.parameter # Verbose for learning surveying if self.verbose: print '\nKernel fitted...' # Initializing the transducer for multiclassification features_tr = CombinedFeatures() features_ts = CombinedFeatures() for k in self.sigmas: features_tr.append_feature_obj(self._featsTr) features_ts.append_feature_obj(featsTs) self.ker.init(features_tr, features_tr) self.mkl.set_kernel(self.ker) self.mkl.set_labels(self._targetsTr) # Train to return the learnt kernel if self.verbose: print '\nLearning the machine coefficients...' # ------------------ The most time consuming code segment -------------------------- self.crashed = False try: self.mkl.train() except SystemError: self.crashed = True self.mkl_model = self.keep_mkl_model(self.mkl, self.ker, self.sigmas) # Let's keep the trained model if self.verbose: # for future use. print 'Kernel trained... Weights: ', self.weights # Evaluate the learnt Kernel. Here it is assumed 'ker' is learnt, so we only need for initialize it again but # with the test set object. Then, set the initialized kernel to the mkl object in order to 'apply'. self.ker.init(features_tr, features_ts) # Now with test examples. The inner product between training #st() def pattern_recognition(self, targetsTs): self.mkl.set_kernel(self.ker) # and test examples generates the corresponding Gram Matrix. if not self.crashed: out = self.mkl.apply() # Applying the obtained Gram Matrix else: out = RegressionLabels(-1.0*numpy.ones(targetsTs.get_num_labels())) self.estimated_out = list(out.get_labels()) # ---------------------------------------------------------------------------------- if self.__problem == 'binary': # If the problem is either binary or multiclass, different evalua = ErrorRateMeasure() # performance measures are computed. self.__testerr = 100 - evalua.evaluate(out, targetsTs) * 100 elif self.__problem == 'multiclass': evalua = MulticlassAccuracy() self.__testerr = evalua.evaluate(out, targetsTs) * 100 elif self.__problem == 'regression': # Determination Coefficient was selected for measuring performance #evalua = MeanSquaredError() #self.__testerr = evalua.evaluate(out, targetsTs) self.__testerr = r2_score(self.estimated_out, list(targetsTs.get_labels())) # Verbose for learning surveying if self.verbose: print 'Kernel evaluation ready. The precision was: ', self.__testerr, '%' def keep_mkl_model(self, mkl, kernel, widths, file_name = None): """ Python reimplementated function for saving a pretrained MKL machine. This method saves a trained MKL machine to the file 'file_name'. If not 'file_name' is given, a dictionary 'mkl_machine' containing parameters of the given trained MKL object is returned. 
Here we assumed all subkernels of the passed CombinedKernel are of the same family, so uniquely the first kernel is used for verifying if the passed 'kernel' is a Gaussian mixture. If it is so, we insert the 'widths' to the model dictionary 'mkl_machine'. An error is returned otherwise. """ mkl_machine = {} support=[] mkl_machine['num_support_vectors'] = mkl.get_num_support_vectors() mkl_machine['bias']=mkl.get_bias() for i in xrange(mkl_machine['num_support_vectors']): support.append((mkl.get_alpha(i), mkl.get_support_vector(i))) mkl_machine['support'] = support mkl_machine['weights'] = list(kernel.get_subkernel_weights()) mkl_machine['family'] = kernel.get_first_kernel().get_name() mkl_machine['widths'] = widths if file_name: f = open(file_name,'w') f.write(str(mkl_machine)+'\n') f.close() else: return mkl_machine def load_mkl_model(self, file_name, model_type = 'regression'): """ This method receives a file name (if it is not in pwd, full path must be given) and a model type to be loaded {'regression', 'binary', 'multiclass'}. The loaded file must contain a t least a dictionary at its top. This dictionary must contain a key called 'model' whose value must be a dictionary, from which model parameters will be read. For example: {'key_0':value, 'key_1':value,..., 'model':{'family':'PolyKernel', 'bias':1.001,...}, key_n:value} Four objects are returned. The MKL model which is tuned to those parameters stored at the given file. A numpy array containing learned weights of a CombinedKernel. The widths corresponding to returned kernel weights and the kernel family. Be careful with the kernel family you are loading because widths no necessarily are it, but probably 'degrees', e.g. for the PolyKernel family. The Combined kernel must be instantiated outside this method, thereby loading to it corresponding weights and widths. """ with open(file_name, 'r') as pointer: mkl_machine = eval(pointer.read())['learned_model'] if model_type == 'regression': mkl = MKLRegression() # A new two-class MKL object elif model_type == 'binary': mkl = MKLClassification() elif model_type == 'multiclass': mkl = MKLMulticlass() else: sys.stderr.write('ERROR: Unknown problem type in model loading.') exit() mkl.set_bias(mkl_machine['bias']) mkl.create_new_model(mkl_machine['num_support_vectors']) # Initialize the inner SVM for i in xrange(mkl_machine['num_support_vectors']): mkl.set_alpha(i, mkl_machine['support'][i][0]) mkl.set_support_vector(i, mkl_machine['support'][i][1]) mkl_machine['weights'] = numpy.array(mkl_machine['weights']) return mkl, mkl_machine # Getters (properties): @property def family_translation(self): """ """ self.__family_translation = {'PolyKernel':'polynomial', 'GaussianKernel':'gaussian', 'ExponentialKernel':'exponential'} return self.__family_translation @property def mkl_model(self): """ This property stores the MKL model parameters learned by the self-object. These parameters can be stored into a file for future configuration of a non-trained MKL new MKL object. Also probably passed onwards for showing results. """ return self.__mkl_model @property def estimated_out(self): """ This property is the mkl result after applying. """ return self.__estimated_out @property def compoundKernel(self): """This method is used for getting the kernel object, i.e. the learned MKL object, which can be unwrapped into its matrix form instead of getting a Shogun object. Use the input parameters Matrix = True, expansion = False for getting the compound matrix of reals. 
For instance: mklObj.Matrix = True mklObj.expansion = False kernelMatrix = mklObj.compoundKernel Use Matrix = True, expansion = True for getting the expanded linear combination of matrices and weights separately, for instance: mklObj.Matrix = True mklObj.expansion = True basis, weights = mklObj.compoundKernel Use Matrix = False, expansion = False for getting the learned kernel Shogun object, for instance: mklObj.Matrix = False mklObj.expansion = False kernelObj = mklObj.compoundKernel .. warning:: Be careful with this latter variant of the method becuase of the large amount of needed physical memory. """ if self.Matrx: kernels = [] size = self.ker.get_num_subkernels() for k in xrange(0, size - 1): kernels.append(self.ker.get_kernel(k).get_kernel_matrix()) ws = self.weights if self.expansion: return kernels, ws # Returning the full expansion of the learned kernel. else: return sum(kernels * ws) # Returning the matrix linear combination, in other words, else: # a projector matrix representation. return self.ker # If matrix representation is not required, only the Shogun kernel # object is returned. @property def sigmas(self): """This method is used for getting the current set of basis kernel parameters, i.e. widths, in the case of the gaussian basis kernel. :rtype : list of float """ return self.__sigmas @property def verbose(self): """This is the verbose flag, which is used for monitoring the object training procedure. IMPORTANT: Don't use this feature (True) if you are working in pipe mode. The object will print undesired outputs to the stdout. :rtype : bool """ return self._verbose @property def Matrx(self): """This is a boolean property of the object. Its aim is getting and, mainly, setting the kind of object we want to obtain as learned kernel, i.e. a Kernel Shogun object or a Kernel Matrix whose entries are reals. The latter could require large amounts of physical memory. See the mklObj.compoundKernel property documentation in this object for using details. :rtype :bool """ return self.__Matrx @property def expansion(self): """This is a boolean property. Its aim is getting and, mainly, setting the mklObj object to return the complete expansion of the learned kernel, i.e. a list of basis kernel matrices as well as their corresponding coefficients. This configuration may require large amounts of physical memory. See the mklObj.compoundKernel property documentation in this object for using details. :rtype :bool .. seealso:: the code and examples and documentation about :@property:`compoundKernel` """ return self.__expansion @property def weightRegNorm(self): """ The value of this property is the basis' weight vector norm, e.g. :math:`||\beta||_p`, to be used as regularizer. It controls the smoothing among basis kernel weights of the learned multiple kernel combination. On one hand, If p=1 (the l_1 norm) the weight values B_i will be disproportionally between them, i.e. a few of them will be >> 0,some other simply > 0 and many of them will be zero or very near to zero (the vector B will be sparse). On the other hand, if p = 2 the weights B_i linearly distributed, i.e. their distribution shows an uniform tilt in such a way the differences between pairs of them are not significant, but rather proportional to the tilt of the distribution. To our knowledge, such a tilt is certainly not explicitly taken into account as regularization hyperparameter, although the parameter C \in [0, 1] is directly associated to it as scalar factor. 
Thus specifically for C \in [0, 1], it operates the vector B by forcing to it to certain orientation which describes a tilt m \in (0, 1)U(1, \infty) (with minima in the extremes of these subsets and maxima in their medians). Given that C \n [0, 1], the scaling effect behaves such that linearly depresses low values of B_i, whilst highlights their high values. The effect of C \in (1, \infty) is still not clearly studied, however it will be a bit different than the above, but keeping its scalar effect. Overall, as p tends to be >> 1 (or even p --> \\infty) the B_i values tend to be ever more uniformly distributed. More specific and complex regularization operators are explained in .. seealso:: Schölkopf, B., & Smola, A. J. (2002). Learning with kernels: Support vector machines, regularization, optimization, and beyond. MIT press. :rtype : vector of float """ return self.__weightRegNorm # function getters @property def weights(self): """This method is used for getting the learned weights of the MKL object. We first get the kernel weights into a list object, before returning it. This is because 'get_subkernel_weights()' causes error while printing to an output file by means of returning a nonlist object. :rtype : list of float """ self.__weights = list(self.ker.get_subkernel_weights()) return self.__weights @property def SVMepsilon(self): """This method is used for getting the SVM convergence criterion (the minimum allowed error commited by the transducer in training). :rtype : float .. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning. .. seealso:: @SVMepsilon.setter """ return self.__SVMepsion @property def MKLepsilon(self): """This method is used for getting the MKL convergence criterion (the minimum allowed error committed by the MKL object in test). :rtype : float .. seealso:: See at page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning. .. seealso:: @MKLepsilon.setter """ return self.__MKLepsilon @property def mklC(self): """This method is used for setting regularization parameters. 'mklC' is a real value in multiclass problems, while in binary problems it must be a list of two elements. These must be different when the two classes are imbalanced, but must be equal for balanced densities in binary classification problems. For multiclass problems, imbalanced densities are not considered. :rtype : float .. seealso:: See at page 4 of Bagchi, (2014) SVM Classifiers Based On Imperfect Training Data. .. seealso:: @weightRegNorm property documentation for more details about C as regularization parameter. """ return self.__mklC @property def threads(self): """ This property is used for getting and setting the number of threads in which the training procedure will be will be segmented into a single machine processor core. :rtype : int .. seealso:: @threads.setter documentation. """ return self.__threads # Readonly properties: @property def problem(self): """This method is used for getting the kind of problem the mklObj object will be trained for. If binary == True, the you want to train the object for a two-class classification problem. Otherwise if binary == False, you want to train the object for multiclass classification problems. This property can't be modified once the object has been instantiated. :rtype : bool """ return self.__problem @property def testerr(self): """This method is used for getting the test accuracy after training the MKL object. 'testerr' is a readonly object property. 
:rtype : float """ return self.__testerr @property def sparse(self): """This method is used for getting the sparse/dense mode of the MKL object. :rtype : float """ return self.__sparse @property def crashed(self): """This method is used for getting the sparse/dense mode of the MKL object. :rtype : float """ return self.__crashed # mklObj (decorated) Setters: Binary configuration of the classifier cant be changed. It is needed to instantiate # a new mklObj object. @crashed.setter def crashed(self, value): assert isinstance(value, bool) # The model is not stored as a dictionary self.__crashed = value @mkl_model.setter def mkl_model(self, value): assert isinstance(value, dict) # The model is not stored as a dictionary self.__mkl_model = value @estimated_out.setter def estimated_out(self, value): self.__estimated_out = value @sparse.setter def sparse(self, value): self.__sparse = value @Matrx.setter def Matrx(self, value): """ :type value: bool .. seealso:: @Matrx property documentation. """ assert isinstance(value, bool) self.__Matrx = value @expansion.setter def expansion(self, value): """ .. seealso:: @expansion property documentation :type value: bool """ assert isinstance(value, bool) self.__expansion = value @sigmas.setter def sigmas(self, value): """ This method is used for setting desired basis kernel parameters for the MKL object. 'value' is a list of real values of 'pKers' length. In 'learning' mode, be careful to avoid mismatching between the number of basis kernels of the current compound kernel and the one you have in mind. A mismatch error could be arisen. In 'pattern_recognition' mode, this quantity is taken from the learned model, which is stored at disk. @type value: list of float .. seealso:: @sigmas property documentation """ try: if self.mode == 'learning': if len(value) == self._pkers: self.__sigmas = value else: raise customException('Size of basis kernel parameter list mismatches the size of the combined\ kernel. You can use len(CMKLobj.sigmas) to revise the mismatching.') elif self.mode == 'pattern_recognition': self.__sigmas = value except customException, (instance): print "Caught: " + instance.parameter @verbose.setter def verbose(self, value): """This method sets to True of False the verbose flag, which is used in turn for monitoring the object training procedure. @type value: bool """ assert isinstance(value, bool) self._verbose = value @weightRegNorm.setter def weightRegNorm(self, value): """This method is used for changing the norm of the weight regularizer of the MKL object. Typically this changing is useful for retrain the model with other regularizer. @type value: float ..seealso:: @weightRegNorm property documentation. """ assert (isinstance(value, float) and value >= 0.0) self.mkl.set_mkl_norm(value) self.__weightRegNorm = value @SVMepsilon.setter def SVMepsilon(self, value): """This method is used for setting the SVM convergence criterion (the minimum allowed error commited by the transducer in training). In other words, the low level of the learning process. The current basis kernel combination is tested as the SVM kernel. Regardless of each basis' weights. @type value: float .. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning. """ assert (isinstance(value, float) and value >= 0.0) self.mkl.set_epsilon(value) self.__SVMepsion = value @MKLepsilon.setter def MKLepsilon(self, value): """This method is used for setting the MKL convergence criterion (the minimum allowed error committed by the MKL object in test). 
In other words, the high level of the learning process. The current basis kernel combination is tested as the SVM kernel. The basis' weights are tuned until 'MKLeps' is reached. @type value: float .. seealso:: Page 22 of Sonnemburg et.al., (2006) Large Scale Multiple Kernel Learning. """ assert (isinstance(value, float) and value >= 0.0) self.mkl.set_mkl_epsilon(value) self.__MKLepsilon = value @mklC.setter def mklC(self, value): """This method is used for setting regularization parameters. These are different when the two classes are imbalanced and Equal for balanced densities in binary classification problems. For multiclass problems imbalanced densities are not considered, so uniquely the first argument is caught by the method. If one or both arguments are misplaced the default values are one both them. @type value: float (for multiclass problems), [float, float] for binary and regression problems. .. seealso:: Page 4 of Bagchi,(2014) SVM Classifiers Based On Imperfect Training Data. """ if self.__problem == 'binary' or self.__problem == 'regression': assert len(value) == 2 assert (isinstance(value, (list, float)) and value[0] > 0.0 and value[1] > 0.0) self.mkl.set_C(value[0], value[1]) elif self.__problem == 'multiclass': assert (isinstance(value, float) and value > 0.0) self.mkl.set_C(value) self.__mklC = value @threads.setter def threads(self, value): """This method is used for changing the number of threads we want to be running with a single machine core. These threads are not different parallel processes running in different machine cores. """ assert (isinstance(value, int) and value > 0) self.mkl.parallel.set_num_threads(value) # setting number of training threads self.__threads = value
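# ---------------------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): restoring a saved model with load_mkl_model(). The file
# name 'mkl_regression.model' and the helper function below are hypothetical; they only show the call sequence,
# assuming the file holds a dictionary whose top-level key 'learned_model' contains the stored parameters, which
# is what load_mkl_model() reads back.
def _load_saved_model_sketch(trained_obj, file_name='mkl_regression.model'):
    """Hypothetical helper: 'trained_obj' is assumed to be an already constructed mklObj instance."""
    mkl, mkl_machine = trained_obj.load_mkl_model(file_name=file_name, model_type='regression')
    # 'mkl' is an MKLRegression object with bias, alphas and support vectors already restored;
    # 'mkl_machine' keeps 'weights' (a numpy array), 'widths' and 'family' for rebuilding the CombinedKernel.
    return mkl, mkl_machine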
gpl-2.0
cbertinato/pandas
pandas/tests/io/test_packers.py
1
33090
import datetime from distutils.version import LooseVersion import glob from io import BytesIO import os from warnings import catch_warnings import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.errors import PerformanceWarning import pandas from pandas import ( Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Period, Series, Timestamp, bdate_range, date_range, period_range) import pandas.util.testing as tm from pandas.util.testing import ( assert_categorical_equal, assert_frame_equal, assert_index_equal, assert_series_equal, ensure_clean) from pandas.io.packers import read_msgpack, to_msgpack nan = np.nan try: import blosc # NOQA except ImportError: _BLOSC_INSTALLED = False else: _BLOSC_INSTALLED = True try: import zlib # NOQA except ImportError: _ZLIB_INSTALLED = False else: _ZLIB_INSTALLED = True @pytest.fixture(scope='module') def current_packers_data(): # our current version packers data from pandas.tests.io.generate_legacy_storage_files import ( create_msgpack_data) return create_msgpack_data() @pytest.fixture(scope='module') def all_packers_data(): # our all of our current version packers data from pandas.tests.io.generate_legacy_storage_files import ( create_data) return create_data() def check_arbitrary(a, b): if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)): assert(len(a) == len(b)) for a_, b_ in zip(a, b): check_arbitrary(a_, b_) elif isinstance(a, DataFrame): assert_frame_equal(a, b) elif isinstance(a, Series): assert_series_equal(a, b) elif isinstance(a, Index): assert_index_equal(a, b) elif isinstance(a, Categorical): # Temp, # Categorical.categories is changed from str to bytes in PY3 # maybe the same as GH 13591 if b.categories.inferred_type == 'string': pass else: tm.assert_categorical_equal(a, b) elif a is NaT: assert b is NaT elif isinstance(a, Timestamp): assert a == b assert a.freq == b.freq else: assert(a == b) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class TestPackers: def setup_method(self, method): self.path = '__%s__.msg' % tm.rands(10) def teardown_method(self, method): pass def encode_decode(self, x, compress=None, **kwargs): with ensure_clean(self.path) as p: to_msgpack(p, x, compress=compress, **kwargs) return read_msgpack(p, **kwargs) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class TestAPI(TestPackers): def test_string_io(self): df = DataFrame(np.random.randn(10, 2)) s = df.to_msgpack(None) result = read_msgpack(s) tm.assert_frame_equal(result, df) s = df.to_msgpack() result = read_msgpack(s) tm.assert_frame_equal(result, df) s = df.to_msgpack() result = read_msgpack(BytesIO(s)) tm.assert_frame_equal(result, df) s = to_msgpack(None, df) result = read_msgpack(s) tm.assert_frame_equal(result, df) with ensure_clean(self.path) as p: s = df.to_msgpack() with open(p, 'wb') as fh: fh.write(s) result = read_msgpack(p) tm.assert_frame_equal(result, df) def test_path_pathlib(self): df = tm.makeDataFrame() result = tm.round_trip_pathlib(df.to_msgpack, read_msgpack) tm.assert_frame_equal(df, result) def test_path_localpath(self): df = tm.makeDataFrame() result = tm.round_trip_localpath(df.to_msgpack, read_msgpack) tm.assert_frame_equal(df, result) def test_iterator_with_string_io(self): dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)] s = to_msgpack(None, *dfs) for i, result in enumerate(read_msgpack(s, iterator=True)): tm.assert_frame_equal(result, dfs[i]) def test_invalid_arg(self): # GH10369 class A: def __init__(self): self.read = 0 msg = "Invalid file path or buffer 
object type: <class '{}'>" with pytest.raises(ValueError, match=msg.format('NoneType')): read_msgpack(path_or_buf=None) with pytest.raises(ValueError, match=msg.format('dict')): read_msgpack(path_or_buf={}) with pytest.raises(ValueError, match=msg.format(r'.*\.A')): read_msgpack(path_or_buf=A()) class TestNumpy(TestPackers): def test_numpy_scalar_float(self): x = np.float32(np.random.rand()) x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_numpy_scalar_complex(self): x = np.complex64(np.random.rand() + 1j * np.random.rand()) x_rec = self.encode_decode(x) assert np.allclose(x, x_rec) def test_scalar_float(self): x = np.random.rand() x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_scalar_bool(self): x = np.bool_(1) x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) x = np.bool_(0) x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_scalar_complex(self): x = np.random.rand() + 1j * np.random.rand() x_rec = self.encode_decode(x) assert np.allclose(x, x_rec) def test_list_numpy_float(self): x = [np.float32(np.random.rand()) for i in range(5)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) x_rec = self.encode_decode(tuple(x)) tm.assert_almost_equal(tuple(x), x_rec) def test_list_numpy_float_complex(self): if not hasattr(np, 'complex128'): pytest.skip('numpy can not handle complex128') x = [np.float32(np.random.rand()) for i in range(5)] + \ [np.complex128(np.random.rand() + 1j * np.random.rand()) for i in range(5)] x_rec = self.encode_decode(x) assert np.allclose(x, x_rec) def test_list_float(self): x = [np.random.rand() for i in range(5)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) x_rec = self.encode_decode(tuple(x)) tm.assert_almost_equal(tuple(x), x_rec) def test_list_float_complex(self): x = [np.random.rand() for i in range(5)] + \ [(np.random.rand() + 1j * np.random.rand()) for i in range(5)] x_rec = self.encode_decode(x) assert np.allclose(x, x_rec) def test_dict_float(self): x = {'foo': 1.0, 'bar': 2.0} x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_dict_complex(self): x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j} x_rec = self.encode_decode(x) tm.assert_dict_equal(x, x_rec) for key in x: tm.assert_class_equal(x[key], x_rec[key], obj="complex value") def test_dict_numpy_float(self): x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)} x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_dict_numpy_complex(self): x = {'foo': np.complex128(1.0 + 1.0j), 'bar': np.complex128(2.0 + 2.0j)} x_rec = self.encode_decode(x) tm.assert_dict_equal(x, x_rec) for key in x: tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128") def test_numpy_array_float(self): # run multiple times for n in range(10): x = np.random.rand(10) for dtype in ['float32', 'float64']: x = x.astype(dtype) x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) def test_numpy_array_complex(self): x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128) x_rec = self.encode_decode(x) assert (all(map(lambda x, y: x == y, x, x_rec)) and x.dtype == x_rec.dtype) def test_list_mixed(self): x = [1.0, np.float32(3.5), np.complex128(4.25), 'foo', np.bool_(1)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) x_rec = self.encode_decode(tuple(x)) tm.assert_almost_equal(tuple(x), x_rec) 
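# Illustrative sketch (not collected as a test): the plain to_msgpack()/read_msgpack() roundtrip that
# TestPackers.encode_decode wraps, shown without the fixture machinery. The temporary file name below is
# hypothetical; ensure_clean() removes it afterwards.
def _roundtrip_sketch():
    df = DataFrame(np.random.randn(10, 2))
    with ensure_clean('__roundtrip_sketch__.msg') as path:
        to_msgpack(path, df)            # serialize the frame to a msgpack file
        result = read_msgpack(path)     # deserialize it back
    tm.assert_frame_equal(df, result)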
class TestBasic(TestPackers): def test_timestamp(self): for i in [Timestamp( '20130101'), Timestamp('20130101', tz='US/Eastern'), Timestamp('201301010501')]: i_rec = self.encode_decode(i) assert i == i_rec def test_nat(self): nat_rec = self.encode_decode(NaT) assert NaT is nat_rec def test_datetimes(self): for i in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 1, 1, 5, 1), datetime.date(2013, 1, 1), np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]: i_rec = self.encode_decode(i) assert i == i_rec def test_timedeltas(self): for i in [datetime.timedelta(days=1), datetime.timedelta(days=1, seconds=10), np.timedelta64(1000000)]: i_rec = self.encode_decode(i) assert i == i_rec def test_periods(self): # 13463 for i in [Period('2010-09', 'M'), Period('2014-Q1', 'Q')]: i_rec = self.encode_decode(i) assert i == i_rec def test_intervals(self): # 19967 for i in [Interval(0, 1), Interval(0, 1, 'left'), Interval(10, 25., 'right')]: i_rec = self.encode_decode(i) assert i == i_rec class TestIndex(TestPackers): def setup_method(self, method): super().setup_method(method) self.d = { 'string': tm.makeStringIndex(100), 'date': tm.makeDateIndex(100), 'int': tm.makeIntIndex(100), 'rng': tm.makeRangeIndex(100), 'float': tm.makeFloatIndex(100), 'empty': Index([]), 'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])), 'period': Index(period_range('2012-1-1', freq='M', periods=3)), 'date2': Index(date_range('2013-01-1', periods=10)), 'bdate': Index(bdate_range('2013-01-02', periods=10)), 'cat': tm.makeCategoricalIndex(100), 'interval': tm.makeIntervalIndex(100), 'timedelta': tm.makeTimedeltaIndex(100, 'H') } self.mi = { 'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'), ('qux', 'one'), ('qux', 'two')], names=['first', 'second']), } def test_basic_index(self): for s, i in self.d.items(): i_rec = self.encode_decode(i) tm.assert_index_equal(i, i_rec) # datetime with no freq (GH5506) i = Index([Timestamp('20130101'), Timestamp('20130103')]) i_rec = self.encode_decode(i) tm.assert_index_equal(i, i_rec) # datetime with timezone i = Index([Timestamp('20130101 9:00:00'), Timestamp( '20130103 11:00:00')]).tz_localize('US/Eastern') i_rec = self.encode_decode(i) tm.assert_index_equal(i, i_rec) def test_multi_index(self): for s, i in self.mi.items(): i_rec = self.encode_decode(i) tm.assert_index_equal(i, i_rec) def test_unicode(self): i = tm.makeUnicodeIndex(100) i_rec = self.encode_decode(i) tm.assert_index_equal(i, i_rec) def categorical_index(self): # GH15487 df = DataFrame(np.random.randn(10, 2)) df = df.astype({0: 'category'}).set_index(0) result = self.encode_decode(df) tm.assert_frame_equal(result, df) class TestSeries(TestPackers): def setup_method(self, method): super().setup_method(method) self.d = {} s = tm.makeStringSeries() s.name = 'string' self.d['string'] = s s = tm.makeObjectSeries() s.name = 'object' self.d['object'] = s s = Series(iNaT, dtype='M8[ns]', index=range(5)) self.d['date'] = s data = { 'A': [0., 1., 2., 3., np.nan], 'B': [0, 1, 0, 1, 0], 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], 'D': date_range('1/1/2009', periods=5), 'E': [0., 1, Timestamp('20100101'), 'foo', 2.], 'F': [Timestamp('20130102', tz='US/Eastern')] * 2 + [Timestamp('20130603', tz='CET')] * 3, 'G': [Timestamp('20130102', tz='US/Eastern')] * 5, 'H': Categorical([1, 2, 3, 4, 5]), 'I': Categorical([1, 2, 3, 4, 5], ordered=True), 'J': (np.bool_(1), 2, 3, 4, 5), } self.d['float'] = Series(data['A']) self.d['int'] = Series(data['B']) self.d['mixed'] = Series(data['E']) self.d['dt_tz_mixed'] = 
Series(data['F']) self.d['dt_tz'] = Series(data['G']) self.d['cat_ordered'] = Series(data['H']) self.d['cat_unordered'] = Series(data['I']) self.d['numpy_bool_mixed'] = Series(data['J']) def test_basic(self): # run multiple times here for n in range(10): for s, i in self.d.items(): i_rec = self.encode_decode(i) assert_series_equal(i, i_rec) class TestCategorical(TestPackers): def setup_method(self, method): super().setup_method(method) self.d = {} self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e']) self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True) self.d['plain_int'] = Categorical([5, 6, 7, 8]) self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True) def test_basic(self): # run multiple times here for n in range(10): for s, i in self.d.items(): i_rec = self.encode_decode(i) assert_categorical_equal(i, i_rec) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class TestNDFrame(TestPackers): def setup_method(self, method): super().setup_method(method) data = { 'A': [0., 1., 2., 3., np.nan], 'B': [0, 1, 0, 1, 0], 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], 'D': date_range('1/1/2009', periods=5), 'E': [0., 1, Timestamp('20100101'), 'foo', 2.], 'F': [Timestamp('20130102', tz='US/Eastern')] * 5, 'G': [Timestamp('20130603', tz='CET')] * 5, 'H': Categorical(['a', 'b', 'c', 'd', 'e']), 'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True), } self.frame = { 'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)), 'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)), 'mixed': DataFrame(data)} def test_basic_frame(self): for s, i in self.frame.items(): i_rec = self.encode_decode(i) assert_frame_equal(i, i_rec) def test_multi(self): i_rec = self.encode_decode(self.frame) for k in self.frame.keys(): assert_frame_equal(self.frame[k], i_rec[k]) packed_items = tuple([self.frame['float'], self.frame['float'].A, self.frame['float'].B, None]) l_rec = self.encode_decode(packed_items) check_arbitrary(packed_items, l_rec) # this is an oddity in that packed lists will be returned as tuples packed_items = [self.frame['float'], self.frame['float'].A, self.frame['float'].B, None] l_rec = self.encode_decode(packed_items) assert isinstance(l_rec, tuple) check_arbitrary(packed_items, l_rec) def test_iterator(self): packed_items = [self.frame['float'], self.frame['float'].A, self.frame['float'].B, None] with ensure_clean(self.path) as path: to_msgpack(path, *packed_items) for i, packed in enumerate(read_msgpack(path, iterator=True)): check_arbitrary(packed, packed_items[i]) def tests_datetimeindex_freq_issue(self): # GH 5947 # inferring freq on the datetimeindex df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013')) result = self.encode_decode(df) assert_frame_equal(result, df) df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013')) result = self.encode_decode(df) assert_frame_equal(result, df) def test_dataframe_duplicate_column_names(self): # GH 9618 expected_1 = DataFrame(columns=['a', 'a']) expected_2 = DataFrame(columns=[1] * 100) expected_2.loc[0] = np.random.randn(100) expected_3 = DataFrame(columns=[1, 1]) expected_3.loc[0] = ['abc', np.nan] result_1 = self.encode_decode(expected_1) result_2 = self.encode_decode(expected_2) result_3 = self.encode_decode(expected_3) assert_frame_equal(result_1, expected_1) assert_frame_equal(result_2, expected_2) assert_frame_equal(result_3, expected_3) @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning") 
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning") @pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning") class TestSparse(TestPackers): def _check_roundtrip(self, obj, comparator, **kwargs): # currently these are not implemetned # i_rec = self.encode_decode(obj) # comparator(obj, i_rec, **kwargs) msg = r"msgpack sparse (series|frame) is not implemented" with pytest.raises(NotImplementedError, match=msg): self.encode_decode(obj) def test_sparse_series(self): s = tm.makeStringSeries() s[3:5] = np.nan ss = s.to_sparse() self._check_roundtrip(ss, tm.assert_series_equal, check_series_type=True) ss2 = s.to_sparse(kind='integer') self._check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True) ss3 = s.to_sparse(fill_value=0) self._check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True) def test_sparse_frame(self): s = tm.makeDataFrame() s.loc[3:5, 1:3] = np.nan s.loc[8:10, -2] = np.nan ss = s.to_sparse() self._check_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True) ss2 = s.to_sparse(kind='integer') self._check_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True) ss3 = s.to_sparse(fill_value=0) self._check_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True) class TestCompression(TestPackers): """See https://github.com/pandas-dev/pandas/pull/9783 """ def setup_method(self, method): try: from sqlalchemy import create_engine self._create_sql_engine = create_engine except ImportError: self._SQLALCHEMY_INSTALLED = False else: self._SQLALCHEMY_INSTALLED = True super().setup_method(method) data = { 'A': np.arange(1000, dtype=np.float64), 'B': np.arange(1000, dtype=np.int32), 'C': list(100 * 'abcdefghij'), 'D': date_range(datetime.datetime(2015, 4, 1), periods=1000), 'E': [datetime.timedelta(days=x) for x in range(1000)], } self.frame = { 'float': DataFrame({k: data[k] for k in ['A', 'A']}), 'int': DataFrame({k: data[k] for k in ['B', 'B']}), 'mixed': DataFrame(data), } def test_plain(self): i_rec = self.encode_decode(self.frame) for k in self.frame.keys(): assert_frame_equal(self.frame[k], i_rec[k]) def _test_compression(self, compress): i_rec = self.encode_decode(self.frame, compress=compress) for k in self.frame.keys(): value = i_rec[k] expected = self.frame[k] assert_frame_equal(value, expected) # make sure that we can write to the new frames for block in value._data.blocks: assert block.values.flags.writeable def test_compression_zlib(self): if not _ZLIB_INSTALLED: pytest.skip('no zlib') self._test_compression('zlib') def test_compression_blosc(self): if not _BLOSC_INSTALLED: pytest.skip('no blosc') self._test_compression('blosc') def _test_compression_warns_when_decompress_caches( self, monkeypatch, compress): not_garbage = [] control = [] # copied data compress_module = globals()[compress] real_decompress = compress_module.decompress def decompress(ob): """mock decompress function that delegates to the real decompress but caches the result and a copy of the result. """ res = real_decompress(ob) not_garbage.append(res) # hold a reference to this bytes object control.append(bytearray(res)) # copy the data here to check later return res # types mapped to values to add in place. 
rhs = { np.dtype('float64'): 1.0, np.dtype('int32'): 1, np.dtype('object'): 'a', np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'), np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'), } with monkeypatch.context() as m, \ tm.assert_produces_warning(PerformanceWarning) as ws: m.setattr(compress_module, 'decompress', decompress) i_rec = self.encode_decode(self.frame, compress=compress) for k in self.frame.keys(): value = i_rec[k] expected = self.frame[k] assert_frame_equal(value, expected) # make sure that we can write to the new frames even though # we needed to copy the data for block in value._data.blocks: assert block.values.flags.writeable # mutate the data in some way block.values[0] += rhs[block.dtype] for w in ws: # check the messages from our warnings assert str(w.message) == ('copying data after decompressing; ' 'this may mean that decompress is ' 'caching its result') for buf, control_buf in zip(not_garbage, control): # make sure none of our mutations above affected the # original buffers assert buf == control_buf def test_compression_warns_when_decompress_caches_zlib(self, monkeypatch): if not _ZLIB_INSTALLED: pytest.skip('no zlib') self._test_compression_warns_when_decompress_caches( monkeypatch, 'zlib') def test_compression_warns_when_decompress_caches_blosc(self, monkeypatch): if not _BLOSC_INSTALLED: pytest.skip('no blosc') self._test_compression_warns_when_decompress_caches( monkeypatch, 'blosc') def _test_small_strings_no_warn(self, compress): empty = np.array([], dtype='uint8') with tm.assert_produces_warning(None): empty_unpacked = self.encode_decode(empty, compress=compress) tm.assert_numpy_array_equal(empty_unpacked, empty) assert empty_unpacked.flags.writeable char = np.array([ord(b'a')], dtype='uint8') with tm.assert_produces_warning(None): char_unpacked = self.encode_decode(char, compress=compress) tm.assert_numpy_array_equal(char_unpacked, char) assert char_unpacked.flags.writeable # if this test fails I am sorry because the interpreter is now in a # bad state where b'a' points to 98 == ord(b'b'). char_unpacked[0] = ord(b'b') # we compare the ord of bytes b'a' with unicode 'a' because the should # always be the same (unless we were able to mutate the shared # character singleton in which case ord(b'a') == ord(b'b'). assert ord(b'a') == ord('a') tm.assert_numpy_array_equal( char_unpacked, np.array([ord(b'b')], dtype='uint8'), ) def test_small_strings_no_warn_zlib(self): if not _ZLIB_INSTALLED: pytest.skip('no zlib') self._test_small_strings_no_warn('zlib') def test_small_strings_no_warn_blosc(self): if not _BLOSC_INSTALLED: pytest.skip('no blosc') self._test_small_strings_no_warn('blosc') def test_readonly_axis_blosc(self): # GH11880 if not _BLOSC_INSTALLED: pytest.skip('no blosc') df1 = DataFrame({'A': list('abcd')}) df2 = DataFrame(df1, index=[1., 2., 3., 4.]) assert 1 in self.encode_decode(df1['A'], compress='blosc') assert 1. in self.encode_decode(df2['A'], compress='blosc') def test_readonly_axis_zlib(self): # GH11880 df1 = DataFrame({'A': list('abcd')}) df2 = DataFrame(df1, index=[1., 2., 3., 4.]) assert 1 in self.encode_decode(df1['A'], compress='zlib') assert 1. 
in self.encode_decode(df2['A'], compress='zlib') def test_readonly_axis_blosc_to_sql(self): # GH11880 if not _BLOSC_INSTALLED: pytest.skip('no blosc') if not self._SQLALCHEMY_INSTALLED: pytest.skip('no sqlalchemy') expected = DataFrame({'A': list('abcd')}) df = self.encode_decode(expected, compress='blosc') eng = self._create_sql_engine("sqlite:///:memory:") df.to_sql('test', eng, if_exists='append') result = pandas.read_sql_table('test', eng, index_col='index') result.index.names = [None] assert_frame_equal(expected, result) def test_readonly_axis_zlib_to_sql(self): # GH11880 if not _ZLIB_INSTALLED: pytest.skip('no zlib') if not self._SQLALCHEMY_INSTALLED: pytest.skip('no sqlalchemy') expected = DataFrame({'A': list('abcd')}) df = self.encode_decode(expected, compress='zlib') eng = self._create_sql_engine("sqlite:///:memory:") df.to_sql('test', eng, if_exists='append') result = pandas.read_sql_table('test', eng, index_col='index') result.index.names = [None] assert_frame_equal(expected, result) class TestEncoding(TestPackers): def setup_method(self, method): super().setup_method(method) data = { 'A': ['\u2019'] * 1000, 'B': np.arange(1000, dtype=np.int32), 'C': list(100 * 'abcdefghij'), 'D': date_range(datetime.datetime(2015, 4, 1), periods=1000), 'E': [datetime.timedelta(days=x) for x in range(1000)], 'G': [400] * 1000 } self.frame = { 'float': DataFrame({k: data[k] for k in ['A', 'A']}), 'int': DataFrame({k: data[k] for k in ['B', 'B']}), 'mixed': DataFrame(data), } self.utf_encodings = ['utf8', 'utf16', 'utf32'] def test_utf(self): # GH10581 for encoding in self.utf_encodings: for frame in self.frame.values(): result = self.encode_decode(frame, encoding=encoding) assert_frame_equal(result, frame) def test_default_encoding(self): for frame in self.frame.values(): result = frame.to_msgpack() expected = frame.to_msgpack(encoding='utf8') assert result == expected result = self.encode_decode(frame) assert_frame_equal(result, frame) files = glob.glob(os.path.join(os.path.dirname(__file__), "data", "legacy_msgpack", "*", "*.msgpack")) @pytest.fixture(params=files) def legacy_packer(request, datapath): return datapath(request.param) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") @pytest.mark.filterwarnings("ignore:Sparse:FutureWarning") class TestMsgpack: """ How to add msgpack tests: 1. Install pandas version intended to output the msgpack. 2. Execute "generate_legacy_storage_files.py" to create the msgpack. $ python generate_legacy_storage_files.py <output_dir> msgpack 3. Move the created pickle to "data/legacy_msgpack/<version>" directory. 
""" minimum_structure = {'series': ['float', 'int', 'mixed', 'ts', 'mi', 'dup'], 'frame': ['float', 'int', 'mixed', 'mi'], 'panel': ['float'], 'index': ['int', 'date', 'period'], 'mi': ['reg2']} def check_min_structure(self, data, version): for typ, v in self.minimum_structure.items(): if typ == "panel": # FIXME: kludge; get this key out of the legacy file continue assert typ in data, '"{0}" not found in unpacked data'.format(typ) for kind in v: msg = '"{0}" not found in data["{1}"]'.format(kind, typ) assert kind in data[typ], msg def compare(self, current_data, all_data, vf, version): # GH12277 encoding default used to be latin-1, now utf-8 if LooseVersion(version) < LooseVersion('0.18.0'): data = read_msgpack(vf, encoding='latin-1') else: data = read_msgpack(vf) if "panel" in data: # FIXME: kludge; get the key out of the stored file del data["panel"] self.check_min_structure(data, version) for typ, dv in data.items(): assert typ in all_data, ('unpacked data contains ' 'extra key "{0}"' .format(typ)) for dt, result in dv.items(): assert dt in current_data[typ], ('data["{0}"] contains extra ' 'key "{1}"'.format(typ, dt)) try: expected = current_data[typ][dt] except KeyError: continue # use a specific comparator # if available comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt) comparator = getattr(self, comp_method, None) if comparator is not None: comparator(result, expected, typ, version) else: check_arbitrary(result, expected) return data def compare_series_dt_tz(self, result, expected, typ, version): # 8260 # dtype is object < 0.17.0 if LooseVersion(version) < LooseVersion('0.17.0'): expected = expected.astype(object) tm.assert_series_equal(result, expected) else: tm.assert_series_equal(result, expected) def compare_frame_dt_mixed_tzs(self, result, expected, typ, version): # 8260 # dtype is object < 0.17.0 if LooseVersion(version) < LooseVersion('0.17.0'): expected = expected.astype(object) tm.assert_frame_equal(result, expected) else: tm.assert_frame_equal(result, expected) def test_msgpacks_legacy(self, current_packers_data, all_packers_data, legacy_packer, datapath): version = os.path.basename(os.path.dirname(legacy_packer)) # GH12142 0.17 files packed in P2 can't be read in P3 if (version.startswith('0.17.') and legacy_packer.split('.')[-4][-1] == '2'): msg = "Files packed in Py2 can't be read in Py3 ({})" pytest.skip(msg.format(version)) try: with catch_warnings(record=True): self.compare(current_packers_data, all_packers_data, legacy_packer, version) except ImportError: # blosc not installed pass def test_msgpack_period_freq(self): # https://github.com/pandas-dev/pandas/issues/24135 s = Series(np.random.rand(5), index=date_range('20130101', periods=5)) r = read_msgpack(s.to_msgpack()) repr(r)
bsd-3-clause
sfepy/sfepy
examples/linear_elasticity/dispersion_analysis.py
2
35004
#!/usr/bin/env python """ Dispersion analysis of a heterogeneous finite scale periodic cell. The periodic cell mesh has to contain two subdomains Y1 (with the cell ids 1), Y2 (with the cell ids 2), so that different material properties can be defined in each of the subdomains (see ``--pars`` option). The command line parameters can be given in any consistent unit set, for example the basic SI units. The ``--unit-multipliers`` option can be used to rescale the input units to ones more suitable to the simulation, for example to prevent having different matrix blocks with large differences of matrix entries magnitudes. The results are then in the rescaled units. Usage Examples -------------- Default material parameters, a square periodic cell with a spherical inclusion, logs also standard pressure dilatation and shear waves, no eigenvectors:: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only As above, with custom eigenvalue solver parameters, and different number of eigenvalues, mesh size and units used in the calculation:: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --solver-conf="kind='eig.scipy', method='eigsh', tol=1e-10, maxiter=1000, which='LM', sigma=0" --log-std-waves -n 5 --range=0,640,101 --mode=omega --unit-multipliers=1e-6,1e-2,1e-3 --mesh-size=1e-2 --eigs-only Default material parameters, a square periodic cell with a square inclusion, and a very small mesh to allow comparing the omega and kappa modes (full matrix solver required!):: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_2m.mesh --solver-conf="kind='eig.qevp', method='companion', mode='inverted', solver={kind='eig.scipy', method='eig'}" --log-std-waves -n 500 --range=0,4000000,1001 --mesh-size=1e-2 --mode=kappa --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/kappa View/compare the resulting logs:: python script/plot_logs.py output/omega/frequencies.txt --no-legends -g 1 -o mode-omega.png python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends -o mode-kappa.png python script/plot_logs.py output/kappa/wave-numbers.txt --no-legends --swap-axes -o mode-kappa-t.png In contrast to the heterogeneous square periodic cell, a homogeneous square periodic cell (the region Y2 is empty):: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/square_1m.mesh --solver-conf="kind='eig.scipy', method='eigh'" --log-std-waves -n 10 --range=0,640,101 --mesh-size=1e-2 --mode=omega --eigs-only --no-legends --unit-multipliers=1e-6,1e-2,1e-3 -o output/omega-h python script/plot_logs.py output/omega-h/frequencies.txt --no-legends -g 1 -o mode-omega-h.png Use the Brillouin stepper:: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves -n=60 --eigs-only --no-legends --stepper=brillouin python script/plot_logs.py output/frequencies.txt -g 0 --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-kappas.png python script/plot_logs.py output/frequencies.txt -g 1 --no-legends --rc="'font.size':14, 'lines.linewidth' : 3, 'lines.markersize' : 4" -o brillouin-stepper-omegas.png Additional 
arguments can be passed to the problem configuration's :func:`define()` function using the ``--define-kwargs`` option. In this file, only the mesh vertex separation parameter `mesh_eps` can be used:: python examples/linear_elasticity/dispersion_analysis.py meshes/2d/special/circle_in_square.mesh --log-std-waves --eigs-only --define-kwargs="mesh_eps=1e-10" --save-regions """ from __future__ import absolute_import import os import sys sys.path.append('.') import gc from copy import copy from argparse import ArgumentParser, RawDescriptionHelpFormatter import numpy as nm import matplotlib.pyplot as plt from sfepy.base.base import import_file, output, Struct from sfepy.base.conf import dict_from_string, ProblemConf from sfepy.base.ioutils import ensure_path, remove_files_patterns, save_options from sfepy.base.log import Log from sfepy.discrete.fem import MeshIO from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson as stiffness import sfepy.mechanics.matcoefs as mc from sfepy.mechanics.units import apply_unit_multipliers, apply_units_to_pars import sfepy.discrete.fem.periodic as per from sfepy.discrete.fem.meshio import convert_complex_output from sfepy.homogenization.utils import define_box_regions from sfepy.discrete import Problem from sfepy.mechanics.tensors import get_von_mises_stress from sfepy.solvers import Solver from sfepy.solvers.ts import get_print_info, TimeStepper from sfepy.linalg.utils import output_array_stats, max_diff_csr pars_kinds = { 'young1' : 'stress', 'poisson1' : 'one', 'density1' : 'density', 'young2' : 'stress', 'poisson2' : 'one', 'density2' : 'density', } def define(filename_mesh, pars, approx_order, refinement_level, solver_conf, plane='strain', post_process=False, mesh_eps=1e-8): io = MeshIO.any_from_filename(filename_mesh) bbox = io.read_bounding_box() dim = bbox.shape[1] options = { 'absolute_mesh_path' : True, 'refinement_level' : refinement_level, 'allow_empty_regions' : True, 'post_process_hook' : 'compute_von_mises' if post_process else None, } fields = { 'displacement': ('complex', dim, 'Omega', approx_order), } materials = { 'm' : ({ 'D' : {'Y1' : stiffness(dim, young=pars.young1, poisson=pars.poisson1, plane=plane), 'Y2' : stiffness(dim, young=pars.young2, poisson=pars.poisson2, plane=plane)}, 'density' : {'Y1' : pars.density1, 'Y2' : pars.density2}, },), 'wave' : 'get_wdir', } variables = { 'u' : ('unknown field', 'displacement', 0), 'v' : ('test field', 'displacement', 'u'), } regions = { 'Omega' : 'all', 'Y1': 'cells of group 1', 'Y2': 'cells of group 2', } regions.update(define_box_regions(dim, bbox[0], bbox[1], mesh_eps)) ebcs = { } if dim == 3: epbcs = { 'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'), 'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'), 'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'), } else: epbcs = { 'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_y_line'), 'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_x_line'), } per.set_accuracy(mesh_eps) functions = { 'match_x_plane' : (per.match_x_plane,), 'match_y_plane' : (per.match_y_plane,), 'match_z_plane' : (per.match_z_plane,), 'match_x_line' : (per.match_x_line,), 'match_y_line' : (per.match_y_line,), 'get_wdir' : (get_wdir,), } integrals = { 'i' : 2 * approx_order, } equations = { 'K' : 'dw_lin_elastic.i.Omega(m.D, v, u)', 'S' : 'dw_elastic_wave.i.Omega(m.D, wave.vec, v, u)', 'R' : """1j * dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, u, v) - 1j * 
dw_elastic_wave_cauchy.i.Omega(m.D, wave.vec, v, u)""", 'M' : 'dw_dot.i.Omega(m.density, v, u)', } solver_0 = solver_conf.copy() solver_0['name'] = 'eig' return locals() def get_wdir(ts, coors, mode=None, equations=None, term=None, problem=None, wdir=None, **kwargs): if mode == 'special': return {'vec' : wdir} def set_wave_dir(pb, wdir): materials = pb.get_materials() wave_mat = materials['wave'] wave_mat.set_extra_args(wdir=wdir) def save_materials(output_dir, pb, options): stiffness = pb.evaluate('ev_integrate_mat.2.Omega(m.D, u)', mode='el_avg', copy_materials=False, verbose=False) young, poisson = mc.youngpoisson_from_stiffness(stiffness, plane=options.plane) density = pb.evaluate('ev_integrate_mat.2.Omega(m.density, u)', mode='el_avg', copy_materials=False, verbose=False) out = {} out['young'] = Struct(name='young', mode='cell', data=young[..., None, None]) out['poisson'] = Struct(name='poisson', mode='cell', data=poisson[..., None, None]) out['density'] = Struct(name='density', mode='cell', data=density) materials_filename = os.path.join(output_dir, 'materials.vtk') pb.save_state(materials_filename, out=out) def get_std_wave_fun(pb, options): stiffness = pb.evaluate('ev_integrate_mat.2.Omega(m.D, u)', mode='el_avg', copy_materials=False, verbose=False) young, poisson = mc.youngpoisson_from_stiffness(stiffness, plane=options.plane) density = pb.evaluate('ev_integrate_mat.2.Omega(m.density, u)', mode='el_avg', copy_materials=False, verbose=False) lam, mu = mc.lame_from_youngpoisson(young, poisson, plane=options.plane) alam = nm.average(lam) amu = nm.average(mu) adensity = nm.average(density) cp = nm.sqrt((alam + 2.0 * amu) / adensity) cs = nm.sqrt(amu / adensity) output('average p-wave speed:', cp) output('average shear wave speed:', cs) log_names = [r'$\omega_p$', r'$\omega_s$'] log_plot_kwargs = [{'ls' : '--', 'color' : 'k'}, {'ls' : '--', 'color' : 'gray'}] if options.mode == 'omega': fun = lambda wmag, wdir: (cp * wmag, cs * wmag) else: fun = lambda wmag, wdir: (wmag / cp, wmag / cs) return fun, log_names, log_plot_kwargs def get_stepper(rng, pb, options): if options.stepper == 'linear': stepper = TimeStepper(rng[0], rng[1], dt=None, n_step=rng[2]) return stepper bbox = pb.domain.mesh.get_bounding_box() bzone = 2.0 * nm.pi / (bbox[1] - bbox[0]) num = rng[2] // 3 class BrillouinStepper(Struct): """ Step over 1. Brillouin zone in xy plane. """ def __init__(self, t0, t1, dt=None, n_step=None, step=None, **kwargs): Struct.__init__(self, t0=t0, t1=t1, dt=dt, n_step=n_step, step=step) self.n_digit, self.format, self.suffix = get_print_info(self.n_step) def __iter__(self): ts = TimeStepper(0, bzone[0], dt=None, n_step=num) for ii, val in ts: yield ii, val, nm.array([1.0, 0.0]) if ii == (num-2): break ts = TimeStepper(0, bzone[1], dt=None, n_step=num) for ii, k1 in ts: wdir = nm.array([bzone[0], k1]) val = nm.linalg.norm(wdir) wdir = wdir / val yield num + ii, val, wdir if ii == (num-2): break wdir = nm.array([bzone[0], bzone[1]]) val = nm.linalg.norm(wdir) wdir = wdir / val ts = TimeStepper(0, 1, dt=None, n_step=num) for ii, _ in ts: yield 2 * num + ii, val * (1.0 - float(ii)/(num-1)), wdir stepper = BrillouinStepper(0, 1, n_step=rng[2]) return stepper def compute_von_mises(out, pb, state, extend=False, wmag=None, wdir=None): """ Calculate the von Mises stress. 
""" stress = pb.evaluate('ev_cauchy_stress.i.Omega(m.D, u)', mode='el_avg') vms = get_von_mises_stress(stress.squeeze()) vms.shape = (vms.shape[0], 1, 1, 1) out['von_mises_stress'] = Struct(name='output_data', mode='cell', data=vms) return out def save_eigenvectors(filename, svecs, wmag, wdir, pb): if svecs is None: return variables = pb.get_variables() # Make full eigenvectors (add DOFs fixed by boundary conditions). vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]), dtype=svecs.dtype) for ii in range(svecs.shape[1]): vecs[:, ii] = variables.make_full_vec(svecs[:, ii]) # Save the eigenvectors. out = {} state = pb.create_state() pp_name = pb.conf.options.get('post_process_hook') pp = getattr(pb.conf.funmod, pp_name if pp_name is not None else '', lambda out, *args, **kwargs: out) for ii in range(svecs.shape[1]): state.set_full(vecs[:, ii]) aux = state.create_output_dict() aux2 = {} pp(aux2, pb, state, wmag=wmag, wdir=wdir) aux.update(convert_complex_output(aux2)) out.update({key + '%03d' % ii : aux[key] for key in aux}) pb.save_state(filename, out=out) def assemble_matrices(define, mod, pars, set_wave_dir, options, wdir=None): """ Assemble the blocks of dispersion eigenvalue problem matrices. """ define_dict = define(filename_mesh=options.mesh_filename, pars=pars, approx_order=options.order, refinement_level=options.refine, solver_conf=options.solver_conf, plane=options.plane, post_process=options.post_process, **options.define_kwargs) conf = ProblemConf.from_dict(define_dict, mod) pb = Problem.from_conf(conf) pb.dispersion_options = options pb.set_output_dir(options.output_dir) dim = pb.domain.shape.dim # Set the normalized wave vector direction to the material(s). if wdir is None: wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64) wdir = wdir / nm.linalg.norm(wdir) set_wave_dir(pb, wdir) bbox = pb.domain.mesh.get_bounding_box() size = (bbox[1] - bbox[0]).max() scaling0 = apply_unit_multipliers([1.0], ['length'], options.unit_multipliers)[0] scaling = scaling0 if options.mesh_size is not None: scaling *= options.mesh_size / size output('scaling factor of periodic cell mesh coordinates:', scaling) output('new mesh size with applied unit multipliers:', scaling * size) pb.domain.mesh.coors[:] *= scaling pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True) bzone = 2.0 * nm.pi / (scaling * size) output('1. Brillouin zone size:', bzone * scaling0) output('1. Brillouin zone size with applied unit multipliers:', bzone) pb.time_update() pb.update_materials() # Assemble the matrices. mtxs = {} for key, eq in pb.equations.iteritems(): mtxs[key] = mtx = pb.mtx_a.copy() mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx) mtx.eliminate_zeros() output_array_stats(mtx.data, 'nonzeros in %s' % key) output('symmetry checks:') output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T)) output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H)) return pb, wdir, bzone, mtxs def setup_n_eigs(options, pb, mtxs): """ Setup the numbers of eigenvalues based on options and numbers of DOFs. """ solver_n_eigs = n_eigs = options.n_eigs n_dof = mtxs['K'].shape[0] if options.mode == 'omega': if options.n_eigs > n_dof: n_eigs = n_dof solver_n_eigs = None else: if options.n_eigs > 2 * n_dof: n_eigs = 2 * n_dof solver_n_eigs = None return solver_n_eigs, n_eigs def build_evp_matrices(mtxs, val, mode, pb): """ Build the matrices of the dispersion eigenvalue problem. 
""" if mode == 'omega': mtx_a = mtxs['K'] + val**2 * mtxs['S'] + val * mtxs['R'] output('A - A^H:', max_diff_csr(mtx_a, mtx_a.H)) evp_mtxs = (mtx_a, mtxs['M']) else: evp_mtxs = (mtxs['S'], mtxs['R'], mtxs['K'] - val**2 * mtxs['M']) return evp_mtxs def process_evp_results(eigs, svecs, val, wdir, bzone, pb, mtxs, options, std_wave_fun=None): """ Transform eigenvalues to either omegas or kappas, depending on `mode`. Transform eigenvectors, if available, depending on `mode`. Return also the values to log. """ if options.mode == 'omega': omegas = nm.sqrt(eigs) output('eigs, omegas:') for ii, om in enumerate(omegas): output('{:>3}. {: .10e}, {:.10e}'.format(ii, eigs[ii], om)) if options.stepper == 'linear': out = tuple(eigs) + tuple(omegas) else: out = tuple(val * wdir) + tuple(omegas) if std_wave_fun is not None: out = out + std_wave_fun(val, wdir) return omegas, svecs, out else: kappas = eigs.copy() rks = kappas.copy() # Mask modes far from 1. Brillouin zone. max_kappa = 1.2 * bzone kappas[kappas.real > max_kappa] = nm.nan # Mask non-physical modes. kappas[kappas.real < 0] = nm.nan kappas[nm.abs(kappas.imag) > 1e-10] = nm.nan out = tuple(kappas.real) output('raw kappas, masked real part:',) for ii, kr in enumerate(kappas.real): output('{:>3}. {: 23.5e}, {:.10e}'.format(ii, rks[ii], kr)) if svecs is not None: n_dof = mtxs['K'].shape[0] # Select only vectors corresponding to physical modes. ii = nm.isfinite(kappas.real) svecs = svecs[:n_dof, ii] if std_wave_fun is not None: out = out + tuple(ii if ii <= max_kappa else nm.nan for ii in std_wave_fun(val, wdir)) return kappas, svecs, out helps = { 'pars' : 'material parameters in Y1, Y2 subdomains in basic units.' ' The default parameters are:' ' young1, poisson1, density1, young2, poisson2, density2' ' [default: %(default)s]', 'conf' : 'if given, an alternative problem description file with apply_units() and' ' define() functions [default: %(default)s]', 'define_kwargs' : 'additional keyword arguments passed to define()', 'mesh_size' : 'desired mesh size (max. of bounding box dimensions) in basic units' ' - the input periodic cell mesh is rescaled to this size' ' [default: %(default)s]', 'unit_multipliers' : 'basic unit multipliers (time, length, mass) [default: %(default)s]', 'plane' : 'for 2D problems, plane strain or stress hypothesis selection' ' [default: %(default)s]', 'wave_dir' : 'the wave vector direction (will be normalized)' ' [default: %(default)s]', 'mode' : 'solution mode: omega = solve a generalized EVP for omega,' ' kappa = solve a quadratic generalized EVP for kappa' ' [default: %(default)s]', 'stepper' : 'the range stepper. 
For "brillouin", only the number' ' of items from --range is used' ' [default: %(default)s]', 'range' : 'the wave vector magnitude / frequency range' ' (like numpy.linspace) depending on the mode option' ' [default: %(default)s]', 'order' : 'displacement field approximation order [default: %(default)s]', 'refine' : 'number of uniform mesh refinements [default: %(default)s]', 'n_eigs' : 'the number of eigenvalues to compute [default: %(default)s]', 'eigs_only' : 'compute only eigenvalues, not eigenvectors', 'post_process' : 'post-process eigenvectors', 'solver_conf' : 'eigenvalue problem solver configuration options' ' [default: %(default)s]', 'save_regions' : 'save defined regions into' ' <output_directory>/regions.vtk', 'save_materials' : 'save material parameters into' ' <output_directory>/materials.vtk', 'log_std_waves' : 'log also standard pressure dilatation and shear waves', 'no_legends' : 'do not show legends in the log plots', 'no_show' : 'do not show the log figure', 'silent' : 'do not print messages to screen', 'clear' : 'clear old solution files from output directory', 'output_dir' : 'output directory [default: %(default)s]', 'mesh_filename' : 'input periodic cell mesh file name [default: %(default)s]', } def main(): # Aluminium and epoxy. default_pars = '70e9,0.35,2.799e3,3.8e9,0.27,1.142e3' default_solver_conf = ("kind='eig.scipy',method='eigsh',tol=1.0e-5," "maxiter=1000,which='LM',sigma=0.0") parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('--pars', metavar='name1=value1,name2=value2,...' ' or value1,value2,...', action='store', dest='pars', default=default_pars, help=helps['pars']) parser.add_argument('--conf', metavar='filename', action='store', dest='conf', default=None, help=helps['conf']) parser.add_argument('--define-kwargs', metavar='dict-like', action='store', dest='define_kwargs', default=None, help=helps['define_kwargs']) parser.add_argument('--mesh-size', type=float, metavar='float', action='store', dest='mesh_size', default=None, help=helps['mesh_size']) parser.add_argument('--unit-multipliers', metavar='c_time,c_length,c_mass', action='store', dest='unit_multipliers', default='1.0,1.0,1.0', help=helps['unit_multipliers']) parser.add_argument('--plane', action='store', dest='plane', choices=['strain', 'stress'], default='strain', help=helps['plane']) parser.add_argument('--wave-dir', metavar='float,float[,float]', action='store', dest='wave_dir', default='1.0,0.0,0.0', help=helps['wave_dir']) parser.add_argument('--mode', action='store', dest='mode', choices=['omega', 'kappa'], default='omega', help=helps['mode']) parser.add_argument('--stepper', action='store', dest='stepper', choices=['linear', 'brillouin'], default='linear', help=helps['stepper']) parser.add_argument('--range', metavar='start,stop,count', action='store', dest='range', default='0,6.4,33', help=helps['range']) parser.add_argument('--order', metavar='int', type=int, action='store', dest='order', default=1, help=helps['order']) parser.add_argument('--refine', metavar='int', type=int, action='store', dest='refine', default=0, help=helps['refine']) parser.add_argument('-n', '--n-eigs', metavar='int', type=int, action='store', dest='n_eigs', default=6, help=helps['n_eigs']) group = parser.add_mutually_exclusive_group() group.add_argument('--eigs-only', action='store_true', dest='eigs_only', default=False, help=helps['eigs_only']) group.add_argument('--post-process', action='store_true', dest='post_process', default=False, 
help=helps['post_process']) parser.add_argument('--solver-conf', metavar='dict-like', action='store', dest='solver_conf', default=default_solver_conf, help=helps['solver_conf']) parser.add_argument('--save-regions', action='store_true', dest='save_regions', default=False, help=helps['save_regions']) parser.add_argument('--save-materials', action='store_true', dest='save_materials', default=False, help=helps['save_materials']) parser.add_argument('--log-std-waves', action='store_true', dest='log_std_waves', default=False, help=helps['log_std_waves']) parser.add_argument('--no-legends', action='store_false', dest='show_legends', default=True, help=helps['no_legends']) parser.add_argument('--no-show', action='store_false', dest='show', default=True, help=helps['no_show']) parser.add_argument('--silent', action='store_true', dest='silent', default=False, help=helps['silent']) parser.add_argument('-c', '--clear', action='store_true', dest='clear', default=False, help=helps['clear']) parser.add_argument('-o', '--output-dir', metavar='path', action='store', dest='output_dir', default='output', help=helps['output_dir']) parser.add_argument('mesh_filename', default='', help=helps['mesh_filename']) options = parser.parse_args() output_dir = options.output_dir output.set_output(filename=os.path.join(output_dir,'output_log.txt'), combined=options.silent == False) if options.conf is not None: mod = import_file(options.conf) else: mod = sys.modules[__name__] pars_kinds = mod.pars_kinds define = mod.define set_wave_dir = mod.set_wave_dir setup_n_eigs = mod.setup_n_eigs build_evp_matrices = mod.build_evp_matrices save_materials = mod.save_materials get_std_wave_fun = mod.get_std_wave_fun get_stepper = mod.get_stepper process_evp_results = mod.process_evp_results save_eigenvectors = mod.save_eigenvectors try: options.pars = dict_from_string(options.pars) except: aux = [float(ii) for ii in options.pars.split(',')] options.pars = {key : aux[ii] for ii, key in enumerate(pars_kinds.keys())} options.unit_multipliers = [float(ii) for ii in options.unit_multipliers.split(',')] options.wave_dir = [float(ii) for ii in options.wave_dir.split(',')] aux = options.range.split(',') options.range = [float(aux[0]), float(aux[1]), int(aux[2])] options.solver_conf = dict_from_string(options.solver_conf) options.define_kwargs = dict_from_string(options.define_kwargs) if options.clear: remove_files_patterns(output_dir, ['*.h5', '*.vtk', '*.txt'], ignores=['output_log.txt'], verbose=True) filename = os.path.join(output_dir, 'options.txt') ensure_path(filename) save_options(filename, [('options', vars(options))], quote_command_line=True) pars = apply_units_to_pars(options.pars, pars_kinds, options.unit_multipliers) output('material parameter names and kinds:') output(pars_kinds) output('material parameters with applied unit multipliers:') output(pars) pars = Struct(**pars) if options.mode == 'omega': rng = copy(options.range) rng[:2] = apply_unit_multipliers(options.range[:2], ['wave_number', 'wave_number'], options.unit_multipliers) output('wave number range with applied unit multipliers:', rng) else: if options.stepper == 'brillouin': raise ValueError('Cannot use "brillouin" stepper in kappa mode!') rng = copy(options.range) rng[:2] = apply_unit_multipliers(options.range[:2], ['frequency', 'frequency'], options.unit_multipliers) output('frequency range with applied unit multipliers:', rng) pb, wdir, bzone, mtxs = assemble_matrices(define, mod, pars, set_wave_dir, options) dim = pb.domain.shape.dim if dim != 2: options.plane = 
'strain' if options.save_regions: pb.save_regions_as_groups(os.path.join(output_dir, 'regions')) if options.save_materials: save_materials(output_dir, pb, options) conf = pb.solver_confs['eig'] eig_solver = Solver.any_from_conf(conf) n_eigs, options.n_eigs = setup_n_eigs(options, pb, mtxs) get_color = lambda ii: plt.cm.viridis((float(ii) / (max(options.n_eigs, 2) - 1))) plot_kwargs = [{'color' : get_color(ii), 'ls' : '', 'marker' : 'o'} for ii in range(options.n_eigs)] get_color_dim = lambda ii: plt.cm.viridis((float(ii) / (max(dim, 2) -1))) plot_kwargs_dim = [{'color' : get_color_dim(ii), 'ls' : '', 'marker' : 'o'} for ii in range(dim)] log_names = [] log_plot_kwargs = [] if options.log_std_waves: std_wave_fun, log_names, log_plot_kwargs = get_std_wave_fun( pb, options) else: std_wave_fun = None stepper = get_stepper(rng, pb, options) if options.mode == 'omega': eigenshapes_filename = os.path.join(output_dir, 'frequency-eigenshapes-%s.vtk' % stepper.suffix) if options.stepper == 'linear': log = Log([[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)], [r'$\omega_{%d}$' % ii for ii in range(options.n_eigs)] + log_names], plot_kwargs=[plot_kwargs, plot_kwargs + log_plot_kwargs], formats=[['{:.12e}'] * options.n_eigs, ['{:.12e}'] * (options.n_eigs + len(log_names))], yscales=['linear', 'linear'], xlabels=[r'$\kappa$', r'$\kappa$'], ylabels=[r'eigenvalues $\lambda_i$', r'frequencies $\omega_i$'], show_legends=options.show_legends, is_plot=options.show, log_filename=os.path.join(output_dir, 'frequencies.txt'), aggregate=1000, sleep=0.1) else: log = Log([[r'$\kappa_{%d}$'% ii for ii in range(dim)], [r'$\omega_{%d}$' % ii for ii in range(options.n_eigs)] + log_names], plot_kwargs=[plot_kwargs_dim, plot_kwargs + log_plot_kwargs], formats=[['{:.12e}'] * dim, ['{:.12e}'] * (options.n_eigs + len(log_names))], yscales=['linear', 'linear'], xlabels=[r'', r''], ylabels=[r'wave vector $\kappa$', r'frequencies $\omega_i$'], show_legends=options.show_legends, is_plot=options.show, log_filename=os.path.join(output_dir, 'frequencies.txt'), aggregate=1000, sleep=0.1) for aux in stepper: if options.stepper == 'linear': iv, wmag = aux else: iv, wmag, wdir = aux output('step %d: wave vector %s' % (iv, wmag * wdir)) if options.stepper == 'brillouin': pb, _, bzone, mtxs = assemble_matrices( define, mod, pars, set_wave_dir, options, wdir=wdir) evp_mtxs = build_evp_matrices(mtxs, wmag, options.mode, pb) if options.eigs_only: eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=False) svecs = None else: eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=True) omegas, svecs, out = process_evp_results( eigs, svecs, wmag, wdir, bzone, pb, mtxs, options, std_wave_fun=std_wave_fun ) if options.stepper == 'linear': log(*out, x=[wmag, wmag]) else: log(*out, x=[iv, iv]) save_eigenvectors(eigenshapes_filename % iv, svecs, wmag, wdir, pb) gc.collect() log(save_figure=os.path.join(output_dir, 'frequencies.png')) log(finished=True) else: eigenshapes_filename = os.path.join(output_dir, 'wave-number-eigenshapes-%s.vtk' % stepper.suffix) log = Log([[r'$\kappa_{%d}$' % ii for ii in range(options.n_eigs)] + log_names], plot_kwargs=[plot_kwargs + log_plot_kwargs], formats=[['{:.12e}'] * (options.n_eigs + len(log_names))], yscales=['linear'], xlabels=[r'$\omega$'], ylabels=[r'wave numbers $\kappa_i$'], show_legends=options.show_legends, is_plot=options.show, log_filename=os.path.join(output_dir, 'wave-numbers.txt'), aggregate=1000, sleep=0.1) for io, omega in stepper: output('step %d: frequency %s' % (io, omega)) 
evp_mtxs = build_evp_matrices(mtxs, omega, options.mode, pb) if options.eigs_only: eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=False) svecs = None else: eigs, svecs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=True) kappas, svecs, out = process_evp_results( eigs, svecs, omega, wdir, bzone, pb, mtxs, options, std_wave_fun=std_wave_fun ) log(*out, x=[omega]) save_eigenvectors(eigenshapes_filename % io, svecs, kappas, wdir, pb) gc.collect() log(save_figure=os.path.join(output_dir, 'wave-numbers.png')) log(finished=True) if __name__ == '__main__': main()
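# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original script.  The default
# --solver-conf above asks for SciPy's eigsh in shift-invert mode
# (method='eigsh', which='LM', sigma=0.0); for mode='omega' the eigensolve
# done inside the stepper loop reduces to roughly the helper below, where
# mtx_a = K + kappa**2 * S + kappa * R and mtx_m = M are the (assumed sparse)
# matrices returned by build_evp_matrices(), and omegas = sqrt(eigs) as in
# process_evp_results().  The helper name is hypothetical.
def _solve_omega_evp_sketch(mtx_a, mtx_m, n_eigs=6):
    import numpy as nm
    from scipy.sparse.linalg import eigsh

    # Eigenvalues closest to sigma=0 (the smallest ones) via shift-invert,
    # matching the default solver options of this script.
    eigs, svecs = eigsh(mtx_a, k=n_eigs, M=mtx_m, which='LM', sigma=0.0,
                        tol=1.0e-5, maxiter=1000)
    return nm.sqrt(eigs), svecs
#
# A possible command line (script and mesh file names are hypothetical, the
# options are the ones defined in main() above):
#
#   python dispersion_analysis.py cell.mesh --mode=omega --range=0,6.4,33 \
#       --order=2 -n 8 --log-std-waves -o output
# ----------------------------------------------------------------------------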
bsd-3-clause
ky822/scikit-learn
sklearn/utils/estimator_checks.py
9
51912
from __future__ import print_function import types import warnings import sys import traceback import inspect import pickle from copy import deepcopy import numpy as np from scipy import sparse import struct from sklearn.externals.six.moves import zip from sklearn.externals.joblib import hash, Memory from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import META_ESTIMATORS from sklearn.utils.testing import set_random_state from sklearn.utils.testing import assert_greater from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.base import (clone, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, BaseEstimator) from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score from sklearn.lda import LDA from sklearn.random_projection import BaseRandomProjection from sklearn.feature_selection import SelectKBest from sklearn.svm.base import BaseLibSVM from sklearn.pipeline import make_pipeline from sklearn.utils.validation import DataConversionWarning from sklearn.utils import ConvergenceWarning from sklearn.cross_validation import train_test_split from sklearn.utils import shuffle from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris, load_boston, make_blobs BOSTON = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso', 'LassoLars', 'LinearRegression', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression', 'RANSACRegressor', 'RadiusNeighborsRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV'] def _yield_non_meta_checks(name, Estimator): yield check_estimators_dtypes yield check_fit_score_takes_y yield check_dtype_object yield check_estimators_fit_returns_self # Check that all estimator yield informative messages when # trained on empty datasets yield check_estimators_empty_data_messages if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']: # SpectralEmbedding is non-deterministic, # see issue #4236 # cross-decomposition's "transform" returns X and Y yield check_pipeline_consistency if name not in ['Imputer']: # Test that all estimators check their input for NaN's and infs yield check_estimators_nan_inf if name not in ['GaussianProcess']: # FIXME! # in particular GaussianProcess! yield check_estimators_overwrite_params if hasattr(Estimator, 'sparsify'): yield check_sparsify_coefficients yield check_estimator_sparse_data # Test that estimators can be pickled, and once pickled # give the same answer as before. 
yield check_estimators_pickle def _yield_classifier_checks(name, Classifier): # test classfiers can handle non-array data yield check_classifier_data_not_an_array # test classifiers trained on a single label always return this label yield check_classifiers_one_label yield check_classifiers_classes yield check_estimators_partial_fit_n_features # basic consistency testing yield check_classifiers_train if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"] # TODO some complication with -1 label and name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]): # We don't raise a warning in these classifiers, as # the column y interface is used by the forests. yield check_supervised_y_2d # test if NotFittedError is raised yield check_estimators_unfitted if 'class_weight' in Classifier().get_params().keys(): yield check_class_weight_classifiers def _yield_regressor_checks(name, Regressor): # TODO: test with intercept # TODO: test with multiple responses # basic testing yield check_regressors_train yield check_regressor_data_not_an_array yield check_estimators_partial_fit_n_features yield check_regressors_no_decision_function yield check_supervised_y_2d if name != 'CCA': # check that the regressor handles int input yield check_regressors_int # Test if NotFittedError is raised yield check_estimators_unfitted def _yield_transformer_checks(name, Transformer): # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer', 'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']: yield check_transformer_data_not_an_array # these don't actually fit the data, so don't raise errors if name not in ['AdditiveChi2Sampler', 'Binarizer', 'FunctionTransformer', 'Normalizer']: # basic tests yield check_transformer_general yield check_transformers_unfitted def _yield_clustering_checks(name, Clusterer): yield check_clusterer_compute_labels_predict if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. yield check_clustering yield check_estimators_partial_fit_n_features def _yield_all_checks(name, Estimator): for check in _yield_non_meta_checks(name, Estimator): yield check if issubclass(Estimator, ClassifierMixin): for check in _yield_classifier_checks(name, Estimator): yield check if issubclass(Estimator, RegressorMixin): for check in _yield_regressor_checks(name, Estimator): yield check if issubclass(Estimator, TransformerMixin): for check in _yield_transformer_checks(name, Estimator): yield check if issubclass(Estimator, ClusterMixin): for check in _yield_clustering_checks(name, Estimator): yield check yield check_fit2d_predict1d yield check_fit2d_1sample yield check_fit2d_1feature yield check_fit1d_1feature yield check_fit1d_1sample def check_estimator(Estimator): """Check if estimator adheres to sklearn conventions. This estimator will run an extensive test-suite for input validation, shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin from sklearn.base. Parameters ---------- Estimator : class Class to check. 
""" name = Estimator.__class__.__name__ check_parameters_default_constructible(name, Estimator) for check in _yield_all_checks(name, Estimator): check(name, Estimator) def _boston_subset(n_samples=200): global BOSTON if BOSTON is None: boston = load_boston() X, y = boston.data, boston.target X, y = shuffle(X, y, random_state=0) X, y = X[:n_samples], y[:n_samples] X = StandardScaler().fit_transform(X) BOSTON = X, y return BOSTON def set_fast_parameters(estimator): # speed up some estimators params = estimator.get_params() if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"): estimator.set_params(n_iter=5) if "max_iter" in params: warnings.simplefilter("ignore", ConvergenceWarning) if estimator.max_iter is not None: estimator.set_params(max_iter=min(5, estimator.max_iter)) # LinearSVR if estimator.__class__.__name__ == 'LinearSVR': estimator.set_params(max_iter=20) if "n_resampling" in params: # randomized lasso estimator.set_params(n_resampling=5) if "n_estimators" in params: # especially gradient boosting with default 100 estimator.set_params(n_estimators=min(5, estimator.n_estimators)) if "max_trials" in params: # RANSAC estimator.set_params(max_trials=10) if "n_init" in params: # K-Means estimator.set_params(n_init=2) if estimator.__class__.__name__ == "SelectFdr": # be tolerant of noisy datasets (not actually speed) estimator.set_params(alpha=.5) if estimator.__class__.__name__ == "TheilSenRegressor": estimator.max_subpopulation = 100 if isinstance(estimator, BaseRandomProjection): # Due to the jl lemma and often very few samples, the number # of components of the random matrix projection will be probably # greater than the number of features. # So we impose a smaller number (avoid "auto" mode) estimator.set_params(n_components=1) if isinstance(estimator, SelectKBest): # SelectKBest has a default of k=10 # which is more feature than we have in most case. estimator.set_params(k=1) class NotAnArray(object): " An object that is convertable to an array" def __init__(self, data): self.data = data def __array__(self, dtype=None): return self.data def _is_32bit(): """Detect if process is 32bit Python.""" return struct.calcsize('P') * 8 == 32 def check_estimator_sparse_data(name, Estimator): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 X_csr = sparse.csr_matrix(X) y = (4 * rng.rand(40)).astype(np.int) for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']: X = X_csr.asformat(sparse_format) # catch deprecation warnings with warnings.catch_warnings(): if name in ['Scaler', 'StandardScaler']: estimator = Estimator(with_mean=False) else: estimator = Estimator() set_fast_parameters(estimator) # fit and predict try: estimator.fit(X, y) if hasattr(estimator, "predict"): pred = estimator.predict(X) assert_equal(pred.shape, (X.shape[0],)) if hasattr(estimator, 'predict_proba'): probs = estimator.predict_proba(X) assert_equal(probs.shape, (X.shape[0], 4)) except TypeError as e: if 'sparse' not in repr(e): print("Estimator %s doesn't seem to fail gracefully on " "sparse data: error message state explicitly that " "sparse input is not supported if this is not the case." % name) raise except Exception: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: it should raise a TypeError if sparse input " "is explicitly not supported." 
% name) raise def check_dtype_object(name, Estimator): # check that estimators treat dtype object as numeric if possible rng = np.random.RandomState(0) X = rng.rand(40, 10).astype(object) y = (X[:, 0] * 4).astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) with warnings.catch_warnings(): estimator = Estimator() set_fast_parameters(estimator) estimator.fit(X, y) if hasattr(estimator, "predict"): estimator.predict(X) if hasattr(estimator, "transform"): estimator.transform(X) try: estimator.fit(X, y.astype(object)) except Exception as e: if "Unknown label type" not in str(e): raise X[0, 0] = {'foo': 'bar'} msg = "argument must be a string or a number" assert_raises_regex(TypeError, msg, estimator.fit, X, y) @ignore_warnings def check_fit2d_predict1d(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) estimator.fit(X, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): try: assert_warns(DeprecationWarning, getattr(estimator, method), X[0]) except ValueError: pass @ignore_warnings def check_fit2d_1sample(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(1, 10)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass @ignore_warnings def check_fit2d_1feature(name, Estimator): # check by fitting a 2d array and prediting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(10, 1)) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass @ignore_warnings def check_fit1d_1feature(name, Estimator): # check fitting 1d array with 1 feature rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20)) y = X.astype(np.int) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError: pass @ignore_warnings def check_fit1d_1sample(name, Estimator): # check fitting 1d array with 1 feature rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20)) y = np.array([1]) y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) try: estimator.fit(X, y) except ValueError : pass def check_transformer_general(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 
0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) X -= X.min() _check_transformer(name, Transformer, X, y) _check_transformer(name, Transformer, X.tolist(), y.tolist()) def check_transformer_data_not_an_array(name, Transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 this_X = NotAnArray(X) this_y = NotAnArray(np.asarray(y)) _check_transformer(name, Transformer, this_X, this_y) def check_transformers_unfitted(name, Transformer): X, y = _boston_subset() with warnings.catch_warnings(record=True): transformer = Transformer() assert_raises((AttributeError, ValueError), transformer.transform, X) def _check_transformer(name, Transformer, X, y): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # on numpy & scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) n_samples, n_features = np.asarray(X).shape # catch deprecation warnings with warnings.catch_warnings(record=True): transformer = Transformer() set_random_state(transformer) set_fast_parameters(transformer) # fit if name in CROSS_DECOMPOSITION: y_ = np.c_[y, y] y_[::2, 1] *= 2 else: y_ = y transformer.fit(X, y_) X_pred = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple): for x_pred in X_pred: assert_equal(x_pred.shape[0], n_samples) else: # check for consistent n_samples assert_equal(X_pred.shape[0], n_samples) if hasattr(transformer, 'transform'): if name in CROSS_DECOMPOSITION: X_pred2 = transformer.transform(X, y_) X_pred3 = transformer.fit_transform(X, y=y_) else: X_pred2 = transformer.transform(X) X_pred3 = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): assert_array_almost_equal( x_pred, x_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( x_pred, x_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) else: assert_array_almost_equal( X_pred, X_pred2, 2, "fit_transform and transform outcomes not consistent in %s" % Transformer) assert_array_almost_equal( X_pred, X_pred3, 2, "consecutive fit_transform outcomes not consistent in %s" % Transformer) assert_equal(len(X_pred2), n_samples) assert_equal(len(X_pred3), n_samples) # raises error on malformed input for transform if hasattr(X, 'T'): # If it's not an array, it does not have a 'T' property assert_raises(ValueError, transformer.transform, X.T) @ignore_warnings def check_pipeline_consistency(name, Estimator): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) # check that make_pipeline(est) gives same score as est X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) pipeline = make_pipeline(estimator) estimator.fit(X, y) pipeline.fit(X, y) funcs = ["score", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func_pipeline = getattr(pipeline, func_name) result = func(X, y) result_pipe = func_pipeline(X, y) assert_array_almost_equal(result, result_pipe) @ignore_warnings def check_fit_score_takes_y(name, Estimator): # check that all estimators accept an optional y # in fit and score so they can be used in pipelines rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = inspect.getargspec(func).args assert_true(args[2] in ["y", "Y"]) @ignore_warnings def check_estimators_dtypes(name, Estimator): rnd = np.random.RandomState(0) X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) X_train_64 = X_train_32.astype(np.float64) X_train_int_64 = X_train_32.astype(np.int64) X_train_int_32 = X_train_32.astype(np.int32) y = X_train_int_64[:, 0] y = multioutput_estimator_convert_y_2d(name, y) for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator, 1) estimator.fit(X_train, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): getattr(estimator, method)(X_train) def check_estimators_empty_data_messages(name, Estimator): e = Estimator() set_fast_parameters(e) set_random_state(e, 1) X_zero_samples = np.empty(0).reshape(0, 3) # The precise message can change depending on whether X or y is # validated first. Let us test the type of exception only: assert_raises(ValueError, e.fit, X_zero_samples, []) X_zero_features = np.empty(0).reshape(3, 0) # the following y should be accepted by both classifiers and regressors # and ignored by unsupervised models y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1])) msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required." assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y) def check_estimators_nan_inf(name, Estimator): rnd = np.random.RandomState(0) X_train_finite = rnd.uniform(size=(10, 3)) X_train_nan = rnd.uniform(size=(10, 3)) X_train_nan[0, 0] = np.nan X_train_inf = rnd.uniform(size=(10, 3)) X_train_inf[0, 0] = np.inf y = np.ones(10) y[:5] = 0 y = multioutput_estimator_convert_y_2d(name, y) error_string_fit = "Estimator doesn't check for NaN and inf in fit." 
error_string_predict = ("Estimator doesn't check for NaN and inf in" " predict.") error_string_transform = ("Estimator doesn't check for NaN and inf in" " transform.") for X_train in [X_train_nan, X_train_inf]: # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator, 1) # try to fit try: estimator.fit(X_train, y) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_fit, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_fit, Estimator, exc) traceback.print_exc(file=sys.stdout) raise exc else: raise AssertionError(error_string_fit, Estimator) # actually fit estimator.fit(X_train_finite, y) # predict if hasattr(estimator, "predict"): try: estimator.predict(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_predict, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_predict, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_predict, Estimator) # transform if hasattr(estimator, "transform"): try: estimator.transform(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_transform, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_transform, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_transform, Estimator) def check_estimators_pickle(name, Estimator): """Test that we can pickle all estimators""" check_methods = ["predict", "transform", "decision_function", "predict_proba"] X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) # some estimators can't do features less than 0 X -= X.min() # some estimators only take multioutputs y = multioutput_estimator_convert_y_2d(name, y) # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_random_state(estimator) set_fast_parameters(estimator) estimator.fit(X, y) result = dict() for method in check_methods: if hasattr(estimator, method): result[method] = getattr(estimator, method)(X) # pickle and unpickle! pickled_estimator = pickle.dumps(estimator) unpickled_estimator = pickle.loads(pickled_estimator) for method in result: unpickled_result = getattr(unpickled_estimator, method)(X) assert_array_almost_equal(result[method], unpickled_result) def check_estimators_partial_fit_n_features(name, Alg): # check if number of features changes between calls to partial_fit. 
if not hasattr(Alg, 'partial_fit'): return X, y = make_blobs(n_samples=50, random_state=1) X -= X.min() with warnings.catch_warnings(record=True): alg = Alg() set_fast_parameters(alg) if isinstance(alg, ClassifierMixin): classes = np.unique(y) alg.partial_fit(X, y, classes=classes) else: alg.partial_fit(X, y) assert_raises(ValueError, alg.partial_fit, X[:, :-1], y) def check_clustering(name, Alg): X, y = make_blobs(n_samples=50, random_state=1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) n_samples, n_features = X.shape # catch deprecation and neighbors warnings with warnings.catch_warnings(record=True): alg = Alg() set_fast_parameters(alg) if hasattr(alg, "n_clusters"): alg.set_params(n_clusters=3) set_random_state(alg) if name == 'AffinityPropagation': alg.set_params(preference=-100) alg.set_params(max_iter=100) # fit alg.fit(X) # with lists alg.fit(X.tolist()) assert_equal(alg.labels_.shape, (n_samples,)) pred = alg.labels_ assert_greater(adjusted_rand_score(pred, y), 0.4) # fit another time with ``fit_predict`` and compare results if name is 'SpectralClustering': # there is no way to make Spectral clustering deterministic :( return set_random_state(alg) with warnings.catch_warnings(record=True): pred2 = alg.fit_predict(X) assert_array_equal(pred, pred2) def check_clusterer_compute_labels_predict(name, Clusterer): """Check that predict is invariant of compute_labels""" X, y = make_blobs(n_samples=20, random_state=0) clusterer = Clusterer() if hasattr(clusterer, "compute_labels"): # MiniBatchKMeans if hasattr(clusterer, "random_state"): clusterer.set_params(random_state=0) X_pred1 = clusterer.fit(X).predict(X) clusterer.set_params(compute_labels=False) X_pred2 = clusterer.fit(X).predict(X) assert_array_equal(X_pred1, X_pred2) def check_classifiers_one_label(name, Classifier): error_string_fit = "Classifier can't train when only one class is present." 
error_string_predict = ("Classifier can't predict when only one class is " "present.") rnd = np.random.RandomState(0) X_train = rnd.uniform(size=(10, 3)) X_test = rnd.uniform(size=(10, 3)) y = np.ones(10) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() set_fast_parameters(classifier) # try to fit try: classifier.fit(X_train, y) except ValueError as e: if 'class' not in repr(e): print(error_string_fit, Classifier, e) traceback.print_exc(file=sys.stdout) raise e else: return except Exception as exc: print(error_string_fit, Classifier, exc) traceback.print_exc(file=sys.stdout) raise exc # predict try: assert_array_equal(classifier.predict(X_test), y) except Exception as exc: print(error_string_predict, Classifier, exc) raise exc def check_classifiers_train(name, Classifier): X_m, y_m = make_blobs(n_samples=300, random_state=0) X_m, y_m = shuffle(X_m, y_m, random_state=7) X_m = StandardScaler().fit_transform(X_m) # generate binary problem from multi-class one y_b = y_m[y_m != 2] X_b = X_m[y_m != 2] for (X, y) in [(X_m, y_m), (X_b, y_b)]: # catch deprecation warnings classes = np.unique(y) n_classes = len(classes) n_samples, n_features = X.shape with warnings.catch_warnings(record=True): classifier = Classifier() if name in ['BernoulliNB', 'MultinomialNB']: X -= X.min() set_fast_parameters(classifier) set_random_state(classifier) # raises error on malformed input for fit assert_raises(ValueError, classifier.fit, X, y[:-1]) # fit classifier.fit(X, y) # with lists classifier.fit(X.tolist(), y.tolist()) assert_true(hasattr(classifier, "classes_")) y_pred = classifier.predict(X) assert_equal(y_pred.shape, (n_samples,)) # training set performance if name not in ['BernoulliNB', 'MultinomialNB']: assert_greater(accuracy_score(y, y_pred), 0.83) # raises error on malformed input for predict assert_raises(ValueError, classifier.predict, X.T) if hasattr(classifier, "decision_function"): try: # decision_function agrees with predict decision = classifier.decision_function(X) if n_classes is 2: assert_equal(decision.shape, (n_samples,)) dec_pred = (decision.ravel() > 0).astype(np.int) assert_array_equal(dec_pred, y_pred) if (n_classes is 3 and not isinstance(classifier, BaseLibSVM)): # 1on1 of LibSVM works differently assert_equal(decision.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(decision, axis=1), y_pred) # raises error on malformed input assert_raises(ValueError, classifier.decision_function, X.T) # raises error on malformed input for decision_function assert_raises(ValueError, classifier.decision_function, X.T) except NotImplementedError: pass if hasattr(classifier, "predict_proba"): # predict_proba agrees with predict y_prob = classifier.predict_proba(X) assert_equal(y_prob.shape, (n_samples, n_classes)) assert_array_equal(np.argmax(y_prob, axis=1), y_pred) # check that probas for all classes sum to one assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples)) # raises error on malformed input assert_raises(ValueError, classifier.predict_proba, X.T) # raises error on malformed input for predict_proba assert_raises(ValueError, classifier.predict_proba, X.T) def check_estimators_fit_returns_self(name, Estimator): """Check if self is returned when calling fit""" X, y = make_blobs(random_state=0, n_samples=9, n_features=4) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) assert_true(estimator.fit(X, y) is 
estimator) @ignore_warnings def check_estimators_unfitted(name, Estimator): """Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise either AttributeError or ValueError. The specific exception type NotFittedError inherits from both and can therefore be adequately raised for that purpose. """ # Common test for Regressors as well as Classifiers X, y = _boston_subset() with warnings.catch_warnings(record=True): est = Estimator() msg = "fit" if hasattr(est, 'predict'): assert_raise_message((AttributeError, ValueError), msg, est.predict, X) if hasattr(est, 'decision_function'): assert_raise_message((AttributeError, ValueError), msg, est.decision_function, X) if hasattr(est, 'predict_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_proba, X) if hasattr(est, 'predict_log_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_log_proba, X) def check_supervised_y_2d(name, Estimator): if "MultiTask" in name: # These only work on 2d, so this test makes no sense return rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) # fit estimator.fit(X, y) y_pred = estimator.predict(X) set_random_state(estimator) # Check that when a 2D y is given, a DataConversionWarning is # raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", DataConversionWarning) warnings.simplefilter("ignore", RuntimeWarning) estimator.fit(X, y[:, np.newaxis]) y_pred_2d = estimator.predict(X) msg = "expected 1 DataConversionWarning, got: %s" % ( ", ".join([str(w_x) for w_x in w])) if name not in MULTI_OUTPUT: # check that we warned if we don't support multi-output assert_greater(len(w), 0, msg) assert_true("DataConversionWarning('A column-vector y" " was passed when a 1d array was expected" in msg) assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel()) def check_classifiers_classes(name, Classifier): X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 y_names = np.array(["one", "two", "three"])[y] for y_names in [y_names, y_names.astype('O')]: if name in ["LabelPropagation", "LabelSpreading"]: # TODO some complication with -1 label y_ = y else: y_ = y_names classes = np.unique(y_) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() if name == 'BernoulliNB': classifier.set_params(binarize=X.mean()) set_fast_parameters(classifier) set_random_state(classifier) # fit classifier.fit(X, y_) y_pred = classifier.predict(X) # training set performance assert_array_equal(np.unique(y_), np.unique(y_pred)) if np.any(classifier.classes_ != classes): print("Unexpected classes_ attribute for %r: " "expected %s, got %s" % (classifier, classes, classifier.classes_)) def check_regressors_int(name, Regressor): X, _ = _boston_subset() X = X[:50] rnd = np.random.RandomState(0) y = rnd.randint(3, size=X.shape[0]) y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds regressor_1 = Regressor() regressor_2 = Regressor() set_fast_parameters(regressor_1) set_fast_parameters(regressor_2) 
set_random_state(regressor_1) set_random_state(regressor_2) if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y # fit regressor_1.fit(X, y_) pred1 = regressor_1.predict(X) regressor_2.fit(X, y_.astype(np.float)) pred2 = regressor_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_regressors_train(name, Regressor): X, y = _boston_subset() y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled y = y.ravel() y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): regressor = Regressor() set_fast_parameters(regressor) if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == 'PassiveAggressiveRegressor': regressor.C = 0.01 # raises error on malformed input for fit assert_raises(ValueError, regressor.fit, X, y[:-1]) # fit if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y set_random_state(regressor) regressor.fit(X, y_) regressor.fit(X.tolist(), y_.tolist()) y_pred = regressor.predict(X) assert_equal(y_pred.shape, y_.shape) # TODO: find out why PLS and CCA fail. RANSAC is random # and furthermore assumes the presence of outliers, hence # skipped if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'): print(regressor) assert_greater(regressor.score(X, y_), 0.5) @ignore_warnings def check_regressors_no_decision_function(name, Regressor): # checks whether regressors have decision_function or predict_proba rng = np.random.RandomState(0) X = rng.normal(size=(10, 4)) y = multioutput_estimator_convert_y_2d(name, X[:, 0]) regressor = Regressor() set_fast_parameters(regressor) if hasattr(regressor, "n_components"): # FIXME CCA, PLS is not robust to rank 1 effects regressor.n_components = 1 regressor.fit(X, y) funcs = ["decision_function", "predict_proba", "predict_log_proba"] for func_name in funcs: func = getattr(regressor, func_name, None) if func is None: # doesn't have function continue # has function. Should raise deprecation warning msg = func_name assert_warns_message(DeprecationWarning, msg, func, X) def check_class_weight_classifiers(name, Classifier): if name == "NuSVC": # the sparse version has a parameter that doesn't do anything raise SkipTest if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! 
raise SkipTest for n_centers in [2, 3]: # create a very noisy dataset X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) n_centers = len(np.unique(y_train)) if n_centers == 2: class_weight = {0: 1000, 1: 0.0001} else: class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} with warnings.catch_warnings(record=True): classifier = Classifier(class_weight=class_weight) if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) if hasattr(classifier, "min_weight_fraction_leaf"): classifier.set_params(min_weight_fraction_leaf=0.01) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) assert_greater(np.mean(y_pred == 0), 0.89) def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train, X_test, y_test, weights): with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) classifier.set_params(class_weight='balanced') classifier.fit(X_train, y_train) y_pred_balanced = classifier.predict(X_test) assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'), f1_score(y_test, y_pred, average='weighted')) def check_class_weight_balanced_linear_classifier(name, Classifier): """Test class weights with non-contiguous class labels.""" X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = np.array([1, 1, 1, -1, -1]) with warnings.catch_warnings(record=True): classifier = Classifier() if hasattr(classifier, "n_iter"): # This is a very small dataset, default n_iter are likely to prevent # convergence classifier.set_params(n_iter=1000) set_random_state(classifier) # Let the model compute the class frequencies classifier.set_params(class_weight='balanced') coef_balanced = classifier.fit(X, y).coef_.copy() # Count each label occurrence to reweight manually n_samples = len(y) n_classes = float(len(np.unique(y))) class_weight = {1: n_samples / (np.sum(y == 1) * n_classes), -1: n_samples / (np.sum(y == -1) * n_classes)} classifier.set_params(class_weight=class_weight) coef_manual = classifier.fit(X, y).coef_.copy() assert_array_almost_equal(coef_balanced, coef_manual) def check_estimators_overwrite_params(name, Estimator): X, y = make_blobs(random_state=0, n_samples=9) y = multioutput_estimator_convert_y_2d(name, y) # some want non-negative input X -= X.min() with warnings.catch_warnings(record=True): # catch deprecation warnings estimator = Estimator() set_fast_parameters(estimator) set_random_state(estimator) # Make a physical copy of the orginal estimator parameters before fitting. params = estimator.get_params() original_params = deepcopy(params) # Fit the model estimator.fit(X, y) # Compare the state of the model parameters with the original parameters new_params = estimator.get_params() for param_name, original_value in original_params.items(): new_value = new_params[param_name] # We should never change or mutate the internal state of input # parameters by default. To check this we use the joblib.hash function # that introspects recursively any subobjects to compute a checksum. # The only exception to this rule of immutable constructor parameters # is possible RandomState instance but in this check we explicitly # fixed the random_state params recursively to be integer seeds. 
assert_equal(hash(new_value), hash(original_value), "Estimator %s should not change or mutate " " the parameter %s from %s to %s during fit." % (name, param_name, original_value, new_value)) def check_sparsify_coefficients(name, Estimator): X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]]) y = [1, 1, 1, 2, 2, 2, 3, 3, 3] est = Estimator() est.fit(X, y) pred_orig = est.predict(X) # test sparsify with dense inputs est.sparsify() assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) # pickle and unpickle with sparse coef_ est = pickle.loads(pickle.dumps(est)) assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) def check_classifier_data_not_an_array(name, Estimator): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]]) y = [1, 1, 1, 2, 2, 2] y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_regressor_data_not_an_array(name, Estimator): X, y = _boston_subset(n_samples=50) y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_estimators_data_not_an_array(name, Estimator, X, y): if name in CROSS_DECOMPOSITION: raise SkipTest # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds estimator_1 = Estimator() estimator_2 = Estimator() set_fast_parameters(estimator_1) set_fast_parameters(estimator_2) set_random_state(estimator_1) set_random_state(estimator_2) y_ = NotAnArray(np.asarray(y)) X_ = NotAnArray(np.asarray(X)) # fit estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_parameters_default_constructible(name, Estimator): classifier = LDA() # test default-constructibility # get rid of deprecation warnings with warnings.catch_warnings(record=True): if name in META_ESTIMATORS: estimator = Estimator(classifier) else: estimator = Estimator() # test cloning clone(estimator) # test __repr__ repr(estimator) # test that set_params returns self assert_true(estimator.set_params() is estimator) # test if init does nothing but set parameters # this is important for grid_search etc. # We get the default parameters from init and then # compare these against the actual values of the attributes. # this comes from getattr. Gets rid of deprecation decorator. init = getattr(estimator.__init__, 'deprecated_original', estimator.__init__) try: args, varargs, kws, defaults = inspect.getargspec(init) except TypeError: # init is not a python function. # true for mixins return params = estimator.get_params() if name in META_ESTIMATORS: # they need a non-default argument args = args[2:] else: args = args[1:] if args: # non-empty list assert_equal(len(args), len(defaults)) else: return for arg, default in zip(args, defaults): assert_in(type(default), [str, int, float, bool, tuple, type(None), np.float64, types.FunctionType, Memory]) if arg not in params.keys(): # deprecated parameter, not in get_params assert_true(default is None) continue if isinstance(params[arg], np.ndarray): assert_array_equal(params[arg], default) else: assert_equal(params[arg], default) def multioutput_estimator_convert_y_2d(name, y): # Estimators in mono_output_task_error raise ValueError if y is of 1-D # Convert into a 2-D y for those estimators. 
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV', 'MultiTaskLasso', 'MultiTaskElasticNet']): return y[:, np.newaxis] return y def check_non_transformer_estimators_n_iter(name, estimator, multi_output=False): # Check if all iterative solvers, run for more than one iteratiom iris = load_iris() X, y_ = iris.data, iris.target if multi_output: y_ = y_[:, np.newaxis] set_random_state(estimator, 0) if name == 'AffinityPropagation': estimator.fit(X) else: estimator.fit(X, y_) assert_greater(estimator.n_iter_, 0) def check_transformer_n_iter(name, estimator): if name in CROSS_DECOMPOSITION: # Check using default data X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]] y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]] else: X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() - 0.1 set_random_state(estimator, 0) estimator.fit(X, y_) # These return a n_iter per component. if name in CROSS_DECOMPOSITION: for iter_ in estimator.n_iter_: assert_greater(iter_, 1) else: assert_greater(estimator.n_iter_, 1) def check_get_params_invariance(name, estimator): class T(BaseEstimator): """Mock classifier """ def __init__(self): pass def fit(self, X, y): return self if name in ('FeatureUnion', 'Pipeline'): e = estimator([('clf', T())]) elif name in ('GridSearchCV' 'RandomizedSearchCV'): return else: e = estimator() shallow_params = e.get_params(deep=False) deep_params = e.get_params(deep=True) assert_true(all(item in deep_params.items() for item in shallow_params.items()))
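# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module.  check_estimator()
# defined above is the public entry point: it takes an estimator class, runs
# every applicable check, and raises as soon as one fails.  A typical use in a
# project's test suite might look like the hypothetical helper below;
# LinearSVC is just a convenient built-in classifier used as an example.
def _example_run_common_checks():
    from sklearn.svm import LinearSVC

    # Runs input-validation, pickling, classifier-specific checks, etc.
    check_estimator(LinearSVC)
# ----------------------------------------------------------------------------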
bsd-3-clause
heli522/scikit-learn
sklearn/utils/arpack.py
265
64837
""" This contains a copy of the future version of scipy.sparse.linalg.eigen.arpack.eigsh It's an upgraded wrapper of the ARPACK library which allows the use of shift-invert mode for symmetric matrices. Find a few eigenvectors and eigenvalues of a matrix. Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ """ # Wrapper implementation notes # # ARPACK Entry Points # ------------------- # The entry points to ARPACK are # - (s,d)seupd : single and double precision symmetric matrix # - (s,d,c,z)neupd: single,double,complex,double complex general matrix # This wrapper puts the *neupd (general matrix) interfaces in eigs() # and the *seupd (symmetric matrix) in eigsh(). # There is no Hermetian complex/double complex interface. # To find eigenvalues of a Hermetian matrix you # must use eigs() and not eigsh() # It might be desirable to handle the Hermetian case differently # and, for example, return real eigenvalues. # Number of eigenvalues returned and complex eigenvalues # ------------------------------------------------------ # The ARPACK nonsymmetric real and double interface (s,d)naupd return # eigenvalues and eigenvectors in real (float,double) arrays. # Since the eigenvalues and eigenvectors are, in general, complex # ARPACK puts the real and imaginary parts in consecutive entries # in real-valued arrays. This wrapper puts the real entries # into complex data types and attempts to return the requested eigenvalues # and eigenvectors. # Solver modes # ------------ # ARPACK and handle shifted and shift-inverse computations # for eigenvalues by providing a shift (sigma) and a solver. __docformat__ = "restructuredtext en" __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] import warnings from scipy.sparse.linalg.eigen.arpack import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator from scipy.sparse import identity, isspmatrix, isspmatrix_csr from scipy.linalg import lu_factor, lu_solve from scipy.sparse.sputils import isdense from scipy.sparse.linalg import gmres, splu import scipy from distutils.version import LooseVersion _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} _ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} DNAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found. IPARAM(5) " "returns the number of wanted converged Ritz values.", 2: "No longer an informational error. Deprecated starting " "with release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the " "Implicitly restarted Arnoldi iteration. One possibility " "is to increase the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation;", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible.", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated." 
} SNAUPD_ERRORS = DNAUPD_ERRORS ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." CNAUPD_ERRORS = ZNAUPD_ERRORS DSAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found.", 2: "No longer an informational error. Deprecated starting with " "release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the Implicitly " "restarted Arnoldi iteration. One possibility is to increase " "the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from trid. eigenvalue calculation; " "Informational error from LAPACK routine dsteqr .", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible. ", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated.", } SSAUPD_ERRORS = DSAUPD_ERRORS DNEUPD_ERRORS = { 0: "Normal exit.", 1: "The Schur form computed by LAPACK routine dlahqr " "could not be reordered by LAPACK routine dtrsen. " "Re-enter subroutine dneupd with IPARAM(5)NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least NCV " "columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from calculation of a real Schur form. " "Informational error from LAPACK routine dlahqr .", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine dtrevc.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "DNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "DNEUPD got a different count of the number of converged " "Ritz values than DNAUPD got. This indicates the user " "probably made an error in passing data from DNAUPD to " "DNEUPD or that the data was modified before entering " "DNEUPD", } SNEUPD_ERRORS = DNEUPD_ERRORS.copy() SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " "could not be reordered by LAPACK routine strsen . " "Re-enter subroutine dneupd with IPARAM(5)=NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.") SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " "accuracy.") SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " "converged Ritz values than SNAUPD got. 
This indicates " "the user probably made an error in passing data from " "SNAUPD to SNEUPD or that the data was modified before " "entering SNEUPD") ZNEUPD_ERRORS = {0: "Normal exit.", 1: "The Schur form computed by LAPACK routine csheqr " "could not be reordered by LAPACK routine ztrsen. " "Re-enter subroutine zneupd with IPARAM(5)=NCV and " "increase the size of the array D to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 1 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation. " "This should never happened.", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine ztrevc.", -10: "IPARAM(7) must be 1,2,3", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "ZNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "ZNEUPD got a different count of the number of " "converged Ritz values than ZNAUPD got. This " "indicates the user probably made an error in passing " "data from ZNAUPD to ZNEUPD or that the data was " "modified before entering ZNEUPD"} CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " "accuracy.") CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " "converged Ritz values than CNAUPD got. This indicates " "the user probably made an error in passing data from " "CNAUPD to CNEUPD or that the data was modified before " "entering CNEUPD") DSEUPD_ERRORS = { 0: "Normal exit.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: ("Error return from trid. eigenvalue calculation; " "Information error from LAPACK routine dsteqr."), -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "NEV and WHICH = 'BE' are incompatible.", -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", -16: "HOWMNY = 'S' not yet implemented", -17: ("DSEUPD got a different count of the number of converged " "Ritz values than DSAUPD got. This indicates the user " "probably made an error in passing data from DSAUPD to " "DSEUPD or that the data was modified before entering " "DSEUPD.") } SSEUPD_ERRORS = DSEUPD_ERRORS.copy() SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " "to sufficient accuracy.") SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " "converged " "Ritz values than SSAUPD got. 
This indicates the user " "probably made an error in passing data from SSAUPD to " "SSEUPD or that the data was modified before entering " "SSEUPD.") _SAUPD_ERRORS = {'d': DSAUPD_ERRORS, 's': SSAUPD_ERRORS} _NAUPD_ERRORS = {'d': DNAUPD_ERRORS, 's': SNAUPD_ERRORS, 'z': ZNAUPD_ERRORS, 'c': CNAUPD_ERRORS} _SEUPD_ERRORS = {'d': DSEUPD_ERRORS, 's': SSEUPD_ERRORS} _NEUPD_ERRORS = {'d': DNEUPD_ERRORS, 's': SNEUPD_ERRORS, 'z': ZNEUPD_ERRORS, 'c': CNEUPD_ERRORS} # accepted values of parameter WHICH in _SEUPD _SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] # accepted values of parameter WHICH in _NAUPD _NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] class ArpackError(RuntimeError): """ ARPACK error """ def __init__(self, info, infodict=_NAUPD_ERRORS): msg = infodict.get(info, "Unknown error") RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) class ArpackNoConvergence(ArpackError): """ ARPACK iteration did not converge Attributes ---------- eigenvalues : ndarray Partial result. Converged eigenvalues. eigenvectors : ndarray Partial result. Converged eigenvectors. """ def __init__(self, msg, eigenvalues, eigenvectors): ArpackError.__init__(self, -1, {-1: msg}) self.eigenvalues = eigenvalues self.eigenvectors = eigenvectors class _ArpackParams(object): def __init__(self, n, k, tp, mode=1, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if k <= 0: raise ValueError("k must be positive, k=%d" % k) if maxiter is None: maxiter = n * 10 if maxiter <= 0: raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) if tp not in 'fdFD': raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") if v0 is not None: # ARPACK overwrites its initial resid, make a copy self.resid = np.array(v0, copy=True) info = 1 else: self.resid = np.zeros(n, tp) info = 0 if sigma is None: #sigma not used self.sigma = 0 else: self.sigma = sigma if ncv is None: ncv = 2 * k + 1 ncv = min(ncv, n) self.v = np.zeros((n, ncv), tp) # holds Ritz vectors self.iparam = np.zeros(11, "int") # set solver mode and parameters ishfts = 1 self.mode = mode self.iparam[0] = ishfts self.iparam[2] = maxiter self.iparam[3] = 1 self.iparam[6] = mode self.n = n self.tol = tol self.k = k self.maxiter = maxiter self.ncv = ncv self.which = which self.tp = tp self.info = info self.converged = False self.ido = 0 def _raise_no_convergence(self): msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" k_ok = self.iparam[4] num_iter = self.iparam[2] try: ev, vec = self.extract(True) except ArpackError as err: msg = "%s [%s]" % (msg, err) ev = np.zeros((0,)) vec = np.zeros((self.n, 0)) k_ok = 0 raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) class _SymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x : # A - symmetric # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the general eigenvalue problem: # A*x = lambda*M*x # A - symmetric # M - symmetric positive definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be 
# matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # # mode = 4: # Solve the general eigenvalue problem in Buckling mode: # A*x = lambda*AG*x # A - symmetric positive semi-definite # AG - symmetric indefinite # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = left multiplication by [A-sigma*AG]^-1 # # mode = 5: # Solve the general eigenvalue problem in Cayley-transformed mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode == 3: if matvec is not None: raise ValueError("matvec must not be specified for mode=3") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=3") if M_matvec is None: self.OP = Minv_matvec self.OPa = Minv_matvec self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(M_matvec(x)) self.OPa = Minv_matvec self.B = M_matvec self.bmat = 'G' elif mode == 4: if matvec is None: raise ValueError("matvec must be specified for mode=4") if M_matvec is not None: raise ValueError("M_matvec must not be specified for mode=4") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=4") self.OPa = Minv_matvec self.OP = lambda x: self.OPa(matvec(x)) self.B = matvec self.bmat = 'G' elif mode == 5: if matvec is None: raise ValueError("matvec must be specified for mode=5") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=5") self.OPa = Minv_matvec self.A_matvec = matvec if M_matvec is None: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * M_matvec(x)) self.B = M_matvec self.bmat = 'G' else: raise ValueError("mode=%i not implemented" % mode) if which not in _SEUPD_WHICH: raise ValueError("which must be one of %s" % ' '.join(_SEUPD_WHICH)) if k >= n: raise ValueError("k must be less than rank(A), k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k: raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp) ltr = _type_conv[self.tp] if ltr not in ["s", "d"]: raise ValueError("Input matrix is not real-valued.") self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] self.iterate_infodict = _SAUPD_ERRORS[ltr] self.extract_infodict = _SEUPD_ERRORS[ltr] self.ipntr = np.zeros(11, "int") def iterate(self): self.ido, self.resid, self.v, 
self.iparam, self.ipntr, self.info = \ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode == 1: self.workd[yslice] = self.OP(self.workd[xslice]) elif self.mode == 2: self.workd[xslice] = self.OPb(self.workd[xslice]) self.workd[yslice] = self.OPa(self.workd[xslice]) elif self.mode == 5: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) Ax = self.A_matvec(self.workd[xslice]) self.workd[yslice] = self.OPa(Ax + (self.sigma * self.workd[Bxslice])) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): rvec = return_eigenvectors ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam[0:7], self.ipntr, self.workd[0:2 * self.n], self.workl, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d class _UnsymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x # A - square matrix # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the generalized eigenvalue problem: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3,4: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # if A is real and mode==3, use the real part of Minv_matvec # if A is real and mode==4, use the imag part of Minv_matvec # if A is complex and mode==3, # use real and imag parts of Minv_matvec if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: 
raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode in (3, 4): if matvec is None: raise ValueError("matvec must be specified " "for mode in (3,4)") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified " "for mode in (3,4)") self.matvec = matvec if tp in 'DF': # complex type if mode == 3: self.OPa = Minv_matvec else: raise ValueError("mode=4 invalid for complex A") else: # real type if mode == 3: self.OPa = lambda x: np.real(Minv_matvec(x)) else: self.OPa = lambda x: np.imag(Minv_matvec(x)) if M_matvec is None: self.B = lambda x: x self.bmat = 'I' self.OP = self.OPa else: self.B = M_matvec self.bmat = 'G' self.OP = lambda x: self.OPa(M_matvec(x)) else: raise ValueError("mode=%i not implemented" % mode) if which not in _NEUPD_WHICH: raise ValueError("Parameter which must be one of %s" % ' '.join(_NEUPD_WHICH)) if k >= n - 1: raise ValueError("k must be less than rank(A)-1, k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k + 1: raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp) ltr = _type_conv[self.tp] self._arpack_solver = _arpack.__dict__[ltr + 'naupd'] self._arpack_extract = _arpack.__dict__[ltr + 'neupd'] self.iterate_infodict = _NAUPD_ERRORS[ltr] self.extract_infodict = _NEUPD_ERRORS[ltr] self.ipntr = np.zeros(14, "int") if self.tp in 'FD': self.rwork = np.zeros(self.ncv, self.tp.lower()) else: self.rwork = None def iterate(self): if self.tp in 'fd': self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) else: self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode in (1, 2): self.workd[yslice] = self.OP(self.workd[xslice]) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): k, n = self.k, self.n ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused sigmar = np.real(self.sigma) sigmai = np.imag(self.sigma) workev = np.zeros(3 * self.ncv, self.tp) if self.tp in 'fd': dr = np.zeros(k + 1, self.tp) di = np.zeros(k + 1, self.tp) zr = np.zeros((n, k + 1), self.tp) dr, di, zr, ierr = \ self._arpack_extract( return_eigenvectors, howmny, sselect, sigmar, sigmai, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) nreturned = self.iparam[4] # number of good eigenvalues returned # Build complex eigenvalues from real and imaginary parts d = dr + 1.0j * di # Arrange the eigenvectors: complex eigenvectors are stored as # real,imaginary in consecutive columns z = zr.astype(self.tp.upper()) # The ARPACK nonsymmetric real and double interface (s,d)naupd # return eigenvalues and eigenvectors in real (float,double) # arrays. # Efficiency: this should check that return_eigenvectors == True # before going through this construction. if sigmai == 0: i = 0 while i <= k: # check if complex if abs(d[i].imag) != 0: # this is a complex conjugate pair with eigenvalues # in consecutive columns if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 else: # real matrix, mode 3 or 4, imag(sigma) is nonzero: # see remark 3 in <s,d>neupd.f # Build complex eigenvalues from real and imaginary parts i = 0 while i <= k: if abs(d[i].imag) == 0: d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) else: if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() d[i] = ((np.dot(zr[:, i], self.matvec(zr[:, i])) + np.dot(zr[:, i + 1], self.matvec(zr[:, i + 1]))) + 1j * (np.dot(zr[:, i], self.matvec(zr[:, i + 1])) - np.dot(zr[:, i + 1], self.matvec(zr[:, i])))) d[i + 1] = d[i].conj() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" if nreturned <= k: # we got less or equal as many eigenvalues we wanted d = d[:nreturned] z = z[:, :nreturned] else: # we got one extra eigenvalue (likely a cc pair, but which?) # cut at approx precision for sorting rd = np.round(d, decimals=_ndigits[self.tp]) if self.which in ['LR', 'SR']: ind = np.argsort(rd.real) elif self.which in ['LI', 'SI']: # for LI,SI ARPACK returns largest,smallest # abs(imaginary) why? ind = np.argsort(abs(rd.imag)) else: ind = np.argsort(abs(rd)) if self.which in ['LR', 'LM', 'LI']: d = d[ind[-k:]] z = z[:, ind[-k:]] if self.which in ['SR', 'SM', 'SI']: d = d[ind[:k]] z = z[:, ind[:k]] else: # complex is so much simpler... 
d, z, ierr =\ self._arpack_extract( return_eigenvectors, howmny, sselect, self.sigma, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d def _aslinearoperator_with_dtype(m): m = aslinearoperator(m) if not hasattr(m, 'dtype'): x = np.zeros(m.shape[1]) m.dtype = (m * x).dtype return m class SpLuInv(LinearOperator): """ SpLuInv: helper class to repeatedly solve M*x=b using a sparse LU-decopposition of M """ def __init__(self, M): self.M_lu = splu(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) self.isreal = not np.issubdtype(self.dtype, np.complexfloating) def _matvec(self, x): # careful here: splu.solve will throw away imaginary # part of x if M is real if self.isreal and np.issubdtype(x.dtype, np.complexfloating): return (self.M_lu.solve(np.real(x)) + 1j * self.M_lu.solve(np.imag(x))) else: return self.M_lu.solve(x) class LuInv(LinearOperator): """ LuInv: helper class to repeatedly solve M*x=b using an LU-decomposition of M """ def __init__(self, M): self.M_lu = lu_factor(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) def _matvec(self, x): return lu_solve(self.M_lu, x) class IterInv(LinearOperator): """ IterInv: helper class to repeatedly solve M*x=b using an iterative method. """ def __init__(self, M, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(M.dtype).eps self.M = M self.ifunc = ifunc self.tol = tol if hasattr(M, 'dtype'): dtype = M.dtype else: x = np.zeros(M.shape[1]) dtype = (M * x).dtype LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype) def _matvec(self, x): b, info = self.ifunc(self.M, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting M: function " "%s did not converge (info = %i)." % (self.ifunc.__name__, info)) return b class IterOpInv(LinearOperator): """ IterOpInv: helper class to repeatedly solve [A-sigma*M]*x = b using an iterative method """ def __init__(self, A, M, sigma, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(A.dtype).eps self.A = A self.M = M self.sigma = sigma self.ifunc = ifunc self.tol = tol x = np.zeros(A.shape[1]) if M is None: dtype = self.mult_func_M_None(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func_M_None, dtype=dtype) else: dtype = self.mult_func(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func, dtype=dtype) LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype) def mult_func(self, x): return self.A.matvec(x) - self.sigma * self.M.matvec(x) def mult_func_M_None(self, x): return self.A.matvec(x) - self.sigma * x def _matvec(self, x): b, info = self.ifunc(self.OP, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting [A-sigma*M]: function " "%s did not converge (info = %i)." 
% (self.ifunc.__name__, info)) return b def get_inv_matvec(M, symmetric=False, tol=0): if isdense(M): return LuInv(M).matvec elif isspmatrix(M): if isspmatrix_csr(M) and symmetric: M = M.T return SpLuInv(M).matvec else: return IterInv(M, tol=tol).matvec def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): if sigma == 0: return get_inv_matvec(A, symmetric=symmetric, tol=tol) if M is None: #M is the identity matrix if isdense(A): if (np.issubdtype(A.dtype, np.complexfloating) or np.imag(sigma) == 0): A = np.copy(A) else: A = A + 0j A.flat[::A.shape[1] + 1] -= sigma return LuInv(A).matvec elif isspmatrix(A): A = A - sigma * identity(A.shape[0]) if symmetric and isspmatrix_csr(A): A = A.T return SpLuInv(A.tocsc()).matvec else: return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma, tol=tol).matvec else: if ((not isdense(A) and not isspmatrix(A)) or (not isdense(M) and not isspmatrix(M))): return IterOpInv(_aslinearoperator_with_dtype(A), _aslinearoperator_with_dtype(M), sigma, tol=tol).matvec elif isdense(A) or isdense(M): return LuInv(A - sigma * M).matvec else: OP = A - sigma * M if symmetric and isspmatrix_csr(OP): OP = OP.T return SpLuInv(OP.tocsc()).matvec def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, OPpart=None): """ Find k eigenvalues and eigenvectors of the square matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing \ the operation A * x, where A is a real or complex square matrix. k : int, default 6 The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. return_eigenvectors : boolean, default True Whether to return the eigenvectors along with the eigenvalues. M : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation M*x for the generalized eigenvalue problem ``A * x = w * M * x`` M must represent a real symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma==None, M is positive definite * If sigma is specified, M is positive semi-definite If sigma==None, eigs requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real or complex Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] * x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. For a real matrix A, shift-invert can either be done in imaginary mode or real mode, specified by the parameter OPpart ('r' or 'i'). 
Note that when sigma is specified, the keyword 'which' (below) refers to the shifted eigenvalues w'[i] where: * If A is real and OPpart == 'r' (default), w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ] * If A is real and OPpart == 'i', w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ] * If A is complex, w'[i] = 1/(w[i]-sigma) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and eigenvalues to find: - 'LM' : largest magnitude - 'SM' : smallest magnitude - 'LR' : largest real part - 'SR' : smallest real part - 'LI' : largest imaginary part - 'SI' : smallest imaginary part When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion) The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues Minv : N x N matrix, array, sparse matrix, or linear operator See notes in M, above. OPinv : N x N matrix, array, sparse matrix, or linear operator See notes in sigma, above. OPpart : 'r' or 'i'. See notes in sigma, above Returns ------- w : array Array of k eigenvalues. v : array An array of `k` eigenvectors. ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. See Also -------- eigsh : eigenvalues and eigenvectors for symmetric matrix A svds : singular value decomposition for a matrix A Examples -------- Find 6 eigenvectors of the identity matrix: >>> from sklearn.utils.arpack import eigs >>> id = np.identity(13) >>> vals, vecs = eigs(id, k=6) >>> vals array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> vecs.shape (13, 6) Notes ----- This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to find the eigenvalues and eigenvectors [2]_. References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. 
' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if OPpart is not None: raise ValueError("OPpart should not be specified with " "sigma = None or complex A") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: #sigma is not None: shift-invert mode if np.issubdtype(A.dtype, np.complexfloating): if OPpart is not None: raise ValueError("OPpart should not be specified " "with sigma=None or complex A") mode = 3 elif OPpart is None or OPpart.lower() == 'r': mode = 3 elif OPpart.lower() == 'i': if np.imag(sigma) == 0: raise ValueError("OPpart cannot be 'i' if sigma is real") mode = 4 else: raise ValueError("OPpart must be one of ('r','i')") matvec = _aslinearoperator_with_dtype(A).matvec if Minv is not None: raise ValueError("Minv should not be specified when sigma is") if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=False, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, mode='normal'): """ Find k eigenvalues and eigenvectors of the real symmetric square matrix or complex hermitian matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation A * x, where A is a real symmetric matrix For buckling mode (see below) A must additionally be positive-definite k : integer The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. M : An N x N matrix, array, sparse matrix, or linear operator representing the operation M * x for the generalized eigenvalue problem ``A * x = w * M * x``. M must represent a real, symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma == None, M is symmetric positive definite * If sigma is specified, M is symmetric positive semi-definite * In buckling mode, M is symmetric indefinite. If sigma == None, eigsh requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. 
Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. Note that when sigma is specified, the keyword 'which' refers to the shifted eigenvalues w'[i] where: - if mode == 'normal', w'[i] = 1 / (w[i] - sigma) - if mode == 'cayley', w'[i] = (w[i] + sigma) / (w[i] - sigma) - if mode == 'buckling', w'[i] = w[i] / (w[i] - sigma) (see further discussion in 'mode' below) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated ncv must be greater than k and smaller than n; it is recommended that ncv > 2*k which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] If A is a complex hermitian matrix, 'BE' is invalid. Which `k` eigenvectors and eigenvalues to find - 'LM' : Largest (in magnitude) eigenvalues - 'SM' : Smallest (in magnitude) eigenvalues - 'LA' : Largest (algebraic) eigenvalues - 'SA' : Smallest (algebraic) eigenvalues - 'BE' : Half (k/2) from each end of the spectrum When k is odd, return one more (k/2+1) from the high end When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion). The default value of 0 implies machine precision. Minv : N x N matrix, array, sparse matrix, or LinearOperator See notes in M, above OPinv : N x N matrix, array, sparse matrix, or LinearOperator See notes in sigma, above. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues mode : string ['normal' | 'buckling' | 'cayley'] Specify strategy to use for shift-invert mode. This argument applies only for real-valued A and sigma != None. For shift-invert mode, ARPACK internally solves the eigenvalue problem ``OP * x'[i] = w'[i] * B * x'[i]`` and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] into the desired eigenvectors and eigenvalues of the problem ``A * x[i] = w[i] * M * x[i]``. The modes are as follows: - 'normal' : OP = [A - sigma * M]^-1 * M B = M w'[i] = 1 / (w[i] - sigma) - 'buckling' : OP = [A - sigma * M]^-1 * A B = A w'[i] = w[i] / (w[i] - sigma) - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M] B = M w'[i] = (w[i] + sigma) / (w[i] - sigma) The choice of mode will affect which eigenvalues are selected by the keyword 'which', and can also impact the stability of convergence (see [2] for a discussion) Returns ------- w : array Array of k eigenvalues v : array An array of k eigenvectors The v[i] is the eigenvector corresponding to the eigenvector w[i] Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. 
See Also -------- eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A svds : singular value decomposition for a matrix A Notes ----- This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD functions which use the Implicitly Restarted Lanczos Method to find the eigenvalues and eigenvectors [2]_. Examples -------- >>> from sklearn.utils.arpack import eigsh >>> id = np.identity(13) >>> vals, vecs = eigsh(id, k=6) >>> vals # doctest: +SKIP array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> print(vecs.shape) (13, 6) References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ # complex hermitian matrices should be solved with eigs if np.issubdtype(A.dtype, np.complexfloating): if mode != 'normal': raise ValueError("mode=%s cannot be used with " "complex matrix A" % mode) if which == 'BE': raise ValueError("which='BE' cannot be used with complex matrix A") elif which == 'LA': which = 'LR' elif which == 'SA': which = 'SR' ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=return_eigenvectors, Minv=Minv, OPinv=OPinv) if return_eigenvectors: return ret[0].real, ret[1] else: return ret.real if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. ' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: A = _aslinearoperator_with_dtype(A) matvec = A.matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: # sigma is not None: shift-invert mode if Minv is not None: raise ValueError("Minv should not be specified when sigma is") # normal mode if mode == 'normal': mode = 3 matvec = None if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M = _aslinearoperator_with_dtype(M) M_matvec = M.matvec # buckling mode elif mode == 'buckling': mode = 4 if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec matvec = _aslinearoperator_with_dtype(A).matvec M_matvec = None # cayley-transform mode elif mode == 'cayley': mode = 5 matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec # unrecognized mode else: 
raise ValueError("unrecognized mode '%s'" % mode) params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _svds(A, k=6, ncv=None, tol=0): """Compute k singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- A : sparse matrix Array to compute the SVD on k : int, optional Number of singular values and vectors to compute. ncv : integer The number of Lanczos vectors generated ncv must be greater than k+1 and smaller than n; it is recommended that ncv > 2*k tol : float, optional Tolerance for singular values. Zero (default) means machine precision. Notes ----- This is a naive implementation using an eigensolver on A.H * A or A * A.H, depending on which one is more efficient. """ if not (isinstance(A, np.ndarray) or isspmatrix(A)): A = np.asarray(A) n, m = A.shape if np.issubdtype(A.dtype, np.complexfloating): herm = lambda x: x.T.conjugate() eigensolver = eigs else: herm = lambda x: x.T eigensolver = eigsh if n > m: X = A XH = herm(A) else: XH = A X = herm(A) if hasattr(XH, 'dot'): def matvec_XH_X(x): return XH.dot(X.dot(x)) else: def matvec_XH_X(x): return np.dot(XH, np.dot(X, x)) XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, shape=(X.shape[1], X.shape[1])) # Ignore deprecation warnings here: dot on matrices is deprecated, # but this code is a backport anyhow with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2) s = np.sqrt(eigvals) if n > m: v = eigvec if hasattr(X, 'dot'): u = X.dot(v) / s else: u = np.dot(X, v) / s vh = herm(v) else: u = eigvec if hasattr(X, 'dot'): vh = herm(X.dot(u) / s) else: vh = herm(np.dot(X, u) / s) return u, s, vh # check if backport is actually needed: if scipy.version.version >= LooseVersion('0.10'): from scipy.sparse.linalg import eigs, eigsh, svds else: eigs, eigsh, svds = _eigs, _eigsh, _svds
bsd-3-clause
Vvucinic/Wander
venv_2_7/lib/python2.7/site-packages/pandas/core/indexing.py
9
64500
# pylint: disable=W0223 from pandas.core.index import Index, MultiIndex from pandas.compat import range, zip import pandas.compat as compat import pandas.core.common as com from pandas.core.common import (is_bool_indexer, is_integer_dtype, _asarray_tuplesafe, is_list_like, isnull, is_null_slice, is_full_slice, ABCSeries, ABCDataFrame, ABCPanel, is_float, _values_from_object, _infer_fill_value, is_integer) import numpy as np # the supported indexers def get_indexers_list(): return [ ('ix', _IXIndexer), ('iloc', _iLocIndexer), ('loc', _LocIndexer), ('at', _AtIndexer), ('iat', _iAtIndexer), ] # "null slice" _NS = slice(None, None) # the public IndexSlicerMaker class _IndexSlice(object): def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingError(Exception): pass class _NDFrameIndexer(object): _valid_types = None _exception = KeyError def __init__(self, obj, name): self.obj = obj self.ndim = obj.ndim self.name = name self.axis = None def __call__(self, *args, **kwargs): # we need to return a copy of ourselves self = self.__class__(self.obj, self.name) # set the passed in values for k, v in compat.iteritems(kwargs): setattr(self,k,v) return self def __iter__(self): raise NotImplementedError('ix is not iterable') def __getitem__(self, key): if type(key) is tuple: try: values = self.obj.get_value(*key) if np.isscalar(values): return values except Exception: pass return self._getitem_tuple(key) else: return self._getitem_axis(key, axis=0) def _get_label(self, label, axis=0): if self.ndim == 1: # for perf reasons we want to try _xs first # as its basically direct indexing # but will fail when the index is not present # see GH5667 try: return self.obj._xs(label, axis=axis) except: return self.obj[label] elif (isinstance(label, tuple) and isinstance(label[axis], slice)): raise IndexingError('no slices here, handle elsewhere') return self.obj._xs(label, axis=axis) def _get_loc(self, key, axis=0): return self.obj._ixs(key, axis=axis) def _slice(self, obj, axis=0, kind=None): return self.obj._slice(obj, axis=axis, kind=kind) def _get_setitem_indexer(self, key): if self.axis is not None: return self._convert_tuple(key, is_setter=True) axis = self.obj._get_axis(0) if isinstance(axis, MultiIndex): try: return axis.get_loc(key) except Exception: pass if isinstance(key, tuple) and not self.ndim < len(key): return self._convert_tuple(key, is_setter=True) if isinstance(key, range): return self._convert_range(key, is_setter=True) try: return self._convert_to_indexer(key, is_setter=True) except TypeError: raise IndexingError(key) def __setitem__(self, key, value): indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) def _has_valid_type(self, k, axis): raise NotImplementedError() def _has_valid_tuple(self, key): """ check the key for valid keys across my indexer """ for i, k in enumerate(key): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if not self._has_valid_type(k, i): raise ValueError("Location based indexing can only have [%s] " "types" % self._valid_types) def _should_validate_iterable(self, axis=0): """ return a boolean whether this axes needs validation for a passed iterable """ ax = self.obj._get_axis(axis) if isinstance(ax, MultiIndex): return False elif ax.is_floating(): return False return True def _is_nested_tuple_indexer(self, tup): if any([ isinstance(ax, MultiIndex) for ax in self.obj.axes ]): return any([ is_nested_tuple(tup,ax) for ax in self.obj.axes ]) return False def _convert_tuple(self, key, is_setter=False): keyidx 
= [] if self.axis is not None: axis = self.obj._get_axis_number(self.axis) for i in range(self.ndim): if i == axis: keyidx.append(self._convert_to_indexer(key, axis=axis, is_setter=is_setter)) else: keyidx.append(slice(None)) else: for i, k in enumerate(key): idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter) keyidx.append(idx) return tuple(keyidx) def _convert_range(self, key, is_setter=False): """ convert a range argument """ return list(key) def _convert_scalar_indexer(self, key, axis): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) # a scalar return ax._convert_scalar_indexer(key, kind=self.name) def _convert_slice_indexer(self, key, axis): # if we are accessing via lowered dim, use the last dim ax = self.obj._get_axis(min(axis, self.ndim - 1)) return ax._convert_slice_indexer(key, kind=self.name) def _has_valid_setitem_indexer(self, indexer): return True def _has_valid_positional_setitem_indexer(self, indexer): """ validate that an positional indexer cannot enlarge its target will raise if needed, does not modify the indexer externally """ if isinstance(indexer, dict): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) else: if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) for ax, i in zip(self.obj.axes, indexer): if isinstance(i, slice): # should check the stop slice? pass elif is_list_like_indexer(i): # should check the elements? pass elif is_integer(i): if i >= len(ax): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) elif isinstance(i, dict): raise IndexError("{0} cannot enlarge its target object" .format(self.name)) return True def _setitem_with_indexer(self, indexer, value): self._has_valid_setitem_indexer(indexer) # also has the side effect of consolidating in-place from pandas import Panel, DataFrame, Series info_axis = self.obj._info_axis_number # maybe partial set take_split_path = self.obj._is_mixed_type # if there is only one block/type, still have to take split path # unless the block is one-dimensional or it can hold the value if not take_split_path and self.obj._data.blocks: blk, = self.obj._data.blocks if 1 < blk.ndim: # in case of dict, keys are indices val = list(value.values()) if isinstance(value,dict) else value take_split_path = not blk._can_hold_element(val) if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): for i, ax in zip(indexer, self.obj.axes): # if we have any multi-indexes that have non-trivial slices (not null slices) # then we must take the split path, xref GH 10360 if isinstance(ax, MultiIndex) and not (is_integer(i) or is_null_slice(i)): take_split_path = True break if isinstance(indexer, tuple): nindexer = [] for i, idx in enumerate(indexer): if isinstance(idx, dict): # reindex the axis to the new value # and set inplace key, _ = convert_missing_indexer(idx) # if this is the items axes, then take the main missing # path first # this correctly sets the dtype and avoids cache issues # essentially this separates out the block that is needed # to possibly be modified if self.ndim > 1 and i == self.obj._info_axis_number: # add the new item, and set the value # must have all defined axes if we have a scalar # or a list-like on the non-info axes if we have a # list-like len_non_info_axes = [ len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i ] if any([not l for l in len_non_info_axes]): if not is_list_like_indexer(value): raise ValueError("cannot set a frame with no " "defined index and a 
scalar") self.obj[key] = value return self.obj # add a new item with the dtype setup self.obj[key] = _infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes) self._setitem_with_indexer(new_indexer, value) return self.obj # reindex the axis # make sure to clear the cache because we are # just replacing the block manager here # so the object is the same index = self.obj._get_axis(i) labels = index.insert(len(index),key) self.obj._data = self.obj.reindex_axis(labels, i)._data self.obj._maybe_update_cacher(clear=True) self.obj.is_copy=None nindexer.append(labels.get_loc(key)) else: nindexer.append(idx) indexer = tuple(nindexer) else: indexer, missing = convert_missing_indexer(indexer) if missing: # reindex the axis to the new value # and set inplace if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index),indexer) # this preserves dtype of the value new_values = Series([value])._values if len(self.obj._values): new_values = np.concatenate([self.obj._values, new_values]) self.obj._data = self.obj._constructor( new_values, index=new_index, name=self.obj.name)._data self.obj._maybe_update_cacher(clear=True) return self.obj elif self.ndim == 2: # no columns and scalar if not len(self.obj.columns): raise ValueError( "cannot set a frame with no defined columns" ) # append a Series if isinstance(value, Series): value = value.reindex(index=self.obj.columns,copy=True) value.name = indexer # a list-list else: # must have conforming columns if is_list_like_indexer(value): if len(value) != len(self.obj.columns): raise ValueError( "cannot set a row with mismatched columns" ) value = Series(value,index=self.obj.columns,name=indexer) self.obj._data = self.obj.append(value)._data self.obj._maybe_update_cacher(clear=True) return self.obj # set using setitem (Panel and > dims) elif self.ndim >= 3: return self.obj.__setitem__(indexer, value) # set item_labels = self.obj._get_axis(info_axis) # align and set the values if take_split_path: if not isinstance(indexer, tuple): indexer = self._tuplify(indexer) if isinstance(value, ABCSeries): value = self._align_series(indexer, value) info_idx = indexer[info_axis] if is_integer(info_idx): info_idx = [info_idx] labels = item_labels[info_idx] # if we have a partial multiindex, then need to adjust the plane # indexer here if (len(labels) == 1 and isinstance(self.obj[labels[0]].axes[0], MultiIndex)): item = labels[0] obj = self.obj[item] index = obj.index idx = indexer[:info_axis][0] plane_indexer = tuple([idx]) + indexer[info_axis + 1:] lplane_indexer = length_of_indexer(plane_indexer[0], index) # require that we are setting the right number of values that # we are indexing if is_list_like_indexer(value) and np.iterable(value) and lplane_indexer != len(value): if len(obj[idx]) != len(value): raise ValueError( "cannot set using a multi-index selection indexer " "with a different length than the value" ) # make sure we have an ndarray value = getattr(value,'values',value).ravel() # we can directly set the series here # as we select a slice indexer on the mi idx = index._convert_slice_indexer(idx) obj._consolidate_inplace() obj = obj.copy() obj._data = obj._data.setitem(indexer=tuple([idx]), value=value) self.obj[item] = obj return # non-mi else: plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:] if info_axis > 0: plane_axis = self.obj.axes[:info_axis][0] lplane_indexer = length_of_indexer(plane_indexer[0], plane_axis) else: lplane_indexer = 0 def setter(item, v): s = self.obj[item] pi = 
plane_indexer[0] if lplane_indexer == 1 else plane_indexer # perform the equivalent of a setitem on the info axis # as we have a null slice or a slice with full bounds # which means essentially reassign to the columns of a multi-dim object # GH6149 (null slice), GH10408 (full bounds) if isinstance(pi, tuple) and all(is_null_slice(idx) or is_full_slice(idx, len(self.obj)) for idx in pi): s = v else: # set the item, possibly having a dtype change s._consolidate_inplace() s = s.copy() s._data = s._data.setitem(indexer=pi, value=v) s._maybe_update_cacher(clear=True) # reset the sliced object if unique self.obj[item] = s def can_do_equal_len(): """ return True if we have an equal len settable """ if not len(labels) == 1 or not np.iterable(value): return False l = len(value) item = labels[0] index = self.obj[item].index # equal len list/ndarray if len(index) == l: return True elif lplane_indexer == l: return True return False # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value,'ndim',1) > 0: # we have an equal len Frame if isinstance(value, ABCDataFrame) and value.ndim > 1: sub_indexer = list(indexer) multiindex_indexer = isinstance(labels, MultiIndex) for item in labels: if item in value: sub_indexer[info_axis] = item v = self._align_series( tuple(sub_indexer), value[item], multiindex_indexer ) else: v = np.nan setter(item, v) # we have an equal len ndarray/convertible to our labels elif np.array(value).ndim == 2: # note that this coerces the dtype if we are mixed # GH 7551 value = np.array(value,dtype=object) if len(labels) != value.shape[1]: raise ValueError('Must have equal len keys and value ' 'when setting with an ndarray') for i, item in enumerate(labels): # setting with a list, recoerces setter(item, value[:, i].tolist()) # we have an equal len list/ndarray elif can_do_equal_len(): setter(labels[0], value) # per label values else: if len(labels) != len(value): raise ValueError('Must have equal len keys and value ' 'when setting with an iterable') for item, v in zip(labels, value): setter(item, v) else: # scalar for item in labels: setter(item, value) else: if isinstance(indexer, tuple): indexer = maybe_convert_ix(*indexer) # if we are setting on the info axis ONLY # set using those methods to avoid block-splitting # logic here if len(indexer) > info_axis and is_integer(indexer[info_axis]) and all( is_null_slice(idx) for i, idx in enumerate(indexer) if i != info_axis): self.obj[item_labels[indexer[info_axis]]] = value return if isinstance(value, (ABCSeries, dict)): value = self._align_series(indexer, Series(value)) elif isinstance(value, ABCDataFrame): value = self._align_frame(indexer, value) if isinstance(value, ABCPanel): value = self._align_panel(indexer, value) # check for chained assignment self.obj._check_is_chained_assignment_possible() # actually do the set self.obj._consolidate_inplace() self.obj._data = self.obj._data.setitem(indexer=indexer, value=value) self.obj._maybe_update_cacher(clear=True) def _align_series(self, indexer, ser, multiindex_indexer=False): """ Parameters ---------- indexer : tuple, slice, scalar The indexer used to get the locations that will be set to `ser` ser : pd.Series The values to assign to the locations specified by `indexer` multiindex_indexer : boolean, optional Defaults to False. Should be set to True if `indexer` was from a `pd.MultiIndex`, to avoid unnecessary broadcasting. 
Returns: -------- `np.array` of `ser` broadcast to the appropriate shape for assignment to the locations selected by `indexer` """ if isinstance(indexer, (slice, np.ndarray, list, Index)): indexer = tuple([indexer]) if isinstance(indexer, tuple): # flatten np.ndarray indexers ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) aligners = [not is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 obj = self.obj # are we a single alignable value on a non-primary # dim (e.g. panel: 1,2, or frame: 0) ? # hence need to align to a single axis dimension # rather that find all valid dims # frame if is_frame: single_aligner = single_aligner and aligners[0] # panel elif is_panel: single_aligner = (single_aligner and (aligners[1] or aligners[2])) # we have a frame, with multiple indexers on both axes; and a # series, so need to broadcast (see GH5206) if (sum_aligners == self.ndim and all([com.is_sequence(_) for _ in indexer])): ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values # single indexer if len(indexer) > 1 and not multiindex_indexer: l = len(indexer[1]) ser = np.tile(ser, l).reshape(l, -1).T return ser for i, idx in enumerate(indexer): ax = obj.axes[i] # multiple aligners (or null slices) if com.is_sequence(idx) or isinstance(idx, slice): if single_aligner and is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): new_ix = Index([new_ix]) else: new_ix = Index(new_ix) if ser.index.equals(new_ix) or not len(new_ix): return ser._values.copy() return ser.reindex(new_ix)._values # 2 dims elif single_aligner and is_frame: # reindex along index ax = self.obj.axes[1] if ser.index.equals(ax) or not len(ax): return ser._values.copy() return ser.reindex(ax)._values # >2 dims elif single_aligner: broadcast = [] for n, labels in enumerate(self.obj._get_plane_axes(i)): # reindex along the matching dimensions if len(labels & ser.index): ser = ser.reindex(labels) else: broadcast.append((n, len(labels))) # broadcast along other dims ser = ser._values.copy() for (axis, l) in broadcast: shape = [-1] * (len(broadcast) + 1) shape[axis] = l ser = np.tile(ser, l).reshape(shape) if self.obj.ndim == 3: ser = ser.T return ser elif np.isscalar(indexer): ax = self.obj._get_axis(1) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values raise ValueError('Incompatible indexer with Series') def _align_frame(self, indexer, df): is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 if isinstance(indexer, tuple): aligners = [not is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 idx, cols = None, None sindexers = [] for i, ix in enumerate(indexer): ax = self.obj.axes[i] if com.is_sequence(ix) or isinstance(ix, slice): if idx is None: idx = ax[ix].ravel() elif cols is None: cols = ax[ix].ravel() else: break else: sindexers.append(i) # panel if is_panel: # need to conform to the convention # as we are not selecting on the items axis # and we have a single indexer # GH 7763 if len(sindexers) == 1 and sindexers[0] != 0: df = df.T if idx is None: idx = df.index if cols is None: cols = df.columns if idx is not None and cols is not None: if df.index.equals(idx) and df.columns.equals(cols): val = df.copy()._values else: val = df.reindex(idx, columns=cols)._values return val elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame): ax 
= self.obj.index[indexer] if df.index.equals(ax): val = df.copy()._values else: # we have a multi-index and are trying to align # with a particular, level GH3738 if isinstance(ax, MultiIndex) and isinstance( df.index, MultiIndex) and ax.nlevels != df.index.nlevels: raise TypeError("cannot align on a multi-index with out specifying the join levels") val = df.reindex(index=ax)._values return val elif np.isscalar(indexer) and is_panel: idx = self.obj.axes[1] cols = self.obj.axes[2] # by definition we are indexing on the 0th axis # a passed in dataframe which is actually a transpose # of what is needed if idx.equals(df.index) and cols.equals(df.columns): return df.copy()._values return df.reindex(idx, columns=cols)._values raise ValueError('Incompatible indexer with DataFrame') def _align_panel(self, indexer, df): is_frame = self.obj.ndim == 2 is_panel = self.obj.ndim >= 3 raise NotImplementedError("cannot set using an indexer with a Panel " "yet!") def _getitem_tuple(self, tup): try: return self._getitem_lowerdim(tup) except IndexingError: pass # no multi-index, so validate all of the indexers self._has_valid_tuple(tup) # ugly hack for GH #836 if self._multi_take_opportunity(tup): return self._multi_take(tup) # no shortcut needed retval = self.obj for i, key in enumerate(tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) return retval def _multi_take_opportunity(self, tup): from pandas.core.generic import NDFrame # ugly hack for GH #836 if not isinstance(self.obj, NDFrame): return False if not all(is_list_like_indexer(x) for x in tup): return False # just too complicated for indexer, ax in zip(tup, self.obj._data.axes): if isinstance(ax, MultiIndex): return False elif is_bool_indexer(indexer): return False elif not ax.is_unique: return False return True def _multi_take(self, tup): """ create the reindex map for our objects, raise the _exception if we can't create the indexer """ try: o = self.obj d = dict([ (a, self._convert_for_reindex(t, axis=o._get_axis_number(a))) for t, a in zip(tup, o._AXIS_ORDERS) ]) return o.reindex(**d) except: raise self._exception def _convert_for_reindex(self, key, axis=0): labels = self.obj._get_axis(axis) if is_bool_indexer(key): key = check_bool_indexer(labels, key) return labels[key] else: if isinstance(key, Index): # want Index objects to pass through untouched keyarr = key else: # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) if is_integer_dtype(keyarr) and not labels.is_integer(): keyarr = com._ensure_platform_int(keyarr) return labels.take(keyarr) return keyarr def _handle_lowerdim_multi_index_axis0(self, tup): # we have an axis0 multi-index, handle or raise try: # fast path for series or for tup devoid of slices return self._get_label(tup, axis=0) except TypeError: # slices are unhashable pass except Exception as e1: if isinstance(tup[0], (slice, Index)): raise IndexingError("Handle elsewhere") # raise the error if we are not sorted ax0 = self.obj._get_axis(0) if not ax0.is_lexsorted_for_tuple(tup): raise e1 return None def _getitem_lowerdim(self, tup): # we can directly get the axis result since the axis is specified if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) # we may have a nested tuples indexer here if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) # we maybe be using a tuple to represent multiple dimensions 
here ax0 = self.obj._get_axis(0) if isinstance(ax0, MultiIndex): result = self._handle_lowerdim_multi_index_axis0(tup) if result is not None: return result if len(tup) > self.obj.ndim: raise IndexingError("Too many indexers. handle elsewhere") # to avoid wasted computation # df.ix[d1:d2, 0] -> columns first (True) # df.ix[0, ['C', 'B', A']] -> rows first (False) for i, key in enumerate(tup): if is_label_like(key) or isinstance(key, tuple): section = self._getitem_axis(key, axis=i) # we have yielded a scalar ? if not is_list_like_indexer(section): return section elif section.ndim == self.ndim: # we're in the middle of slicing through a MultiIndex # revise the key wrt to `section` by inserting an _NS new_key = tup[:i] + (_NS,) + tup[i + 1:] else: new_key = tup[:i] + tup[i + 1:] # unfortunately need an odious kludge here because of # DataFrame transposing convention if (isinstance(section, ABCDataFrame) and i > 0 and len(new_key) == 2): a, b = new_key new_key = b, a if len(new_key) == 1: new_key, = new_key # This is an elided recursive call to iloc/loc/etc' return getattr(section, self.name)[new_key] raise IndexingError('not applicable') def _getitem_nested_tuple(self, tup): # we have a nested tuple so have at least 1 multi-index level # we should be able to match up the dimensionaility here # we have too many indexers for our dim, but have at least 1 # multi-index dimension, try to see if we have something like # a tuple passed to a series with a multi-index if len(tup) > self.ndim: result = self._handle_lowerdim_multi_index_axis0(tup) if result is not None: return result # this is a series with a multi-index specified a tuple of selectors return self._getitem_axis(tup, axis=0) # handle the multi-axis by taking sections and reducing # this is iterative obj = self.obj axis = 0 for i, key in enumerate(tup): if is_null_slice(key): axis += 1 continue current_ndim = obj.ndim obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis += 1 # if we have a scalar, we are done if np.isscalar(obj) or not hasattr(obj,'ndim'): break # has the dim of the obj changed? # GH 7199 if obj.ndim < current_ndim: # GH 7516 # if had a 3 dim and are going to a 2d # axes are reversed on a DataFrame if i >= 1 and current_ndim == 3 and obj.ndim == 2: obj = obj.T axis -= 1 return obj def _getitem_axis(self, key, axis=0): if self._should_validate_iterable(axis): self._has_valid_type(key, axis) labels = self.obj._get_axis(axis) if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) elif is_list_like_indexer(key) and not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, 'ndim') and key.ndim > 1: raise ValueError('Cannot index with multidimensional key') return self._getitem_iterable(key, axis=axis) else: if is_integer(key): if axis == 0 and isinstance(labels, MultiIndex): try: return self._get_label(key, axis=axis) except (KeyError, TypeError): if self.obj.index.levels[0].is_integer(): raise # this is the fallback! 
(for a non-float, non-integer index) if not labels.is_floating() and not labels.is_integer(): return self._get_loc(key, axis=axis) return self._get_label(key, axis=axis) def _getitem_iterable(self, key, axis=0): if self._should_validate_iterable(axis): self._has_valid_type(key, axis) labels = self.obj._get_axis(axis) if is_bool_indexer(key): key = check_bool_indexer(labels, key) inds, = key.nonzero() return self.obj.take(inds, axis=axis, convert=False) else: if isinstance(key, Index): # want Index objects to pass through untouched keyarr = key else: # asarray can be unsafe, NumPy strings are weird keyarr = _asarray_tuplesafe(key) # have the index handle the indexer and possibly return # an indexer or raising indexer = labels._convert_list_indexer(keyarr, kind=self.name) if indexer is not None: return self.obj.take(indexer, axis=axis) # this is not the most robust, but... if (isinstance(labels, MultiIndex) and len(keyarr) and not isinstance(keyarr[0], tuple)): level = 0 else: level = None # existing labels are unique and indexer are unique if labels.is_unique and Index(keyarr).is_unique: try: result = self.obj.reindex_axis(keyarr, axis=axis, level=level) # this is an error as we are trying to find # keys in a multi-index that don't exist if isinstance(labels, MultiIndex) and level is not None: if hasattr(result,'ndim') and not np.prod(result.shape) and len(keyarr): raise KeyError("cannot index a multi-index axis with these keys") return result except AttributeError: # Series if axis != 0: raise AssertionError('axis must be 0') return self.obj.reindex(keyarr, level=level) # existing labels are non-unique else: # reindex with the specified axis if axis + 1 > self.obj.ndim: raise AssertionError("invalid indexing error with " "non-unique index") new_target, indexer, new_indexer = labels._reindex_non_unique(keyarr) if new_indexer is not None: result = self.obj.take(indexer[indexer!=-1], axis=axis, convert=False) result = result._reindex_with_indexers({ axis: [new_target, new_indexer] }, copy=True, allow_dups=True) else: result = self.obj.take(indexer, axis=axis, convert=False) return result def _convert_to_indexer(self, obj, axis=0, is_setter=False): """ Convert indexing key into something we can use to do actual fancy indexing on an ndarray Examples ix[:5] -> slice(0, 5) ix[[1,2,3]] -> [1,2,3] ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz) Going by Zen of Python? "In the face of ambiguity, refuse the temptation to guess." raise AmbiguousIndexError with integer labels? 
- No, prefer label-based indexing """ labels = self.obj._get_axis(axis) # if we are a scalar indexer and not type correct raise obj = self._convert_scalar_indexer(obj, axis) # see if we are positional in nature is_int_index = labels.is_integer() is_int_positional = is_integer(obj) and not is_int_index # if we are a label return me try: return labels.get_loc(obj) except LookupError: if isinstance(obj, tuple) and isinstance(labels, MultiIndex): if is_setter and len(obj) == labels.nlevels: return {'key': obj} raise except TypeError: pass except (ValueError): if not is_int_positional: raise # a positional if is_int_positional: # if we are setting and its not a valid location # its an insert which fails by definition if is_setter: # always valid if self.name == 'loc': return {'key': obj} # a positional if (obj >= self.obj.shape[axis] and not isinstance(labels, MultiIndex)): raise ValueError("cannot set by positional indexing with " "enlargement") return obj if isinstance(obj, slice): return self._convert_slice_indexer(obj, axis) elif is_nested_tuple(obj, labels): return labels.get_locs(obj) elif is_list_like_indexer(obj): if is_bool_indexer(obj): obj = check_bool_indexer(labels, obj) inds, = obj.nonzero() return inds else: if isinstance(obj, Index): objarr = obj.values else: objarr = _asarray_tuplesafe(obj) # The index may want to handle a list indexer differently # by returning an indexer or raising indexer = labels._convert_list_indexer(objarr, kind=self.name) if indexer is not None: return indexer # this is not the most robust, but... if (isinstance(labels, MultiIndex) and not isinstance(objarr[0], tuple)): level = 0 _, indexer = labels.reindex(objarr, level=level) # take all if indexer is None: indexer = np.arange(len(labels)) check = labels.levels[0].get_indexer(objarr) else: level = None # unique index if labels.is_unique: indexer = check = labels.get_indexer(objarr) # non-unique (dups) else: (indexer, missing) = labels.get_indexer_non_unique(objarr) check = indexer mask = check == -1 if mask.any(): raise KeyError('%s not in index' % objarr[mask]) return _values_from_object(indexer) else: try: return labels.get_loc(obj) except LookupError: # allow a not found key only if we are a setter if not is_list_like_indexer(obj) and is_setter: return {'key': obj} raise def _tuplify(self, loc): tup = [slice(None, None) for _ in range(self.ndim)] tup[0] = loc return tuple(tup) def _get_slice_axis(self, slice_obj, axis=0): obj = self.obj if not need_slice(slice_obj): return obj indexer = self._convert_slice_indexer(slice_obj, axis) if isinstance(indexer, slice): return self._slice(indexer, axis=axis, kind='iloc') else: return self.obj.take(indexer, axis=axis, convert=False) class _IXIndexer(_NDFrameIndexer): """A primarily label-location based indexer, with integer position fallback. ``.ix[]`` supports mixed integer and label based access. It is primarily label based, but will fall back to integer positional access unless the corresponding axis is of integer type. ``.ix`` is the most general indexer and will support any of the inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating point label schemes. ``.ix`` is exceptionally useful when dealing with mixed positional and label based hierachical indexes. However, when an axis is integer based, ONLY label based access and not positional access is supported. Thus, in such cases, it's usually better to be explicit and use ``.iloc`` or ``.loc``. See more at :ref:`Advanced Indexing <advanced>`. 
""" def _has_valid_type(self, key, axis): if isinstance(key, slice): return True elif is_bool_indexer(key): return True elif is_list_like_indexer(key): return True else: self._convert_scalar_indexer(key, axis) return True class _LocationIndexer(_NDFrameIndexer): _exception = Exception def __getitem__(self, key): if type(key) is tuple: return self._getitem_tuple(key) else: return self._getitem_axis(key, axis=0) def _getitem_axis(self, key, axis=0): raise NotImplementedError() def _getbool_axis(self, key, axis=0): labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds, = key.nonzero() try: return self.obj.take(inds, axis=axis, convert=False) except Exception as detail: raise self._exception(detail) def _get_slice_axis(self, slice_obj, axis=0): """ this is pretty simple as we just have to deal with labels """ obj = self.obj if not need_slice(slice_obj): return obj labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self._slice(indexer, axis=axis, kind='iloc') else: return self.obj.take(indexer, axis=axis, convert=False) class _LocIndexer(_LocationIndexer): """Purely label-location based indexer for selection by label. ``.loc[]`` is primarily label based, but may also be used with a boolean array. Allowed inputs are: - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index, and **never** as an integer position along the index). - A list or array of labels, e.g. ``['a', 'b', 'c']``. - A slice object with labels, e.g. ``'a':'f'`` (note that contrary to usual python slices, **both** the start and the stop are included!). - A boolean array. ``.loc`` will raise a ``KeyError`` when the items are not found. See more at :ref:`Selection by Label <indexing.label>` """ _valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH " "endpoints included! 
Can be slices of integers if the " "index is integers), listlike of labels, boolean") _exception = KeyError def _has_valid_type(self, key, axis): ax = self.obj._get_axis(axis) # valid for a label where all labels are in the index # slice of lables (where start-end in labels) # slice of integers (only if in the lables) # boolean if isinstance(key, slice): return True elif is_bool_indexer(key): return True elif is_list_like_indexer(key): # mi is just a passthru if isinstance(key, tuple) and isinstance(ax, MultiIndex): return True # TODO: don't check the entire key unless necessary if len(key) and np.all(ax.get_indexer_for(key) < 0): raise KeyError("None of [%s] are in the [%s]" % (key, self.obj._get_axis_name(axis))) return True else: def error(): if isnull(key): raise TypeError( "cannot use label indexing with a null key") raise KeyError("the label [%s] is not in the [%s]" % (key, self.obj._get_axis_name(axis))) try: key = self._convert_scalar_indexer(key, axis) if not key in ax: error() except (TypeError) as e: # python 3 type errors should be raised if 'unorderable' in str(e): # pragma: no cover error() raise except: error() return True def _getitem_axis(self, key, axis=0): labels = self.obj._get_axis(axis) if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) elif is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): # GH 7349 # possibly convert a list-like into a nested tuple # but don't convert a list-like of tuples if isinstance(labels, MultiIndex): if not isinstance(key, tuple) and len(key) > 1 and not isinstance(key[0], tuple): key = tuple([key]) # an iterable multi-selection if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, 'ndim') and key.ndim > 1: raise ValueError('Cannot index with multidimensional key') return self._getitem_iterable(key, axis=axis) # nested tuple slicing if is_nested_tuple(key, labels): locs = labels.get_locs(key) indexer = [ slice(None) ] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] # fall thru to straight lookup self._has_valid_type(key, axis) return self._get_label(key, axis=axis) class _iLocIndexer(_LocationIndexer): """Purely integer-location based indexing for selection by position. ``.iloc[]`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean array. Allowed inputs are: - An integer, e.g. ``5``. - A list or array of integers, e.g. ``[4, 3, 0]``. - A slice object with ints, e.g. ``1:7``. - A boolean array. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). 
See more at :ref:`Selection by Position <indexing.integer>` """ _valid_types = ("integer, integer slice (START point is INCLUDED, END " "point is EXCLUDED), listlike of integers, boolean array") _exception = IndexError def _has_valid_type(self, key, axis): if is_bool_indexer(key): if hasattr(key, 'index') and isinstance(key.index, Index): if key.index.inferred_type == 'integer': raise NotImplementedError( "iLocation based boolean indexing on an integer type " "is not available" ) raise ValueError("iLocation based boolean indexing cannot use " "an indexable as a mask") return True if isinstance(key, slice): return True elif is_integer(key): return self._is_valid_integer(key, axis) elif is_list_like_indexer(key): return self._is_valid_list_like(key, axis) return False def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _is_valid_integer(self, key, axis): # return a boolean if we have a valid integer indexer ax = self.obj._get_axis(axis) l = len(ax) if key >= l or key < -l: raise IndexError("single positional indexer is out-of-bounds") return True def _is_valid_list_like(self, key, axis): # return a boolean if we are a valid list-like (e.g. that we dont' have out-of-bounds values) # coerce the key to not exceed the maximum size of the index arr = np.array(key) ax = self.obj._get_axis(axis) l = len(ax) if len(arr) and (arr.max() >= l or arr.min() < -l): raise IndexError("positional indexers are out-of-bounds") return True def _getitem_tuple(self, tup): self._has_valid_tuple(tup) try: return self._getitem_lowerdim(tup) except: pass retval = self.obj axis=0 for i, key in enumerate(tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') if is_null_slice(key): axis += 1 continue retval = getattr(retval, self.name)._getitem_axis(key, axis=axis) # if the dim was reduced, then pass a lower-dim the next time if retval.ndim<self.ndim: axis -= 1 # try to get for the next axis axis += 1 return retval def _get_slice_axis(self, slice_obj, axis=0): obj = self.obj if not need_slice(slice_obj): return obj slice_obj = self._convert_slice_indexer(slice_obj, axis) if isinstance(slice_obj, slice): return self._slice(slice_obj, axis=axis, kind='iloc') else: return self.obj.take(slice_obj, axis=axis, convert=False) def _getitem_axis(self, key, axis=0): if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) elif is_bool_indexer(key): self._has_valid_type(key, axis) return self._getbool_axis(key, axis=axis) # a single integer or a list of integers else: if is_list_like_indexer(key): # validate list bounds self._is_valid_list_like(key, axis) # force an actual list key = list(key) else: key = self._convert_scalar_indexer(key, axis) if not is_integer(key): raise TypeError("Cannot index by location index with a " "non-integer key") # validate the location self._is_valid_integer(key, axis) return self._get_loc(key, axis=axis) def _convert_to_indexer(self, obj, axis=0, is_setter=False): """ much simpler as we only have to deal with our valid types """ # make need to convert a float key if isinstance(obj, slice): return self._convert_slice_indexer(obj, axis) elif is_float(obj): return self._convert_scalar_indexer(obj, axis) elif self._has_valid_type(obj, axis): return obj raise ValueError("Can only index by location with a [%s]" % self._valid_types) class _ScalarAccessIndexer(_NDFrameIndexer): """ access scalars quickly """ def _convert_key(self, key, is_setter=False): return list(key) def __getitem__(self, key): 
if not isinstance(key, tuple): # we could have a convertible item here (e.g. Timestamp) if not is_list_like_indexer(key): key = tuple([key]) else: raise ValueError('Invalid call for scalar access (getting)!') key = self._convert_key(key) return self.obj.get_value(*key, takeable=self._takeable) def __setitem__(self, key, value): if not isinstance(key, tuple): key = self._tuplify(key) if len(key) != self.obj.ndim: raise ValueError('Not enough indexers for scalar access ' '(setting)!') key = list(self._convert_key(key, is_setter=True)) key.append(value) self.obj.set_value(*key, takeable=self._takeable) class _AtIndexer(_ScalarAccessIndexer): """Fast label-based scalar accessor Similarly to ``loc``, ``at`` provides **label** based scalar lookups. You can also set using these indexers. """ _takeable = False def _convert_key(self, key, is_setter=False): """ require they keys to be the same type as the index (so we don't fallback) """ # allow arbitrary setting if is_setter: return list(key) for ax, i in zip(self.obj.axes, key): if ax.is_integer(): if not is_integer(i): raise ValueError("At based indexing on an integer index can only have integer " "indexers") else: if is_integer(i): raise ValueError("At based indexing on an non-integer index can only have non-integer " "indexers") return key class _iAtIndexer(_ScalarAccessIndexer): """Fast integer location scalar accessor. Similarly to ``iloc``, ``iat`` provides **integer** based lookups. You can also set using these indexers. """ _takeable = True def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _convert_key(self, key, is_setter=False): """ require integer args (and convert to label arguments) """ for a, i in zip(self.obj.axes, key): if not is_integer(i): raise ValueError("iAt based indexing can only have integer " "indexers") return key # 32-bit floating point machine epsilon _eps = np.finfo('f4').eps def length_of_indexer(indexer, target=None): """return the length of a single non-tuple indexer which could be a slice """ if target is not None and isinstance(indexer, slice): l = len(target) start = indexer.start stop = indexer.stop step = indexer.step if start is None: start = 0 elif start < 0: start += l if stop is None or stop > l: stop = l elif stop < 0: stop += l if step is None: step = 1 elif step < 0: step = -step return (stop - start + step-1) // step elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)): return len(indexer) elif not is_list_like_indexer(indexer): return 1 raise AssertionError("cannot find the length of the indexer") def convert_to_index_sliceable(obj, key): """if we are index sliceable, then return my slicer, otherwise return None """ idx = obj.index if isinstance(key, slice): return idx._convert_slice_indexer(key, kind='getitem') elif isinstance(key, compat.string_types): # we are an actual column if key in obj._data.items: return None # we need a timelike key here if idx.is_all_dates: try: return idx._get_string_slice(key) except: return None return None def is_index_slice(obj): def _is_valid_index(x): return (is_integer(x) or is_float(x) and np.allclose(x, int(x), rtol=_eps, atol=0)) def _crit(v): return v is None or _is_valid_index(v) both_none = obj.start is None and obj.stop is None return not both_none and (_crit(obj.start) and _crit(obj.stop)) def check_bool_indexer(ax, key): # boolean indexing, need to check that the data are aligned, otherwise # disallowed # this function assumes that is_bool_indexer(key) == True result = key if isinstance(key, 
ABCSeries) and not key.index.equals(ax): result = result.reindex(ax) mask = com.isnull(result._values) if mask.any(): raise IndexingError('Unalignable boolean Series key provided') result = result.astype(bool)._values else: # is_bool_indexer has already checked for nulls in the case of an # object array key, so no check needed here result = np.asarray(result, dtype=bool) return result def convert_missing_indexer(indexer): """ reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer['key'] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False def convert_from_missing_indexer_tuple(indexer, axes): """ create a filtered indexer that doesn't have any missing indexers """ def get_indexer(_i, _idx): return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else _idx) return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)]) def maybe_convert_indices(indices, n): """ if we have negative indicies, translate to postive here if have indicies that are out-of-bounds, raise an IndexError """ if isinstance(indices, list): indices = np.array(indices) if len(indices) == 0: # If list is empty, np.array will return float and cause indexing # errors. return np.empty(0, dtype=np.int_) mask = indices < 0 if mask.any(): indices[mask] += n mask = (indices >= n) | (indices < 0) if mask.any(): raise IndexError("indices are out-of-bounds") return indices def maybe_convert_ix(*args): """ We likely want to take the cross-product """ ixify = True for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): ixify = False if ixify: return np.ix_(*args) else: return args def is_nested_tuple(tup, labels): # check for a compatiable nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False # are we nested tuple of: tuple,list,slice for i, k in enumerate(tup): if isinstance(k, (tuple, list, slice)): return isinstance(labels, MultiIndex) return False def is_list_like_indexer(key): # allow a list_like, but exclude NamedTuples which can be indexers return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple) def is_label_like(key): # select a label or row return not isinstance(key, slice) and not is_list_like_indexer(key) def need_slice(obj): return (obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1)) def maybe_droplevels(index, key): # drop levels original_index = index if isinstance(key, tuple): for _ in key: try: index = index.droplevel(0) except: # we have dropped too much, so back out return original_index else: try: index = index.droplevel(0) except: pass return index def _non_reducing_slice(slice_): """ Ensurse that a slice doesn't reduce to a Series or Scalar. Any user-paseed `subset` should have this called on it to make sure we're always working with DataFrames. 
""" # default to column slice, like DataFrame # ['A', 'B'] -> IndexSlices[:, ['A', 'B']] kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index, list]) if isinstance(slice_, kinds): slice_ = IndexSlice[:, slice_] def pred(part): # true when slice does *not* reduce return isinstance(part, slice) or com.is_list_like(part) if not com.is_list_like(slice_): if not isinstance(slice_, slice): # a 1-d slice, like df.loc[1] slice_ = [[slice_]] else: # slice(a, b, c) slice_ = [slice_] # to tuplize later else: slice_ = [part if pred(part) else [part] for part in slice_] return tuple(slice_) def _maybe_numeric_slice(df, slice_, include_bool=False): """ want nice defaults for background_gradient that don't break with non-numeric data. But if slice_ is passed go with that. """ if slice_ is None: dtypes = [np.number] if include_bool: dtypes.append(bool) slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns] return slice_
artistic-2.0
anntzer/scikit-learn
sklearn/tests/test_multiclass.py
5
32749
import numpy as np import scipy.sparse as sp import pytest from re import escape from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier from sklearn.multiclass import OneVsRestClassifier from sklearn.multiclass import OneVsOneClassifier from sklearn.multiclass import OutputCodeClassifier from sklearn.utils.multiclass import (check_classification_targets, type_of_target) from sklearn.utils import ( check_array, shuffle, ) from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.svm import LinearSVC, SVC from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge, Perceptron, LogisticRegression, SGDClassifier) from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.pipeline import Pipeline, make_pipeline from sklearn.impute import SimpleImputer from sklearn import svm from sklearn.exceptions import NotFittedError from sklearn import datasets iris = datasets.load_iris() rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] n_classes = 3 def test_ovr_exceptions(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # test predicting without fitting with pytest.raises(NotFittedError): ovr.predict([]) # Fail on multioutput data msg = "Multioutput target data is not supported with label binarization" with pytest.raises(ValueError, match=msg): X = np.array([[1, 0], [0, 1]]) y = np.array([[1, 2], [3, 1]]) OneVsRestClassifier(MultinomialNB()).fit(X, y) with pytest.raises(ValueError, match=msg): X = np.array([[1, 0], [0, 1]]) y = np.array([[1.5, 2.4], [3.1, 0.8]]) OneVsRestClassifier(MultinomialNB()).fit(X, y) def test_check_classification_targets(): # Test that check_classification_target return correct type. #5782 y = np.array([0.0, 1.1, 2.0, 3.0]) msg = type_of_target(y) with pytest.raises(ValueError, match=msg): check_classification_targets(y) def test_ovr_fit_predict(): # A classifier which implements decision_function. ovr = OneVsRestClassifier(LinearSVC(random_state=0)) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes clf = LinearSVC(random_state=0) pred2 = clf.fit(iris.data, iris.target).predict(iris.data) assert np.mean(iris.target == pred) == np.mean(iris.target == pred2) # A classifier which implements predict_proba. 
ovr = OneVsRestClassifier(MultinomialNB()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert np.mean(iris.target == pred) > 0.65 def test_ovr_partial_fit(): # Test if partial_fit is working as intended X, y = shuffle(iris.data, iris.target, random_state=0) ovr = OneVsRestClassifier(MultinomialNB()) ovr.partial_fit(X[:100], y[:100], np.unique(y)) ovr.partial_fit(X[100:], y[100:]) pred = ovr.predict(X) ovr2 = OneVsRestClassifier(MultinomialNB()) pred2 = ovr2.fit(X, y).predict(X) assert_almost_equal(pred, pred2) assert len(ovr.estimators_) == len(np.unique(y)) assert np.mean(y == pred) > 0.65 # Test when mini batches doesn't have all classes # with SGDClassifier X = np.abs(np.random.randn(14, 2)) y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3] ovr = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None, shuffle=False, random_state=0)) ovr.partial_fit(X[:7], y[:7], np.unique(y)) ovr.partial_fit(X[7:], y[7:]) pred = ovr.predict(X) ovr1 = OneVsRestClassifier(SGDClassifier(max_iter=1, tol=None, shuffle=False, random_state=0)) pred1 = ovr1.fit(X, y).predict(X) assert np.mean(pred == y) == np.mean(pred1 == y) # test partial_fit only exists if estimator has it: ovr = OneVsRestClassifier(SVC()) assert not hasattr(ovr, "partial_fit") def test_ovr_partial_fit_exceptions(): ovr = OneVsRestClassifier(MultinomialNB()) X = np.abs(np.random.randn(14, 2)) y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3] ovr.partial_fit(X[:7], y[:7], np.unique(y)) # If a new class that was not in the first call of partial fit is seen # it should raise ValueError y1 = [5] + y[7:-1] msg = r"Mini-batch contains \[.+\] while classes must be subset of \[.+\]" with pytest.raises(ValueError, match=msg): ovr.partial_fit(X=X[7:], y=y1) def test_ovr_ovo_regressor(): # test that ovr and ovo work on regressors which don't have a decision_ # function ovr = OneVsRestClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert np.mean(pred == iris.target) > .9 ovr = OneVsOneClassifier(DecisionTreeRegressor()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert len(ovr.estimators_) == n_classes * (n_classes - 1) / 2 assert_array_equal(np.unique(pred), [0, 1, 2]) # we are doing something sensible assert np.mean(pred == iris.target) > .9 def test_ovr_fit_predict_sparse(): for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix, sp.lil_matrix]: base_clf = MultinomialNB(alpha=1) X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train)) Y_pred_sprs = clf_sprs.predict(X_test) assert clf.multilabel_ assert sp.issparse(Y_pred_sprs) assert_array_equal(Y_pred_sprs.toarray(), Y_pred) # Test predict_proba Y_proba = clf_sprs.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. 
pred = Y_proba > .5 assert_array_equal(pred, Y_pred_sprs.toarray()) # Test decision_function clf = svm.SVC() clf_sprs = OneVsRestClassifier(clf).fit(X_train, sparse(Y_train)) dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int) assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray()) def test_ovr_always_present(): # Test that ovr works with classes that are always present or absent. # Note: tests is the case where _ConstantPredictor is utilised X = np.ones((10, 2)) X[:5, :] = 0 # Build an indicator matrix where two features are always on. # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)] y = np.zeros((10, 3)) y[5:, 0] = 1 y[:, 1] = 1 y[:, 2] = 1 ovr = OneVsRestClassifier(LogisticRegression()) msg = r'Label .+ is present in all training examples' with pytest.warns(UserWarning, match=msg): ovr.fit(X, y) y_pred = ovr.predict(X) assert_array_equal(np.array(y_pred), np.array(y)) y_pred = ovr.decision_function(X) assert np.unique(y_pred[:, -2:]) == 1 y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.ones(X.shape[0])) # y has a constantly absent label y = np.zeros((10, 2)) y[5:, 0] = 1 # variable label ovr = OneVsRestClassifier(LogisticRegression()) msg = r'Label not 1 is present in all training examples' with pytest.warns(UserWarning, match=msg): ovr.fit(X, y) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0])) def test_ovr_multiclass(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "ham", "eggs", "ham"] Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): clf = OneVsRestClassifier(base_clf).fit(X, y) assert set(clf.classes_) == classes y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_array_equal(y_pred, ["eggs"]) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 0, 4]])[0] assert_array_equal(y_pred, [0, 0, 1]) def test_ovr_binary(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "spam", "eggs", "spam"] Y = np.array([[0, 1, 1, 0, 1]]).T classes = set("eggs spam".split()) def conduct_test(base_clf, test_predict_proba=False): clf = OneVsRestClassifier(base_clf).fit(X, y) assert set(clf.classes_) == classes y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_array_equal(y_pred, ["eggs"]) if hasattr(base_clf, 'decision_function'): dec = clf.decision_function(X) assert dec.shape == (5,) if test_predict_proba: X_test = np.array([[0, 0, 4]]) probabilities = clf.predict_proba(X_test) assert 2 == len(probabilities[0]) assert (clf.classes_[np.argmax(probabilities, axis=1)] == clf.predict(X_test)) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[3, 0, 0]])[0] assert y_pred == 1 for base_clf in (LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): conduct_test(base_clf) for base_clf in (MultinomialNB(), SVC(probability=True), LogisticRegression()): conduct_test(base_clf, test_predict_proba=True) def test_ovr_multilabel(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5)): clf = OneVsRestClassifier(base_clf).fit(X, y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert clf.multilabel_ def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert len(ovr.estimators_) == 3 assert ovr.score(iris.data, iris.target) > .9 def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert clf.multilabel_ assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2) assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') # Estimator with predict_proba disabled, depending on parameters. decision_only = OneVsRestClassifier(svm.SVC(probability=False)) assert not hasattr(decision_only, 'predict_proba') decision_only.fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') assert hasattr(decision_only, 'decision_function') # Estimator which can get predict_proba enabled after fitting gs = GridSearchCV(svm.SVC(probability=False), param_grid={'probability': [True]}) proba_after_fit = OneVsRestClassifier(gs) assert not hasattr(proba_after_fit, 'predict_proba') proba_after_fit.fit(X_train, Y_train) assert hasattr(proba_after_fit, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # Decision function only estimator. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert not hasattr(decision_only, 'predict_proba') Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label with the greatest predictive probability. 
pred = Y_proba.argmax(axis=1) assert not (pred - Y_pred).any() def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal((clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test)) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test = X[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) # TODO: Remove this test in version 1.1 # when the coef_ attribute is removed @ignore_warnings(category=FutureWarning) def test_ovr_coef_(): for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]: # SVC has sparse coef with sparse input data ovr = OneVsRestClassifier(base_classifier) for X in [iris.data, sp.csr_matrix(iris.data)]: # test with dense and sparse coef ovr.fit(X, iris.target) shape = ovr.coef_.shape assert shape[0] == n_classes assert shape[1] == iris.data.shape[1] # don't densify sparse coefficients assert (sp.issparse(ovr.estimators_[0].coef_) == sp.issparse(ovr.coef_)) # TODO: Remove this test in version 1.1 # when the coef_ attribute is removed @ignore_warnings(category=FutureWarning) def test_ovr_coef_exceptions(): # Not fitted exception! ovr = OneVsRestClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): ovr.coef_ # Doesn't have coef_ exception! ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) msg = "Base estimator doesn't have a coef_ attribute" with pytest.raises(AttributeError, match=msg): ovr.coef_ # TODO: Remove this test in version 1.1 when # the coef_ and intercept_ attributes are removed def test_ovr_deprecated_coef_intercept(): ovr = OneVsRestClassifier(SVC(kernel="linear")) ovr = ovr.fit(iris.data, iris.target) msg = (r"Attribute {0} was deprecated in version 0.24 " r"and will be removed in 1.1 \(renaming of 0.26\). 
If you observe " r"this warning while using RFE or SelectFromModel, " r"use the importance_getter parameter instead.") for att in ["coef_", "intercept_"]: with pytest.warns(FutureWarning, match=msg.format(att)): getattr(ovr, att) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): ovo.predict([]) def test_ovo_fit_on_list(): # Test that OneVsOne fitting works with a list of targets and yields the # same output as predict from an array ovo = OneVsOneClassifier(LinearSVC(random_state=0)) prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data) iris_data_list = [list(a) for a in iris.data] prediction_from_list = ovo.fit(iris_data_list, list(iris.target)).predict(iris_data_list) assert_array_equal(prediction_from_array, prediction_from_list) def test_ovo_fit_predict(): # A classifier which implements decision_function. ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2 # A classifier which implements predict_proba. ovo = OneVsOneClassifier(MultinomialNB()) ovo.fit(iris.data, iris.target).predict(iris.data) assert len(ovo.estimators_) == n_classes * (n_classes - 1) / 2 def test_ovo_partial_fit_predict(): temp = datasets.load_iris() X, y = temp.data, temp.target ovo1 = OneVsOneClassifier(MultinomialNB()) ovo1.partial_fit(X[:100], y[:100], np.unique(y)) ovo1.partial_fit(X[100:], y[100:]) pred1 = ovo1.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) ovo2.fit(X, y) pred2 = ovo2.predict(X) assert len(ovo1.estimators_) == n_classes * (n_classes - 1) / 2 assert np.mean(y == pred1) > 0.65 assert_almost_equal(pred1, pred2) # Test when mini-batches have binary target classes ovo1 = OneVsOneClassifier(MultinomialNB()) ovo1.partial_fit(X[:60], y[:60], np.unique(y)) ovo1.partial_fit(X[60:], y[60:]) pred1 = ovo1.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) pred2 = ovo2.fit(X, y).predict(X) assert_almost_equal(pred1, pred2) assert len(ovo1.estimators_) == len(np.unique(y)) assert np.mean(y == pred1) > 0.65 ovo = OneVsOneClassifier(MultinomialNB()) X = np.random.rand(14, 2) y = [1, 1, 2, 3, 3, 0, 0, 4, 4, 4, 4, 4, 2, 2] ovo.partial_fit(X[:7], y[:7], [0, 1, 2, 3, 4]) ovo.partial_fit(X[7:], y[7:]) pred = ovo.predict(X) ovo2 = OneVsOneClassifier(MultinomialNB()) pred2 = ovo2.fit(X, y).predict(X) assert_almost_equal(pred, pred2) # raises error when mini-batch does not have classes from all_classes ovo = OneVsOneClassifier(MultinomialNB()) error_y = [0, 1, 2, 3, 4, 5, 2] message_re = escape("Mini-batch contains {0} while " "it must be subset of {1}".format(np.unique(error_y), np.unique(y))) with pytest.raises(ValueError, match=message_re): ovo.partial_fit(X[:7], error_y, np.unique(y)) # test partial_fit only exists if estimator has it: ovr = OneVsOneClassifier(SVC()) assert not hasattr(ovr, "partial_fit") def test_ovo_decision_function(): n_samples = iris.data.shape[0] ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0)) # first binary ovo_clf.fit(iris.data, iris.target == 0) decisions = ovo_clf.decision_function(iris.data) assert decisions.shape == (n_samples,) # then multi-class ovo_clf.fit(iris.data, iris.target) decisions = ovo_clf.decision_function(iris.data) assert decisions.shape == (n_samples, n_classes) assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data)) # Compute the votes votes = np.zeros((n_samples, n_classes)) k = 0 for i in range(n_classes): for j in range(i + 1, 
n_classes): pred = ovo_clf.estimators_[k].predict(iris.data) votes[pred == 0, i] += 1 votes[pred == 1, j] += 1 k += 1 # Extract votes and verify assert_array_equal(votes, np.round(decisions)) for class_idx in range(n_classes): # For each sample and each class, there only 3 possible vote levels # because they are only 3 distinct class pairs thus 3 distinct # binary classifiers. # Therefore, sorting predictions based on votes would yield # mostly tied predictions: assert set(votes[:, class_idx]).issubset(set([0., 1., 2.])) # The OVO decision function on the other hand is able to resolve # most of the ties on this data as it combines both the vote counts # and the aggregated confidence levels of the binary classifiers # to compute the aggregate decision function. The iris dataset # has 150 samples with a couple of duplicates. The OvO decisions # can resolve most of the ties: assert len(np.unique(decisions[:, class_idx])) > 146 def test_ovo_gridsearch(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovo, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ovo_ties(): # Test that ties are broken using the decision function, # not defaulting to the smallest label X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y = np.array([2, 0, 1, 2]) multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4, tol=None)) ovo_prediction = multi_clf.fit(X, y).predict(X) ovo_decision = multi_clf.decision_function(X) # Classifiers are in order 0-1, 0-2, 1-2 # Use decision_function to compute the votes and the normalized # sum_of_confidences, which is used to disambiguate when there is a tie in # votes. votes = np.round(ovo_decision) normalized_confidences = ovo_decision - votes # For the first point, there is one vote per class assert_array_equal(votes[0, :], 1) # For the rest, there is no tie and the prediction is the argmax assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:]) # For the tie, the prediction is the class with the highest score assert ovo_prediction[0] == normalized_confidences[0].argmax() def test_ovo_ties2(): # test that ties can not only be won by the first two labels X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y_ref = np.array([2, 0, 1, 2]) # cycle through labels so that each label wins once for i in range(3): y = (y_ref + i) % 3 multi_clf = OneVsOneClassifier(Perceptron(shuffle=False, max_iter=4, tol=None)) ovo_prediction = multi_clf.fit(X, y).predict(X) assert ovo_prediction[0] == i % 3 def test_ovo_string_y(): # Test that the OvO doesn't mess up the encoding of string labels X = np.eye(4) y = np.array(['a', 'b', 'c', 'd']) ovo = OneVsOneClassifier(LinearSVC()) ovo.fit(X, y) assert_array_equal(y, ovo.predict(X)) def test_ovo_one_class(): # Test error for OvO with one class X = np.eye(4) y = np.array(['a'] * 4) ovo = OneVsOneClassifier(LinearSVC()) msg = "when only one class" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) def test_ovo_float_y(): # Test that the OvO errors on float targets X = iris.data y = iris.data[:, 0] ovo = OneVsOneClassifier(LinearSVC()) msg = "Unknown label type" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) def test_ecoc_exceptions(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) with pytest.raises(NotFittedError): ecoc.predict([]) def test_ecoc_fit_predict(): # A classifier which implements decision_function. 
ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert len(ecoc.estimators_) == n_classes * 2 # A classifier which implements predict_proba. ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert len(ecoc.estimators_) == n_classes * 2 def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert best_C in Cs def test_ecoc_float_y(): # Test that the OCC errors on float targets X = iris.data y = iris.data[:, 0] ovo = OutputCodeClassifier(LinearSVC()) msg = "Unknown label type" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) ovo = OutputCodeClassifier(LinearSVC(), code_size=-1) msg = "code_size should be greater than 0, got -1" with pytest.raises(ValueError, match=msg): ovo.fit(X, y) def test_ecoc_delegate_sparse_base_estimator(): # Non-regression test for # https://github.com/scikit-learn/scikit-learn/issues/17218 X, y = iris.data, iris.target X_sp = sp.csc_matrix(X) # create an estimator that does not support sparse input base_estimator = CheckingClassifier( check_X=check_array, check_X_params={"ensure_2d": True, "accept_sparse": False}, ) ecoc = OutputCodeClassifier(base_estimator, random_state=0) with pytest.raises(TypeError, match="A sparse matrix was passed"): ecoc.fit(X_sp, y) ecoc.fit(X, y) with pytest.raises(TypeError, match="A sparse matrix was passed"): ecoc.predict(X_sp) # smoke test to check when sparse input should be supported ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) ecoc.fit(X_sp, y).predict(X_sp) assert len(ecoc.estimators_) == 4 def test_pairwise_indices(): clf_precomputed = svm.SVC(kernel='precomputed') X, y = iris.data, iris.target ovr_false = OneVsOneClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) ovr_false.fit(linear_kernel, y) n_estimators = len(ovr_false.estimators_) precomputed_indices = ovr_false.pairwise_indices_ for idx in precomputed_indices: assert (idx.shape[0] * n_estimators / (n_estimators - 1) == linear_kernel.shape[0]) @ignore_warnings(category=FutureWarning) def test_pairwise_attribute(): clf_precomputed = svm.SVC(kernel='precomputed') clf_notprecomputed = svm.SVC() for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]: ovr_false = MultiClassClassifier(clf_notprecomputed) assert not ovr_false._pairwise ovr_true = MultiClassClassifier(clf_precomputed) assert ovr_true._pairwise @pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier]) def test_pairwise_tag(MultiClassClassifier): clf_precomputed = svm.SVC(kernel='precomputed') clf_notprecomputed = svm.SVC() ovr_false = MultiClassClassifier(clf_notprecomputed) assert not ovr_false._get_tags()["pairwise"] ovr_true = MultiClassClassifier(clf_precomputed) assert ovr_true._get_tags()["pairwise"] # TODO: Remove in 1.1 @pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier]) def test_pairwise_deprecated(MultiClassClassifier): clf_precomputed = svm.SVC(kernel='precomputed') ov_clf = MultiClassClassifier(clf_precomputed) msg = r"Attribute _pairwise was deprecated in version 0\.24" with pytest.warns(FutureWarning, match=msg): ov_clf._pairwise def test_pairwise_cross_val_score(): clf_precomputed = svm.SVC(kernel='precomputed') clf_notprecomputed = 
svm.SVC(kernel='linear') X, y = iris.data, iris.target for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]: ovr_false = MultiClassClassifier(clf_notprecomputed) ovr_true = MultiClassClassifier(clf_precomputed) linear_kernel = np.dot(X, X.T) score_precomputed = cross_val_score(ovr_true, linear_kernel, y) score_linear = cross_val_score(ovr_false, X, y) assert_array_equal(score_precomputed, score_linear) @pytest.mark.parametrize("MultiClassClassifier", [OneVsRestClassifier, OneVsOneClassifier]) # FIXME: we should move this test in `estimator_checks` once we are able # to construct meta-estimator instances def test_support_missing_values(MultiClassClassifier): # smoke test to check that pipeline OvR and OvO classifiers are letting # the validation of missing values to # the underlying pipeline or classifiers rng = np.random.RandomState(42) X, y = iris.data, iris.target X = np.copy(X) # Copy to avoid that the original data is modified mask = rng.choice([1, 0], X.shape, p=[.1, .9]).astype(bool) X[mask] = np.nan lr = make_pipeline(SimpleImputer(), LogisticRegression(random_state=rng)) MultiClassClassifier(lr).fit(X, y).score(X, y)
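# A minimal usage sketch of the meta-estimators exercised by the tests above
# (OneVsRestClassifier, OneVsOneClassifier, OutputCodeClassifier), assuming a
# scikit-learn installation consistent with the imports at the top of the
# file; the base estimator and dataset choices are illustrative only.
from sklearn.datasets import load_iris
from sklearn.multiclass import (OneVsOneClassifier, OneVsRestClassifier,
                                OutputCodeClassifier)
from sklearn.svm import LinearSVC

_X, _y = load_iris(return_X_y=True)

# One-vs-rest fits one binary classifier per class (3 for iris).
_ovr = OneVsRestClassifier(LinearSVC(random_state=0)).fit(_X, _y)
assert len(_ovr.estimators_) == 3

# One-vs-one fits one classifier per class pair: 3 * (3 - 1) / 2 = 3.
_ovo = OneVsOneClassifier(LinearSVC(random_state=0)).fit(_X, _y)
assert len(_ovo.estimators_) == 3

# Error-correcting output codes fit code_size * n_classes estimators,
# matching the expectation asserted in test_ecoc_fit_predict above.
_ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2,
                             random_state=0).fit(_X, _y)
assert len(_ecoc.estimators_) == 6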
bsd-3-clause
rtrwalker/geotecha
geotecha/mathematics/quadrature.py
1
74253
# geotecha - A software suite for geotechncial engineering # Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/gpl.html. """Numerical integration by quadrature""" from __future__ import division, print_function import matplotlib.pyplot import numpy as np from scipy import integrate from scipy.special import jn_zeros from scipy.special import jn from matplotlib import pyplot as plt import functools import unittest from numpy.testing import assert_allclose from numpy.polynomial.polynomial import Polynomial def gauss_kronrod_abscissae_and_weights(n): """Gauss-Kronrod quadrature abscissae and weights Coarse integral = Sum(f(xi) * wi1) Fine integral = Sum(f(xi) * wi2) For the coarse integral the unused weights are set to zero Parameters ---------- n : [2-20, 32, 64, 100] number of integration points for the Gauss points. Number of Kronrod points will automatically be 2 * n + 1. Returns ------- xi : 1d array Abscissae for the quadrature points. wi1 : 1d array Weights for the coarse integral. wi2 : 1d array Weights for the fine integral References ---------- .. [2] Holoborodko, Pavel. 2011. 'Gauss-Kronrod Quadrature Nodes and Weights. November 7. 
http://www.advanpix.com/2011/11/07/gauss-kronrod-quadrature-nodes-weights/#Tabulated_Gauss-Kronrod_weights_and_abscissae """ if n not in [7,10,15,20,25,30]: raise ValueError('n must be 2-20, 32, 64, or 100') weights = { 7: { 'g': np.array( [[-0.9491079123427585245261897, 0.1294849661688696932706114], [ -0.7415311855993944398638648, 0.2797053914892766679014678], [ -0.4058451513773971669066064, 0.3818300505051189449503698], [ 0.0000000000000000000000000, 0.4179591836734693877551020], [ 0.4058451513773971669066064, 0.3818300505051189449503698], [ 0.7415311855993944398638648, 0.2797053914892766679014678], [ 0.9491079123427585245261897, 0.1294849661688696932706114]], dtype=float), 'k': np.array( [[-0.9914553711208126392068547, 0.0229353220105292249637320], [ -0.9491079123427585245261897, 0.0630920926299785532907007], [ -0.8648644233597690727897128, 0.1047900103222501838398763], [ -0.7415311855993944398638648, 0.1406532597155259187451896], [ -0.5860872354676911302941448, 0.1690047266392679028265834], [ -0.4058451513773971669066064, 0.1903505780647854099132564], [ -0.2077849550078984676006894, 0.2044329400752988924141620], [ 0.0000000000000000000000000, 0.2094821410847278280129992], [ 0.2077849550078984676006894, 0.2044329400752988924141620], [ 0.4058451513773971669066064, 0.1903505780647854099132564], [ 0.5860872354676911302941448, 0.1690047266392679028265834], [ 0.7415311855993944398638648, 0.1406532597155259187451896], [ 0.8648644233597690727897128, 0.1047900103222501838398763], [ 0.9491079123427585245261897, 0.0630920926299785532907007], [ 0.9914553711208126392068547, 0.0229353220105292249637320]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, 10: { 'g': np.array( [[-0.9739065285171717200779640, 0.0666713443086881375935688], [ -0.8650633666889845107320967, 0.1494513491505805931457763], [ -0.6794095682990244062343274, 0.2190863625159820439955349], [ -0.4333953941292471907992659, 0.2692667193099963550912269], [ -0.1488743389816312108848260, 0.2955242247147528701738930], [ 0.1488743389816312108848260, 0.2955242247147528701738930], [ 0.4333953941292471907992659, 0.2692667193099963550912269], [ 0.6794095682990244062343274, 0.2190863625159820439955349], [ 0.8650633666889845107320967, 0.1494513491505805931457763], [ 0.9739065285171717200779640, 0.0666713443086881375935688]], dtype=float), 'k': np.array( [[-0.9956571630258080807355273, 0.0116946388673718742780644], [ -0.9739065285171717200779640, 0.0325581623079647274788190], [ -0.9301574913557082260012072, 0.0547558965743519960313813], [ -0.8650633666889845107320967, 0.0750396748109199527670431], [ -0.7808177265864168970637176, 0.0931254545836976055350655], [ -0.6794095682990244062343274, 0.1093871588022976418992106], [ -0.5627571346686046833390001, 0.1234919762620658510779581], [ -0.4333953941292471907992659, 0.1347092173114733259280540], [ -0.2943928627014601981311266, 0.1427759385770600807970943], [ -0.1488743389816312108848260, 0.1477391049013384913748415], [ 0.0000000000000000000000000, 0.1494455540029169056649365], [ 0.1488743389816312108848260, 0.1477391049013384913748415], [ 0.2943928627014601981311266, 0.1427759385770600807970943], [ 0.4333953941292471907992659, 0.1347092173114733259280540], [ 0.5627571346686046833390001, 0.1234919762620658510779581], [ 0.6794095682990244062343274, 0.1093871588022976418992106], [ 0.7808177265864168970637176, 0.0931254545836976055350655], [ 0.8650633666889845107320967, 0.0750396748109199527670431], [ 
0.9301574913557082260012072, 0.0547558965743519960313813], [ 0.9739065285171717200779640, 0.0325581623079647274788190], [ 0.9956571630258080807355273, 0.0116946388673718742780644]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, 15: { 'g': np.array( [[-0.9879925180204854284895657, 0.0307532419961172683546284], [ -0.9372733924007059043077589, 0.0703660474881081247092674], [ -0.8482065834104272162006483, 0.1071592204671719350118695], [ -0.7244177313601700474161861, 0.1395706779261543144478048], [ -0.5709721726085388475372267, 0.1662692058169939335532009], [ -0.3941513470775633698972074, 0.1861610000155622110268006], [ -0.2011940939974345223006283, 0.1984314853271115764561183], [ 0.0000000000000000000000000, 0.2025782419255612728806202], [ 0.2011940939974345223006283, 0.1984314853271115764561183], [ 0.3941513470775633698972074, 0.1861610000155622110268006], [ 0.5709721726085388475372267, 0.1662692058169939335532009], [ 0.7244177313601700474161861, 0.1395706779261543144478048], [ 0.8482065834104272162006483, 0.1071592204671719350118695], [ 0.9372733924007059043077589, 0.0703660474881081247092674], [ 0.9879925180204854284895657, 0.0307532419961172683546284]], dtype=float), 'k': np.array( [[-0.9980022986933970602851728, 0.0053774798729233489877921], [ -0.9879925180204854284895657, 0.0150079473293161225383748], [ -0.9677390756791391342573480, 0.0254608473267153201868740], [ -0.9372733924007059043077589, 0.0353463607913758462220379], [ -0.8972645323440819008825097, 0.0445897513247648766082273], [ -0.8482065834104272162006483, 0.0534815246909280872653431], [ -0.7904185014424659329676493, 0.0620095678006706402851392], [ -0.7244177313601700474161861, 0.0698541213187282587095201], [ -0.6509967412974169705337359, 0.0768496807577203788944328], [ -0.5709721726085388475372267, 0.0830805028231330210382892], [ -0.4850818636402396806936557, 0.0885644430562117706472754], [ -0.3941513470775633698972074, 0.0931265981708253212254869], [ -0.2991800071531688121667800, 0.0966427269836236785051799], [ -0.2011940939974345223006283, 0.0991735987217919593323932], [ -0.1011420669187174990270742, 0.1007698455238755950449467], [ 0.0000000000000000000000000, 0.1013300070147915490173748], [ 0.1011420669187174990270742, 0.1007698455238755950449467], [ 0.2011940939974345223006283, 0.0991735987217919593323932], [ 0.2991800071531688121667800, 0.0966427269836236785051799], [ 0.3941513470775633698972074, 0.0931265981708253212254869], [ 0.4850818636402396806936557, 0.0885644430562117706472754], [ 0.5709721726085388475372267, 0.0830805028231330210382892], [ 0.6509967412974169705337359, 0.0768496807577203788944328], [ 0.7244177313601700474161861, 0.0698541213187282587095201], [ 0.7904185014424659329676493, 0.0620095678006706402851392], [ 0.8482065834104272162006483, 0.0534815246909280872653431], [ 0.8972645323440819008825097, 0.0445897513247648766082273], [ 0.9372733924007059043077589, 0.0353463607913758462220379], [ 0.9677390756791391342573480, 0.0254608473267153201868740], [ 0.9879925180204854284895657, 0.0150079473293161225383748], [ 0.9980022986933970602851728, 0.0053774798729233489877921]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, 20: { 'g': np.array( [[-0.9931285991850949247861224, 
0.0176140071391521183118620], [ -0.9639719272779137912676661, 0.0406014298003869413310400], [ -0.9122344282513259058677524, 0.0626720483341090635695065], [ -0.8391169718222188233945291, 0.0832767415767047487247581], [ -0.7463319064601507926143051, 0.1019301198172404350367501], [ -0.6360536807265150254528367, 0.1181945319615184173123774], [ -0.5108670019508270980043641, 0.1316886384491766268984945], [ -0.3737060887154195606725482, 0.1420961093183820513292983], [ -0.2277858511416450780804962, 0.1491729864726037467878287], [ -0.0765265211334973337546404, 0.1527533871307258506980843], [ 0.0765265211334973337546404, 0.1527533871307258506980843], [ 0.2277858511416450780804962, 0.1491729864726037467878287], [ 0.3737060887154195606725482, 0.1420961093183820513292983], [ 0.5108670019508270980043641, 0.1316886384491766268984945], [ 0.6360536807265150254528367, 0.1181945319615184173123774], [ 0.7463319064601507926143051, 0.1019301198172404350367501], [ 0.8391169718222188233945291, 0.0832767415767047487247581], [ 0.9122344282513259058677524, 0.0626720483341090635695065], [ 0.9639719272779137912676661, 0.0406014298003869413310400], [ 0.9931285991850949247861224, 0.0176140071391521183118620]], dtype=float), 'k': np.array( [[-0.9988590315882776638383156, 0.0030735837185205315012183], [ -0.9931285991850949247861224, 0.0086002698556429421986618], [ -0.9815078774502502591933430, 0.0146261692569712529837880], [ -0.9639719272779137912676661, 0.0203883734612665235980102], [ -0.9408226338317547535199827, 0.0258821336049511588345051], [ -0.9122344282513259058677524, 0.0312873067770327989585431], [ -0.8782768112522819760774430, 0.0366001697582007980305572], [ -0.8391169718222188233945291, 0.0416688733279736862637883], [ -0.7950414288375511983506388, 0.0464348218674976747202319], [ -0.7463319064601507926143051, 0.0509445739237286919327077], [ -0.6932376563347513848054907, 0.0551951053482859947448324], [ -0.6360536807265150254528367, 0.0591114008806395723749672], [ -0.5751404468197103153429460, 0.0626532375547811680258701], [ -0.5108670019508270980043641, 0.0658345971336184221115636], [ -0.4435931752387251031999922, 0.0686486729285216193456234], [ -0.3737060887154195606725482, 0.0710544235534440683057904], [ -0.3016278681149130043205554, 0.0730306903327866674951894], [ -0.2277858511416450780804962, 0.0745828754004991889865814], [ -0.1526054652409226755052202, 0.0757044976845566746595428], [ -0.0765265211334973337546404, 0.0763778676720807367055028], [ 0.0000000000000000000000000, 0.0766007119179996564450499], [ 0.0765265211334973337546404, 0.0763778676720807367055028], [ 0.1526054652409226755052202, 0.0757044976845566746595428], [ 0.2277858511416450780804962, 0.0745828754004991889865814], [ 0.3016278681149130043205554, 0.0730306903327866674951894], [ 0.3737060887154195606725482, 0.0710544235534440683057904], [ 0.4435931752387251031999922, 0.0686486729285216193456234], [ 0.5108670019508270980043641, 0.0658345971336184221115636], [ 0.5751404468197103153429460, 0.0626532375547811680258701], [ 0.6360536807265150254528367, 0.0591114008806395723749672], [ 0.6932376563347513848054907, 0.0551951053482859947448324], [ 0.7463319064601507926143051, 0.0509445739237286919327077], [ 0.7950414288375511983506388, 0.0464348218674976747202319], [ 0.8391169718222188233945291, 0.0416688733279736862637883], [ 0.8782768112522819760774430, 0.0366001697582007980305572], [ 0.9122344282513259058677524, 0.0312873067770327989585431], [ 0.9408226338317547535199827, 0.0258821336049511588345051], [ 0.9639719272779137912676661, 
0.0203883734612665235980102], [ 0.9815078774502502591933430, 0.0146261692569712529837880], [ 0.9931285991850949247861224, 0.0086002698556429421986618], [ 0.9988590315882776638383156, 0.0030735837185205315012183]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, 25: { 'g': np.array( [[-0.9955569697904980979087849, 0.0113937985010262879479030], [ -0.9766639214595175114983154, 0.0263549866150321372619018], [ -0.9429745712289743394140112, 0.0409391567013063126556235], [ -0.8949919978782753688510420, 0.0549046959758351919259369], [ -0.8334426287608340014210211, 0.0680383338123569172071872], [ -0.7592592630373576305772829, 0.0801407003350010180132350], [ -0.6735663684734683644851206, 0.0910282619829636498114972], [ -0.5776629302412229677236898, 0.1005359490670506442022069], [ -0.4730027314457149605221821, 0.1085196244742636531160940], [ -0.3611723058093878377358217, 0.1148582591457116483393255], [ -0.2438668837209884320451904, 0.1194557635357847722281781], [ -0.1228646926107103963873598, 0.1222424429903100416889595], [ 0.0000000000000000000000000, 0.1231760537267154512039029], [ 0.1228646926107103963873598, 0.1222424429903100416889595], [ 0.2438668837209884320451904, 0.1194557635357847722281781], [ 0.3611723058093878377358217, 0.1148582591457116483393255], [ 0.4730027314457149605221821, 0.1085196244742636531160940], [ 0.5776629302412229677236898, 0.1005359490670506442022069], [ 0.6735663684734683644851206, 0.0910282619829636498114972], [ 0.7592592630373576305772829, 0.0801407003350010180132350], [ 0.8334426287608340014210211, 0.0680383338123569172071872], [ 0.8949919978782753688510420, 0.0549046959758351919259369], [ 0.9429745712289743394140112, 0.0409391567013063126556235], [ 0.9766639214595175114983154, 0.0263549866150321372619018], [ 0.9955569697904980979087849, 0.0113937985010262879479030]], dtype=float), 'k': np.array( [[-0.9992621049926098341934575, 0.0019873838923303159265079], [ -0.9955569697904980979087849, 0.0055619321353567137580402], [ -0.9880357945340772476373310, 0.0094739733861741516072077], [ -0.9766639214595175114983154, 0.0132362291955716748136564], [ -0.9616149864258425124181300, 0.0168478177091282982315167], [ -0.9429745712289743394140112, 0.0204353711458828354565683], [ -0.9207471152817015617463461, 0.0240099456069532162200925], [ -0.8949919978782753688510420, 0.0274753175878517378029485], [ -0.8658470652932755954489970, 0.0307923001673874888911090], [ -0.8334426287608340014210211, 0.0340021302743293378367488], [ -0.7978737979985000594104109, 0.0371162714834155435603306], [ -0.7592592630373576305772829, 0.0400838255040323820748393], [ -0.7177664068130843881866541, 0.0428728450201700494768958], [ -0.6735663684734683644851206, 0.0455029130499217889098706], [ -0.6268100990103174127881227, 0.0479825371388367139063923], [ -0.5776629302412229677236898, 0.0502776790807156719633253], [ -0.5263252843347191825996238, 0.0523628858064074758643667], [ -0.4730027314457149605221821, 0.0542511298885454901445434], [ -0.4178853821930377488518144, 0.0559508112204123173082407], [ -0.3611723058093878377358217, 0.0574371163615678328535827], [ -0.3030895389311078301674789, 0.0586896800223942079619742], [ -0.2438668837209884320451904, 0.0597203403241740599790993], [ -0.1837189394210488920159699, 0.0605394553760458629453603], [ 
-0.1228646926107103963873598, 0.0611285097170530483058590], [ -0.0615444830056850788865464, 0.0614711898714253166615441], [ 0.0000000000000000000000000, 0.0615808180678329350787598], [ 0.0615444830056850788865464, 0.0614711898714253166615441], [ 0.1228646926107103963873598, 0.0611285097170530483058590], [ 0.1837189394210488920159699, 0.0605394553760458629453603], [ 0.2438668837209884320451904, 0.0597203403241740599790993], [ 0.3030895389311078301674789, 0.0586896800223942079619742], [ 0.3611723058093878377358217, 0.0574371163615678328535827], [ 0.4178853821930377488518144, 0.0559508112204123173082407], [ 0.4730027314457149605221821, 0.0542511298885454901445434], [ 0.5263252843347191825996238, 0.0523628858064074758643667], [ 0.5776629302412229677236898, 0.0502776790807156719633253], [ 0.6268100990103174127881227, 0.0479825371388367139063923], [ 0.6735663684734683644851206, 0.0455029130499217889098706], [ 0.7177664068130843881866541, 0.0428728450201700494768958], [ 0.7592592630373576305772829, 0.0400838255040323820748393], [ 0.7978737979985000594104109, 0.0371162714834155435603306], [ 0.8334426287608340014210211, 0.0340021302743293378367488], [ 0.8658470652932755954489970, 0.0307923001673874888911090], [ 0.8949919978782753688510420, 0.0274753175878517378029485], [ 0.9207471152817015617463461, 0.0240099456069532162200925], [ 0.9429745712289743394140112, 0.0204353711458828354565683], [ 0.9616149864258425124181300, 0.0168478177091282982315167], [ 0.9766639214595175114983154, 0.0132362291955716748136564], [ 0.9880357945340772476373310, 0.0094739733861741516072077], [ 0.9955569697904980979087849, 0.0055619321353567137580402], [ 0.9992621049926098341934575, 0.0019873838923303159265079]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, 30: { 'g': np.array( [[-0.9968934840746495402716301, 0.0079681924961666056154659], [ -0.9836681232797472099700326, 0.0184664683110909591423021], [ -0.9600218649683075122168710, 0.0287847078833233693497192], [ -0.9262000474292743258793243, 0.0387991925696270495968019], [ -0.8825605357920526815431165, 0.0484026728305940529029381], [ -0.8295657623827683974428981, 0.0574931562176190664817217], [ -0.7677774321048261949179773, 0.0659742298821804951281285], [ -0.6978504947933157969322924, 0.0737559747377052062682439], [ -0.6205261829892428611404776, 0.0807558952294202153546949], [ -0.5366241481420198992641698, 0.0868997872010829798023875], [ -0.4470337695380891767806099, 0.0921225222377861287176327], [ -0.3527047255308781134710372, 0.0963687371746442596394686], [ -0.2546369261678898464398051, 0.0995934205867952670627803], [ -0.1538699136085835469637947, 0.1017623897484055045964290], [ -0.0514718425553176958330252, 0.1028526528935588403412856], [ 0.0514718425553176958330252, 0.1028526528935588403412856], [ 0.1538699136085835469637947, 0.1017623897484055045964290], [ 0.2546369261678898464398051, 0.0995934205867952670627803], [ 0.3527047255308781134710372, 0.0963687371746442596394686], [ 0.4470337695380891767806099, 0.0921225222377861287176327], [ 0.5366241481420198992641698, 0.0868997872010829798023875], [ 0.6205261829892428611404776, 0.0807558952294202153546949], [ 0.6978504947933157969322924, 0.0737559747377052062682439], [ 
0.7677774321048261949179773, 0.0659742298821804951281285], [ 0.8295657623827683974428981, 0.0574931562176190664817217], [ 0.8825605357920526815431165, 0.0484026728305940529029381], [ 0.9262000474292743258793243, 0.0387991925696270495968019], [ 0.9600218649683075122168710, 0.0287847078833233693497192], [ 0.9836681232797472099700326, 0.0184664683110909591423021], [ 0.9968934840746495402716301, 0.0079681924961666056154659]], dtype=float), 'k': np.array( [[-0.9994844100504906375713259, 0.0013890136986770076245516], [ -0.9968934840746495402716301, 0.0038904611270998840512672], [ -0.9916309968704045948586284, 0.0066307039159312921733198], [ -0.9836681232797472099700326, 0.0092732796595177634284411], [ -0.9731163225011262683746939, 0.0118230152534963417422329], [ -0.9600218649683075122168710, 0.0143697295070458048124514], [ -0.9443744447485599794158313, 0.0169208891890532726275723], [ -0.9262000474292743258793243, 0.0194141411939423811734090], [ -0.9055733076999077985465226, 0.0218280358216091922971675], [ -0.8825605357920526815431165, 0.0241911620780806013656864], [ -0.8572052335460610989586585, 0.0265099548823331016106017], [ -0.8295657623827683974428981, 0.0287540487650412928439788], [ -0.7997278358218390830136689, 0.0309072575623877624728843], [ -0.7677774321048261949179773, 0.0329814470574837260318142], [ -0.7337900624532268047261711, 0.0349793380280600241374997], [ -0.6978504947933157969322924, 0.0368823646518212292239111], [ -0.6600610641266269613700537, 0.0386789456247275929503487], [ -0.6205261829892428611404776, 0.0403745389515359591119953], [ -0.5793452358263616917560249, 0.0419698102151642461471475], [ -0.5366241481420198992641698, 0.0434525397013560693168317], [ -0.4924804678617785749936931, 0.0448148001331626631923556], [ -0.4470337695380891767806099, 0.0460592382710069881162717], [ -0.4004012548303943925354762, 0.0471855465692991539452615], [ -0.3527047255308781134710372, 0.0481858617570871291407795], [ -0.3040732022736250773726771, 0.0490554345550297788875282], [ -0.2546369261678898464398051, 0.0497956834270742063578116], [ -0.2045251166823098914389577, 0.0504059214027823468408931], [ -0.1538699136085835469637947, 0.0508817958987496064922975], [ -0.1028069379667370301470968, 0.0512215478492587721706563], [ -0.0514718425553176958330252, 0.0514261285374590259338629], [ 0.0000000000000000000000000, 0.0514947294294515675583404], [ 0.0514718425553176958330252, 0.0514261285374590259338629], [ 0.1028069379667370301470968, 0.0512215478492587721706563], [ 0.1538699136085835469637947, 0.0508817958987496064922975], [ 0.2045251166823098914389577, 0.0504059214027823468408931], [ 0.2546369261678898464398051, 0.0497956834270742063578116], [ 0.3040732022736250773726771, 0.0490554345550297788875282], [ 0.3527047255308781134710372, 0.0481858617570871291407795], [ 0.4004012548303943925354762, 0.0471855465692991539452615], [ 0.4470337695380891767806099, 0.0460592382710069881162717], [ 0.4924804678617785749936931, 0.0448148001331626631923556], [ 0.5366241481420198992641698, 0.0434525397013560693168317], [ 0.5793452358263616917560249, 0.0419698102151642461471475], [ 0.6205261829892428611404776, 0.0403745389515359591119953], [ 0.6600610641266269613700537, 0.0386789456247275929503487], [ 0.6978504947933157969322924, 0.0368823646518212292239111], [ 0.7337900624532268047261711, 0.0349793380280600241374997], [ 0.7677774321048261949179773, 0.0329814470574837260318142], [ 0.7997278358218390830136689, 0.0309072575623877624728843], [ 0.8295657623827683974428981, 0.0287540487650412928439788], [ 
0.8572052335460610989586585, 0.0265099548823331016106017], [ 0.8825605357920526815431165, 0.0241911620780806013656864], [ 0.9055733076999077985465226, 0.0218280358216091922971675], [ 0.9262000474292743258793243, 0.0194141411939423811734090], [ 0.9443744447485599794158313, 0.0169208891890532726275723], [ 0.9600218649683075122168710, 0.0143697295070458048124514], [ 0.9731163225011262683746939, 0.0118230152534963417422329], [ 0.9836681232797472099700326, 0.0092732796595177634284411], [ 0.9916309968704045948586284, 0.0066307039159312921733198], [ 0.9968934840746495402716301, 0.0038904611270998840512672], [ 0.9994844100504906375713259, 0.0013890136986770076245516]], dtype=float), 'dup': np.array( [False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False], dtype=bool) }, } w = weights[n] dup=w['dup'] xi = w['k'][:,0] wi1 = np.zeros_like(xi) wi1[dup] = w['g'][:, 1] wi2 = w['k'][:,1] return xi, wi1, wi2 def gauss_legendre_abscissae_and_weights(n): """Gauss-Legendre quadrature abscissae and weights Integral = Sum(f(xi) * wi) Parameters ---------- n : [2-20, 32, 64, 100] Number of integration points Returns ------- xi, wi : 1d array of len(n) Abscissae and weights for numericla integration References ---------- .. [1] Holoborodko, Pavel. 2014. 'Numerical Integration'. Accessed April 24. http://www.holoborodko.com/pavel/numerical-methods/numerical-integration/. """ if n not in [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, 32, 64, 100]: raise ValueError('n must be 2-20, 32, 64, or 100') weights = { 2: np.array( [[-0.5773502691896257645091488, 1.0000000000000000000000000], [ 0.5773502691896257645091488, 1.0000000000000000000000000]], dtype=float), 3: np.array( [[-0.7745966692414833770358531, 0.5555555555555555555555556], [ 0, 0.8888888888888888888888889], [ 0.7745966692414833770358531, 0.5555555555555555555555556]], dtype=float), 4: np.array( [[-0.8611363115940525752239465, 0.3478548451374538573730639], [ -0.3399810435848562648026658, 0.6521451548625461426269361], [ 0.3399810435848562648026658, 0.6521451548625461426269361], [ 0.8611363115940525752239465, 0.3478548451374538573730639]], dtype=float), 5: np.array( [[-0.9061798459386639927976269, 0.2369268850561890875142640], [ -0.5384693101056830910363144, 0.4786286704993664680412915], [ 0, 0.5688888888888888888888889], [ 0.5384693101056830910363144, 0.4786286704993664680412915], [ 0.9061798459386639927976269, 0.2369268850561890875142640]], dtype=float), 6: np.array( [[-0.9324695142031520278123016, 0.1713244923791703450402961], [ -0.6612093864662645136613996, 0.3607615730481386075698335], [ -0.2386191860831969086305017, 0.4679139345726910473898703], [ 0.2386191860831969086305017, 0.4679139345726910473898703], [ 0.6612093864662645136613996, 0.3607615730481386075698335], [ 0.9324695142031520278123016, 0.1713244923791703450402961]], dtype=float), 7: np.array( [[-0.9491079123427585245261897, 0.1294849661688696932706114], [ -0.7415311855993944398638648, 0.2797053914892766679014678], [ -0.4058451513773971669066064, 0.3818300505051189449503698], [ 0, 0.4179591836734693877551020], [ 0.4058451513773971669066064, 0.3818300505051189449503698], [ 0.7415311855993944398638648, 0.2797053914892766679014678], [ 
0.9491079123427585245261897, 0.1294849661688696932706114]], dtype=float), 8: np.array( [[-0.9602898564975362316835609, 0.1012285362903762591525314], [ -0.7966664774136267395915539, 0.2223810344533744705443560], [ -0.5255324099163289858177390, 0.3137066458778872873379622], [ -0.1834346424956498049394761, 0.3626837833783619829651504], [ 0.1834346424956498049394761, 0.3626837833783619829651504], [ 0.5255324099163289858177390, 0.3137066458778872873379622], [ 0.7966664774136267395915539, 0.2223810344533744705443560], [ 0.9602898564975362316835609, 0.1012285362903762591525314]], dtype=float), 9: np.array( [[-0.9681602395076260898355762, 0.0812743883615744119718922], [ -0.8360311073266357942994298, 0.1806481606948574040584720], [ -0.6133714327005903973087020, 0.2606106964029354623187429], [ -0.3242534234038089290385380, 0.3123470770400028400686304], [ 0, 0.3302393550012597631645251], [ 0.3242534234038089290385380, 0.3123470770400028400686304], [ 0.6133714327005903973087020, 0.2606106964029354623187429], [ 0.8360311073266357942994298, 0.1806481606948574040584720], [ 0.9681602395076260898355762, 0.0812743883615744119718922]], dtype=float), 10: np.array( [[-0.9739065285171717200779640, 0.0666713443086881375935688], [ -0.8650633666889845107320967, 0.1494513491505805931457763], [ -0.6794095682990244062343274, 0.2190863625159820439955349], [ -0.4333953941292471907992659, 0.2692667193099963550912269], [ -0.1488743389816312108848260, 0.2955242247147528701738930], [ 0.1488743389816312108848260, 0.2955242247147528701738930], [ 0.4333953941292471907992659, 0.2692667193099963550912269], [ 0.6794095682990244062343274, 0.2190863625159820439955349], [ 0.8650633666889845107320967, 0.1494513491505805931457763], [ 0.9739065285171717200779640, 0.0666713443086881375935688]], dtype=float), 11: np.array( [[-0.9782286581460569928039380, 0.0556685671161736664827537], [ -0.8870625997680952990751578, 0.1255803694649046246346943], [ -0.7301520055740493240934163, 0.1862902109277342514260976], [ -0.5190961292068118159257257, 0.2331937645919904799185237], [ -0.2695431559523449723315320, 0.2628045445102466621806889], [ 0, 0.2729250867779006307144835], [ 0.2695431559523449723315320, 0.2628045445102466621806889], [ 0.5190961292068118159257257, 0.2331937645919904799185237], [ 0.7301520055740493240934163, 0.1862902109277342514260976], [ 0.8870625997680952990751578, 0.1255803694649046246346943], [ 0.9782286581460569928039380, 0.0556685671161736664827537]], dtype=float), 12: np.array( [[-0.9815606342467192506905491, 0.0471753363865118271946160], [ -0.9041172563704748566784659, 0.1069393259953184309602547], [ -0.7699026741943046870368938, 0.1600783285433462263346525], [ -0.5873179542866174472967024, 0.2031674267230659217490645], [ -0.3678314989981801937526915, 0.2334925365383548087608499], [ -0.1252334085114689154724414, 0.2491470458134027850005624], [ 0.1252334085114689154724414, 0.2491470458134027850005624], [ 0.3678314989981801937526915, 0.2334925365383548087608499], [ 0.5873179542866174472967024, 0.2031674267230659217490645], [ 0.7699026741943046870368938, 0.1600783285433462263346525], [ 0.9041172563704748566784659, 0.1069393259953184309602547], [ 0.9815606342467192506905491, 0.0471753363865118271946160]], dtype=float), 13: np.array( [[-0.9841830547185881494728294, 0.0404840047653158795200216], [ -0.9175983992229779652065478, 0.0921214998377284479144218], [ -0.8015780907333099127942065, 0.1388735102197872384636018], [ -0.6423493394403402206439846, 0.1781459807619457382800467], [ -0.4484927510364468528779129, 
0.2078160475368885023125232], [ -0.2304583159551347940655281, 0.2262831802628972384120902], [ 0, 0.2325515532308739101945895], [ 0.2304583159551347940655281, 0.2262831802628972384120902], [ 0.4484927510364468528779129, 0.2078160475368885023125232], [ 0.6423493394403402206439846, 0.1781459807619457382800467], [ 0.8015780907333099127942065, 0.1388735102197872384636018], [ 0.9175983992229779652065478, 0.0921214998377284479144218], [ 0.9841830547185881494728294, 0.0404840047653158795200216]], dtype=float), 14: np.array( [[-0.9862838086968123388415973, 0.0351194603317518630318329], [ -0.9284348836635735173363911, 0.0801580871597602098056333], [ -0.8272013150697649931897947, 0.1215185706879031846894148], [ -0.6872929048116854701480198, 0.1572031671581935345696019], [ -0.5152486363581540919652907, 0.1855383974779378137417166], [ -0.3191123689278897604356718, 0.2051984637212956039659241], [ -0.1080549487073436620662447, 0.2152638534631577901958764], [ 0.1080549487073436620662447, 0.2152638534631577901958764], [ 0.3191123689278897604356718, 0.2051984637212956039659241], [ 0.5152486363581540919652907, 0.1855383974779378137417166], [ 0.6872929048116854701480198, 0.1572031671581935345696019], [ 0.8272013150697649931897947, 0.1215185706879031846894148], [ 0.9284348836635735173363911, 0.0801580871597602098056333], [ 0.9862838086968123388415973, 0.0351194603317518630318329]], dtype=float), 15: np.array( [[-0.9879925180204854284895657, 0.0307532419961172683546284], [ -0.9372733924007059043077589, 0.0703660474881081247092674], [ -0.8482065834104272162006483, 0.1071592204671719350118695], [ -0.7244177313601700474161861, 0.1395706779261543144478048], [ -0.5709721726085388475372267, 0.1662692058169939335532009], [ -0.3941513470775633698972074, 0.1861610000155622110268006], [ -0.2011940939974345223006283, 0.1984314853271115764561183], [ 0, 0.2025782419255612728806202], [ 0.2011940939974345223006283, 0.1984314853271115764561183], [ 0.3941513470775633698972074, 0.1861610000155622110268006], [ 0.5709721726085388475372267, 0.1662692058169939335532009], [ 0.7244177313601700474161861, 0.1395706779261543144478048], [ 0.8482065834104272162006483, 0.1071592204671719350118695], [ 0.9372733924007059043077589, 0.0703660474881081247092674], [ 0.9879925180204854284895657, 0.0307532419961172683546284]], dtype=float), 16: np.array( [[-0.9894009349916499325961542, 0.0271524594117540948517806], [ -0.9445750230732325760779884, 0.0622535239386478928628438], [ -0.8656312023878317438804679, 0.0951585116824927848099251], [ -0.7554044083550030338951012, 0.1246289712555338720524763], [ -0.6178762444026437484466718, 0.1495959888165767320815017], [ -0.4580167776572273863424194, 0.1691565193950025381893121], [ -0.2816035507792589132304605, 0.1826034150449235888667637], [ -0.0950125098376374401853193, 0.1894506104550684962853967], [ 0.0950125098376374401853193, 0.1894506104550684962853967], [ 0.2816035507792589132304605, 0.1826034150449235888667637], [ 0.4580167776572273863424194, 0.1691565193950025381893121], [ 0.6178762444026437484466718, 0.1495959888165767320815017], [ 0.7554044083550030338951012, 0.1246289712555338720524763], [ 0.8656312023878317438804679, 0.0951585116824927848099251], [ 0.9445750230732325760779884, 0.0622535239386478928628438], [ 0.9894009349916499325961542, 0.0271524594117540948517806]], dtype=float), 17: np.array( [[-0.9905754753144173356754340, 0.0241483028685479319601100], [ -0.9506755217687677612227170, 0.0554595293739872011294402], [ -0.8802391537269859021229557, 0.0850361483171791808835354], [ 
-0.7815140038968014069252301, 0.1118838471934039710947884], [ -0.6576711592166907658503022, 0.1351363684685254732863200], [ -0.5126905370864769678862466, 0.1540457610768102880814316], [ -0.3512317634538763152971855, 0.1680041021564500445099707], [ -0.1784841814958478558506775, 0.1765627053669926463252710], [ 0, 0.1794464703562065254582656], [ 0.1784841814958478558506775, 0.1765627053669926463252710], [ 0.3512317634538763152971855, 0.1680041021564500445099707], [ 0.5126905370864769678862466, 0.1540457610768102880814316], [ 0.6576711592166907658503022, 0.1351363684685254732863200], [ 0.7815140038968014069252301, 0.1118838471934039710947884], [ 0.8802391537269859021229557, 0.0850361483171791808835354], [ 0.9506755217687677612227170, 0.0554595293739872011294402], [ 0.9905754753144173356754340, 0.0241483028685479319601100]], dtype=float), 18: np.array( [[-0.9915651684209309467300160, 0.0216160135264833103133427], [ -0.9558239495713977551811959, 0.0497145488949697964533349], [ -0.8926024664975557392060606, 0.0764257302548890565291297], [ -0.8037049589725231156824175, 0.1009420441062871655628140], [ -0.6916870430603532078748911, 0.1225552067114784601845191], [ -0.5597708310739475346078715, 0.1406429146706506512047313], [ -0.4117511614628426460359318, 0.1546846751262652449254180], [ -0.2518862256915055095889729, 0.1642764837458327229860538], [ -0.0847750130417353012422619, 0.1691423829631435918406565], [ 0.0847750130417353012422619, 0.1691423829631435918406565], [ 0.2518862256915055095889729, 0.1642764837458327229860538], [ 0.4117511614628426460359318, 0.1546846751262652449254180], [ 0.5597708310739475346078715, 0.1406429146706506512047313], [ 0.6916870430603532078748911, 0.1225552067114784601845191], [ 0.8037049589725231156824175, 0.1009420441062871655628140], [ 0.8926024664975557392060606, 0.0764257302548890565291297], [ 0.9558239495713977551811959, 0.0497145488949697964533349], [ 0.9915651684209309467300160, 0.0216160135264833103133427]], dtype=float), 19: np.array( [[-0.9924068438435844031890177, 0.0194617882297264770363120], [ -0.9602081521348300308527788, 0.0448142267656996003328382], [ -0.9031559036148179016426609, 0.0690445427376412265807083], [ -0.8227146565371428249789225, 0.0914900216224499994644621], [ -0.7209661773352293786170959, 0.1115666455473339947160239], [ -0.6005453046616810234696382, 0.1287539625393362276755158], [ -0.4645707413759609457172671, 0.1426067021736066117757461], [ -0.3165640999636298319901173, 0.1527660420658596667788554], [ -0.1603586456402253758680961, 0.1589688433939543476499564], [ 0, 0.1610544498487836959791636], [ 0.1603586456402253758680961, 0.1589688433939543476499564], [ 0.3165640999636298319901173, 0.1527660420658596667788554], [ 0.4645707413759609457172671, 0.1426067021736066117757461], [ 0.6005453046616810234696382, 0.1287539625393362276755158], [ 0.7209661773352293786170959, 0.1115666455473339947160239], [ 0.8227146565371428249789225, 0.0914900216224499994644621], [ 0.9031559036148179016426609, 0.0690445427376412265807083], [ 0.9602081521348300308527788, 0.0448142267656996003328382], [ 0.9924068438435844031890177, 0.0194617882297264770363120]], dtype=float), 20: np.array( [[-0.9931285991850949247861224, 0.0176140071391521183118620], [ -0.9639719272779137912676661, 0.0406014298003869413310400], [ -0.9122344282513259058677524, 0.0626720483341090635695065], [ -0.8391169718222188233945291, 0.0832767415767047487247581], [ -0.7463319064601507926143051, 0.1019301198172404350367501], [ -0.6360536807265150254528367, 0.1181945319615184173123774], [ 
-0.5108670019508270980043641, 0.1316886384491766268984945], [ -0.3737060887154195606725482, 0.1420961093183820513292983], [ -0.2277858511416450780804962, 0.1491729864726037467878287], [ -0.0765265211334973337546404, 0.1527533871307258506980843], [ 0.0765265211334973337546404, 0.1527533871307258506980843], [ 0.2277858511416450780804962, 0.1491729864726037467878287], [ 0.3737060887154195606725482, 0.1420961093183820513292983], [ 0.5108670019508270980043641, 0.1316886384491766268984945], [ 0.6360536807265150254528367, 0.1181945319615184173123774], [ 0.7463319064601507926143051, 0.1019301198172404350367501], [ 0.8391169718222188233945291, 0.0832767415767047487247581], [ 0.9122344282513259058677524, 0.0626720483341090635695065], [ 0.9639719272779137912676661, 0.0406014298003869413310400], [ 0.9931285991850949247861224, 0.0176140071391521183118620]], dtype=float), 32: np.array( [[-0.9972638618494815635449811, 0.0070186100094700966004071], [ -0.9856115115452683354001750, 0.0162743947309056706051706], [ -0.9647622555875064307738119, 0.0253920653092620594557526], [ -0.9349060759377396891709191, 0.0342738629130214331026877], [ -0.8963211557660521239653072, 0.0428358980222266806568786], [ -0.8493676137325699701336930, 0.0509980592623761761961632], [ -0.7944837959679424069630973, 0.0586840934785355471452836], [ -0.7321821187402896803874267, 0.0658222227763618468376501], [ -0.6630442669302152009751152, 0.0723457941088485062253994], [ -0.5877157572407623290407455, 0.0781938957870703064717409], [ -0.5068999089322293900237475, 0.0833119242269467552221991], [ -0.4213512761306353453641194, 0.0876520930044038111427715], [ -0.3318686022821276497799168, 0.0911738786957638847128686], [ -0.2392873622521370745446032, 0.0938443990808045656391802], [ -0.1444719615827964934851864, 0.0956387200792748594190820], [ -0.0483076656877383162348126, 0.0965400885147278005667648], [ 0.0483076656877383162348126, 0.0965400885147278005667648], [ 0.1444719615827964934851864, 0.0956387200792748594190820], [ 0.2392873622521370745446032, 0.0938443990808045656391802], [ 0.3318686022821276497799168, 0.0911738786957638847128686], [ 0.4213512761306353453641194, 0.0876520930044038111427715], [ 0.5068999089322293900237475, 0.0833119242269467552221991], [ 0.5877157572407623290407455, 0.0781938957870703064717409], [ 0.6630442669302152009751152, 0.0723457941088485062253994], [ 0.7321821187402896803874267, 0.0658222227763618468376501], [ 0.7944837959679424069630973, 0.0586840934785355471452836], [ 0.8493676137325699701336930, 0.0509980592623761761961632], [ 0.8963211557660521239653072, 0.0428358980222266806568786], [ 0.9349060759377396891709191, 0.0342738629130214331026877], [ 0.9647622555875064307738119, 0.0253920653092620594557526], [ 0.9856115115452683354001750, 0.0162743947309056706051706], [ 0.9972638618494815635449811, 0.0070186100094700966004071]], dtype=float), 64: np.array( [[-0.9993050417357721394569056, 0.0017832807216964329472961], [ -0.9963401167719552793469245, 0.0041470332605624676352875], [ -0.9910133714767443207393824, 0.0065044579689783628561174], [ -0.9833362538846259569312993, 0.0088467598263639477230309], [ -0.9733268277899109637418535, 0.0111681394601311288185905], [ -0.9610087996520537189186141, 0.0134630478967186425980608], [ -0.9464113748584028160624815, 0.0157260304760247193219660], [ -0.9295691721319395758214902, 0.0179517157756973430850453], [ -0.9105221370785028057563807, 0.0201348231535302093723403], [ -0.8893154459951141058534040, 0.0222701738083832541592983], [ -0.8659993981540928197607834, 
0.0243527025687108733381776], [ -0.8406292962525803627516915, 0.0263774697150546586716918], [ -0.8132653151227975597419233, 0.0283396726142594832275113], [ -0.7839723589433414076102205, 0.0302346570724024788679741], [ -0.7528199072605318966118638, 0.0320579283548515535854675], [ -0.7198818501716108268489402, 0.0338051618371416093915655], [ -0.6852363130542332425635584, 0.0354722132568823838106931], [ -0.6489654712546573398577612, 0.0370551285402400460404151], [ -0.6111553551723932502488530, 0.0385501531786156291289625], [ -0.5718956462026340342838781, 0.0399537411327203413866569], [ -0.5312794640198945456580139, 0.0412625632426235286101563], [ -0.4894031457070529574785263, 0.0424735151236535890073398], [ -0.4463660172534640879849477, 0.0435837245293234533768279], [ -0.4022701579639916036957668, 0.0445905581637565630601347], [ -0.3572201583376681159504426, 0.0454916279274181444797710], [ -0.3113228719902109561575127, 0.0462847965813144172959532], [ -0.2646871622087674163739642, 0.0469681828162100173253263], [ -0.2174236437400070841496487, 0.0475401657148303086622822], [ -0.1696444204239928180373136, 0.0479993885964583077281262], [ -0.1214628192961205544703765, 0.0483447622348029571697695], [ -0.0729931217877990394495429, 0.0485754674415034269347991], [ -0.0243502926634244325089558, 0.0486909570091397203833654], [ 0.0243502926634244325089558, 0.0486909570091397203833654], [ 0.0729931217877990394495429, 0.0485754674415034269347991], [ 0.1214628192961205544703765, 0.0483447622348029571697695], [ 0.1696444204239928180373136, 0.0479993885964583077281262], [ 0.2174236437400070841496487, 0.0475401657148303086622822], [ 0.2646871622087674163739642, 0.0469681828162100173253263], [ 0.3113228719902109561575127, 0.0462847965813144172959532], [ 0.3572201583376681159504426, 0.0454916279274181444797710], [ 0.4022701579639916036957668, 0.0445905581637565630601347], [ 0.4463660172534640879849477, 0.0435837245293234533768279], [ 0.4894031457070529574785263, 0.0424735151236535890073398], [ 0.5312794640198945456580139, 0.0412625632426235286101563], [ 0.5718956462026340342838781, 0.0399537411327203413866569], [ 0.6111553551723932502488530, 0.0385501531786156291289625], [ 0.6489654712546573398577612, 0.0370551285402400460404151], [ 0.6852363130542332425635584, 0.0354722132568823838106931], [ 0.7198818501716108268489402, 0.0338051618371416093915655], [ 0.7528199072605318966118638, 0.0320579283548515535854675], [ 0.7839723589433414076102205, 0.0302346570724024788679741], [ 0.8132653151227975597419233, 0.0283396726142594832275113], [ 0.8406292962525803627516915, 0.0263774697150546586716918], [ 0.8659993981540928197607834, 0.0243527025687108733381776], [ 0.8893154459951141058534040, 0.0222701738083832541592983], [ 0.9105221370785028057563807, 0.0201348231535302093723403], [ 0.9295691721319395758214902, 0.0179517157756973430850453], [ 0.9464113748584028160624815, 0.0157260304760247193219660], [ 0.9610087996520537189186141, 0.0134630478967186425980608], [ 0.9733268277899109637418535, 0.0111681394601311288185905], [ 0.9833362538846259569312993, 0.0088467598263639477230309], [ 0.9910133714767443207393824, 0.0065044579689783628561174], [ 0.9963401167719552793469245, 0.0041470332605624676352875], [ 0.9993050417357721394569056, 0.0017832807216964329472961]], dtype=float), 100: np.array( [[-0.9997137267734412336782285, 0.0007346344905056717304063], [ -0.9984919506395958184001634, 0.0017093926535181052395294], [ -0.9962951347331251491861317, 0.0026839253715534824194396], [ -0.9931249370374434596520099, 
0.0036559612013263751823425], [ -0.9889843952429917480044187, 0.0046244500634221193510958], [ -0.9838775407060570154961002, 0.0055884280038655151572119], [ -0.9778093584869182885537811, 0.0065469484508453227641521], [ -0.9707857757637063319308979, 0.0074990732554647115788287], [ -0.9628136542558155272936593, 0.0084438714696689714026208], [ -0.9539007829254917428493369, 0.0093804196536944579514182], [ -0.9440558701362559779627747, 0.0103078025748689695857821], [ -0.9332885350430795459243337, 0.0112251140231859771172216], [ -0.9216092981453339526669513, 0.0121314576629794974077448], [ -0.9090295709825296904671263, 0.0130259478929715422855586], [ -0.8955616449707269866985210, 0.0139077107037187726879541], [ -0.8812186793850184155733168, 0.0147758845274413017688800], [ -0.8660146884971646234107400, 0.0156296210775460027239369], [ -0.8499645278795912842933626, 0.0164680861761452126431050], [ -0.8330838798884008235429158, 0.0172904605683235824393442], [ -0.8153892383391762543939888, 0.0180959407221281166643908], [ -0.7968978923903144763895729, 0.0188837396133749045529412], [ -0.7776279096494954756275514, 0.0196530874944353058653815], [ -0.7575981185197071760356680, 0.0204032326462094327668389], [ -0.7368280898020207055124277, 0.0211334421125276415426723], [ -0.7153381175730564464599671, 0.0218430024162473863139537], [ -0.6931491993558019659486479, 0.0225312202563362727017970], [ -0.6702830156031410158025870, 0.0231974231852541216224889], [ -0.6467619085141292798326303, 0.0238409602659682059625604], [ -0.6226088602037077716041908, 0.0244612027079570527199750], [ -0.5978474702471787212648065, 0.0250575444815795897037642], [ -0.5725019326213811913168704, 0.0256294029102081160756420], [ -0.5465970120650941674679943, 0.0261762192395456763423087], [ -0.5201580198817630566468157, 0.0266974591835709626603847], [ -0.4932107892081909335693088, 0.0271926134465768801364916], [ -0.4657816497733580422492166, 0.0276611982207923882942042], [ -0.4378974021720315131089780, 0.0281027556591011733176483], [ -0.4095852916783015425288684, 0.0285168543223950979909368], [ -0.3808729816246299567633625, 0.0289030896011252031348762], [ -0.3517885263724217209723438, 0.0292610841106382766201190], [ -0.3223603439005291517224766, 0.0295904880599126425117545], [ -0.2926171880384719647375559, 0.0298909795933328309168368], [ -0.2625881203715034791689293, 0.0301622651051691449190687], [ -0.2323024818449739696495100, 0.0304040795264548200165079], [ -0.2017898640957359972360489, 0.0306161865839804484964594], [ -0.1710800805386032748875324, 0.0307983790311525904277139], [ -0.1402031372361139732075146, 0.0309504788504909882340635], [ -0.1091892035800611150034260, 0.0310723374275665165878102], [ -0.0780685828134366366948174, 0.0311638356962099067838183], [ -0.0468716824215916316149239, 0.0312248842548493577323765], [ -0.0156289844215430828722167, 0.0312554234538633569476425], [ 0.0156289844215430828722167, 0.0312554234538633569476425], [ 0.0468716824215916316149239, 0.0312248842548493577323765], [ 0.0780685828134366366948174, 0.0311638356962099067838183], [ 0.1091892035800611150034260, 0.0310723374275665165878102], [ 0.1402031372361139732075146, 0.0309504788504909882340635], [ 0.1710800805386032748875324, 0.0307983790311525904277139], [ 0.2017898640957359972360489, 0.0306161865839804484964594], [ 0.2323024818449739696495100, 0.0304040795264548200165079], [ 0.2625881203715034791689293, 0.0301622651051691449190687], [ 0.2926171880384719647375559, 0.0298909795933328309168368], [ 0.3223603439005291517224766, 0.0295904880599126425117545], [ 
0.3517885263724217209723438, 0.0292610841106382766201190], [ 0.3808729816246299567633625, 0.0289030896011252031348762], [ 0.4095852916783015425288684, 0.0285168543223950979909368], [ 0.4378974021720315131089780, 0.0281027556591011733176483], [ 0.4657816497733580422492166, 0.0276611982207923882942042], [ 0.4932107892081909335693088, 0.0271926134465768801364916], [ 0.5201580198817630566468157, 0.0266974591835709626603847], [ 0.5465970120650941674679943, 0.0261762192395456763423087], [ 0.5725019326213811913168704, 0.0256294029102081160756420], [ 0.5978474702471787212648065, 0.0250575444815795897037642], [ 0.6226088602037077716041908, 0.0244612027079570527199750], [ 0.6467619085141292798326303, 0.0238409602659682059625604], [ 0.6702830156031410158025870, 0.0231974231852541216224889], [ 0.6931491993558019659486479, 0.0225312202563362727017970], [ 0.7153381175730564464599671, 0.0218430024162473863139537], [ 0.7368280898020207055124277, 0.0211334421125276415426723], [ 0.7575981185197071760356680, 0.0204032326462094327668389], [ 0.7776279096494954756275514, 0.0196530874944353058653815], [ 0.7968978923903144763895729, 0.0188837396133749045529412], [ 0.8153892383391762543939888, 0.0180959407221281166643908], [ 0.8330838798884008235429158, 0.0172904605683235824393442], [ 0.8499645278795912842933626, 0.0164680861761452126431050], [ 0.8660146884971646234107400, 0.0156296210775460027239369], [ 0.8812186793850184155733168, 0.0147758845274413017688800], [ 0.8955616449707269866985210, 0.0139077107037187726879541], [ 0.9090295709825296904671263, 0.0130259478929715422855586], [ 0.9216092981453339526669513, 0.0121314576629794974077448], [ 0.9332885350430795459243337, 0.0112251140231859771172216], [ 0.9440558701362559779627747, 0.0103078025748689695857821], [ 0.9539007829254917428493369, 0.0093804196536944579514182], [ 0.9628136542558155272936593, 0.0084438714696689714026208], [ 0.9707857757637063319308979, 0.0074990732554647115788287], [ 0.9778093584869182885537811, 0.0065469484508453227641521], [ 0.9838775407060570154961002, 0.0055884280038655151572119], [ 0.9889843952429917480044187, 0.0046244500634221193510958], [ 0.9931249370374434596520099, 0.0036559612013263751823425], [ 0.9962951347331251491861317, 0.0026839253715534824194396], [ 0.9984919506395958184001634, 0.0017093926535181052395294], [ 0.9997137267734412336782285, 0.0007346344905056717304063]], dtype=float), } return weights[n][:,0], weights[n][:,1] def shanks_table(seq, table=None, randomized=False): r'''Copied from sympy.mpmath.mpmath.calculus.extrapolation.py This shanks function is taken almost verbatim (minus an initial ctx argument???) from sympy.mpmath.mpmath.calculus.extrapolation.py: - http://docs.sympy.org/dev/modules/mpmath/calculus/sums_limits.html#mpmath.shanks - https://github.com/sympy/sympy/blob/master/sympy/mpmath/calculus/extrapolation.py mpmath is BSD license Notes ----- Given a list ``seq`` of the first `N` elements of a slowly convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks transformation often provides strong convergence acceleration, especially if the sequence is oscillating. The iterated Shanks transformation is computed using the Wynn epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full epsilon table generated by Wynn's algorithm, which can be read off as follows: - The table is a list of lists forming a lower triangular matrix, where higher row and column indices correspond to more accurate values. 
- The columns with even index hold dummy entries (required for the computation) and the columns with odd index hold the actual extrapolates. - The last element in the last row is typically the most accurate estimate of the limit. - The difference to the third last element in the last row provides an estimate of the approximation error. - The magnitude of the second last element provides an estimate of the numerical accuracy lost to cancellation. For convenience, so the extrapolation is stopped at an odd index so that ``shanks(seq)[-1][-1]`` always gives an estimate of the limit. Optionally, an existing table can be passed to :func:`~mpmath.shanks`. This can be used to efficiently extend a previous computation after new elements have been appended to the sequence. The table will then be updated in-place. The Shanks transformation: The Shanks transformation is defined as follows (see [2]): given the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is given by .. math :: S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k} The Shanks transformation gives the exact limit `A_{\infty}` in a single step if `A_k = A + a q^k`. Note in particular that it extrapolates the exact sum of a geometric series in a single step. Applying the Shanks transformation once often improves convergence substantially for an arbitrary sequence, but the optimal effect is obtained by applying it iteratively: `S(S(A_k)), S(S(S(A_k))), \ldots`. Wynn's epsilon algorithm provides an efficient way to generate the table of iterated Shanks transformations. It reduces the computation of each element to essentially a single division, at the cost of requiring dummy elements in the table. See [1] for details. Precision issues: Due to cancellation effects, the sequence must be typically be computed at a much higher precision than the target accuracy of the extrapolation. If the Shanks transformation converges to the exact limit (such as if the sequence is a geometric series), then a division by zero occurs. By default, :func:`~mpmath.shanks` handles this case by terminating the iteration and returning the table it has generated so far. With *randomized=True*, it will instead replace the zero by a pseudorandom number close to zero. (TODO: find a better solution to this problem.) Examples (truncated from original) We illustrate by applying Shanks transformation to the Leibniz series for `\pi`: >>> S = [4*sum((-1)**n/(2*n+1) for n in range(m)) ... for m in range(1,30)] >>> >>> T = shanks_table(S[:7]) >>> for row in T: ... print('['+', '.join(['{:.6g}'.format(v) for v in row])+']') ... 
[-0.75] [1.25, 3.16667] [-1.75, 3.13333, -28.75] [2.25, 3.14524, 82.25, 3.14234] [-2.75, 3.13968, -177.75, 3.14139, -969.938] [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161] ''' if len(seq) < 2: raise ValueError("seq should be of minimum length 2") if table: START = len(table) else: START = 0 table = [] STOP = len(seq) - 1 if STOP & 1: STOP -= 1 one = 1.0#ctx.one eps = np.spacing(1)#+ctx.eps if randomized: from random import Random rnd = Random() rnd.seed(START) for i in range(START, STOP): row = [] for j in range(i+1): if j == 0: a, b = 0, seq[i+1]-seq[i] else: if j == 1: a = seq[i] else: a = table[i-1][j-2] b = row[j-1] - table[i-1][j-1] if not b: if randomized: b = rnd.getrandbits(10)*eps elif i & 1: return table[:-1] else: return table row.append(a + one/b) table.append(row) return table def shanks(seq, ind=0): """Iterated Shanks transformation to accelerate series convergence Though normally applied to a 1d array, `shanks` will actually operate on the last dimension of seq which allows for multi-dimensional arrays. e.g. for 2d data each row of sequence whould be a separate sequence Parameters ---------- seq : list or array If seq is a numpy array then it's elements will be modified in-place. If seq is a list then seq will not be modified. ind : int, optional Start index for extrapolation. Can be negative, e.g. ind=-5 will extrapolate based on the last 5 elements of the `seq`. default ind=0 i.e. use all elements. Returns ------- out : array with 1 dim less than `seq`, or float if seq is only 1d. Extrapolated value. If `seq` is a numpy array then due to in-place modification the result will also be in seq[..., -1]. See Also -------- shanks_table : Copy of sympy.mpmath.calculus.extrapolation.shanks Provides the whole epsilon table and error estimates. numpy.apply_along_axis : If your sequence is not in the last dimension of an array then use np.apply_along_axis to apply it along a specific axis. Notes ----- I think this will also work on multi-dimensional data. The shanks extrapolation will be performed on the last dimension of the data. So for 2d data each row is a separate sequence. For sequence: .. math A=\\sum_{m=0}^{\\infty}a_m The partial sum is first defined as: .. math:: A_n=\\sum_{m=0}^{n}a_m This forms a new sequence, the convergence of which can be sped up by repeated use of: .. math:: S(A_n)=\\frac{A_{n+1}A_{n-1}-A_n^2}{A_{n+1}-2A_n+A_{n-1}} """ seq = np.atleast_1d(seq) if ind is None: return +seq[..., -1] if ind < 0: ind = seq.shape[-1] + ind ind = max(ind, 0) for i in range(ind, seq.shape[-1] - 2, 2): denom = (seq[..., i + 2:] - 2 * seq[..., i + 1: -1] + seq[..., i:-2]) if np.any(denom==0): return +seq[..., -1] seq[..., i + 2:] = ( (seq[..., i + 2:] * seq[..., i:-2] - seq[..., i + 1:-1]**2) / denom) return +seq[...,-1] def gk_quad(f, a, b, args=(), n=10, sum_intervals=False): """Integration by Gauss-Kronrod quadrature between intervals Parameters ---------- f : function or method Function to integrate. a, b : 1d array Limits of integration. Must have len(a)==len(b). args : tuple, optional `args` will be passed to f using f(x, *args). Default args=(). n : [7,10,15,20,25,30], optional Number of gauss quadrature evaluation points. Default n=10. There will be 2*n+1 Kronrod quadrature points. sum_intervals : [False, True] If sum_intervals=True the integral for each a and b, will be summed. Otherwise each interval integration will be returned. The sum of the error estimates will also be summed. Returns ------- igral : ndarray Integral of f between a and b. 
If sum_intervals=False then shape of igral will be (len(a), ...) where ... corresponds to however many dimensions are returned from f with scalar arguments. Each value in igral corresponds to the corresponding a-b interval. If sum_intervals=True then igral will have shape (...). err_estimate : ndarray same size as igal Estimate of the error in the integral. i.e. absolute value of fine integral minus coarse integral. """ ai = np.atleast_1d(a) bi = np.atleast_1d(b) xj_, wj1, wj2 = gauss_kronrod_abscissae_and_weights(n) # dim1 = each integration limits, a and b # dim2 = each quadrature point ai = ai[:, np.newaxis] bi = bi[:, np.newaxis] xj_ = xj_[np.newaxis, :] wj1 = wj1[np.newaxis, :] wj2 = wj2[np.newaxis, :] bma = (bi - ai) / 2 # b minus a bpa = (ai + bi) /2 # b plus a xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b] #get shape of output with scalar argument and form a slice that will ensure #any extra dims are appended to the args. extra = np.array(f(xij.flat[0], *args)) gen_slice = [slice(None)] * xij.ndim + [None] * extra.ndim fij = f(xij[gen_slice], *args) # igral1 = np.ravel(bma) * np.sum(fij * wj1, axis=1) # igral2 = np.ravel(bma) * np.sum(fij * wj2, axis=1) # igral1 = bma[:, 0] * np.sum(fij * wj1, axis=1) # igral2 = bma[:, 0] * np.sum(fij * wj2, axis=1) igral1 = np.sum(bma[gen_slice] *fij * wj1[gen_slice], axis=1) igral2 = np.sum(bma[gen_slice] *fij * wj2[gen_slice], axis=1) err_estimate = np.abs(igral2 - igral1) if sum_intervals: igral1 = np.sum(igral1, axis=0) igral2 = np.sum(igral2, axis=0) err_estimate = np.sum(err_estimate, axis=0) return igral2, err_estimate def gl_quad(f, a, b, args=(), n=10, shanks_ind=False, sum_intervals=False): """Integration by Gauss-Legendre quadrature with subdivided interval Parameters ---------- f : function or method function to integrate. Must accept vector aguments for x. Might need to use numpy.vecotrize. a, b : 1d array limits of integration args : tuple, optional args will be passed to f using f(x, *args). default=() n : [2-20, 32, 64, 100], optional number of quadrature evaluation points. default=10 sum_intervals : [False, True] If sum_intervals=True the integral for each a and b, will be summed. Otherwise each interval integration will be returned. Returns ------- igral : ndarray Integral of f between a and b. If sum_intervals=False then shape of igral will be (len(a), ...) where ... corresponds to however many dimensions are returned from f with scalar arguments. Each value in igral corresponds to the corresponding a-b interval. If sum_intervals=True then igral will have shape (...). Notes ----- Be careful when using large values of n.There may be precision issues. If f returns an ndarray when x is scalar. igral will have additonal dimensions corresponding to those of the f-with-scalar-x output. """ ai = np.atleast_1d(a) bi = np.atleast_1d(b) xj_, wj = gauss_legendre_abscissae_and_weights(n) # dim1 = each integration limits, a and b # dim2 = each quadrature point ai = ai[:, np.newaxis] bi = bi[:, np.newaxis] xj_ = xj_[np.newaxis, :] wj = wj[np.newaxis, :] bma = (bi - ai) / 2 # b minus a bpa = (ai + bi) /2 # b plus a xij = bma * xj_ + bpa # xj_ are in [-1, 1] so need to transform to [a, b] #get shape of output with scalar argument and form a slice that will ensure #any extra dims are appended to the args. 
extra = np.array(f(xij.flat[0], *args)) gen_slice = [slice(None)] * xij.ndim + [None] * extra.ndim fij = f(xij[gen_slice], *args) igral = np.sum(bma[gen_slice] * fij *wj[gen_slice], axis=1) if sum_intervals: igral = np.sum(igral, axis=0) return igral
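# --- Hedged usage sketch (not part of the original module): a minimal
# demonstration of the gl_quad / gk_quad routines defined above, written as
# an appended __main__ block so it only relies on names already present in
# this file. The interval limits and integrand are illustrative assumptions;
# analytic values in the comments are for comparison only.
if __name__ == '__main__':
    def f(x):
        # smooth integrand: integral of x**2 over [0, 1] is 1/3, over [1, 2] is 7/3
        return x ** 2

    a = np.array([0.0, 1.0])
    b = np.array([1.0, 2.0])

    # Gauss-Legendre: one integral per (a, b) interval
    print(gl_quad(f, a, b, n=10))

    # Gauss-Kronrod: also returns an error estimate per interval
    igral, err = gk_quad(f, a, b, n=10)
    print(igral, err)

    # sum_intervals=True collapses the per-interval results into a single value
    print(gl_quad(f, a, b, n=10, sum_intervals=True))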
gpl-3.0
evgchz/scikit-learn
sklearn/ensemble/gradient_boosting.py
6
63474
"""Gradient Boosted Regression Trees This module contains methods for fitting gradient boosted regression trees for both classification and regression. The module structure is the following: - The ``BaseGradientBoosting`` base class implements a common ``fit`` method for all the estimators in the module. Regression and classification only differ in the concrete ``LossFunction`` used. - ``GradientBoostingClassifier`` implements gradient boosting for classification problems. - ``GradientBoostingRegressor`` implements gradient boosting for regression problems. """ # Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti, # Arnaud Joly # License: BSD 3 clause from __future__ import print_function from __future__ import division from abc import ABCMeta, abstractmethod from warnings import warn from time import time import numbers import numpy as np from scipy import stats from .base import BaseEnsemble from ..base import BaseEstimator from ..base import ClassifierMixin from ..base import RegressorMixin from ..utils import check_random_state, check_array, check_X_y, column_or_1d from ..utils.extmath import logsumexp from ..utils.stats import _weighted_percentile from ..externals import six from ..feature_selection.from_model import _LearntSelectorMixin from ..tree.tree import DecisionTreeRegressor from ..tree._tree import DTYPE, TREE_LEAF from ..tree._tree import PresortBestSplitter from ..tree._tree import FriedmanMSE from ._gradient_boosting import predict_stages from ._gradient_boosting import predict_stage from ._gradient_boosting import _random_sample_mask class QuantileEstimator(BaseEstimator): """An estimator predicting the alpha-quantile of the training targets.""" def __init__(self, alpha=0.9): if not 0 < alpha < 1.0: raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha) self.alpha = alpha def fit(self, X, y, sample_weight=None): if sample_weight is None: self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0) else: self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.quantile) return y class MeanEstimator(BaseEstimator): """An estimator predicting the mean of the training targets.""" def fit(self, X, y, sample_weight=None): if sample_weight is None: self.mean = np.mean(y) else: self.mean = np.average(y, weights=sample_weight) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.mean) return y class LogOddsEstimator(BaseEstimator): """An estimator predicting the log odds ratio.""" scale = 1.0 def fit(self, X, y, sample_weight=None): # pre-cond: pos, neg are encoded as 1, 0 if sample_weight is None: pos = np.sum(y) neg = y.shape[0] - pos else: pos = np.sum(sample_weight * y) neg = np.sum(sample_weight * (1 - y)) if neg == 0 or pos == 0: raise ValueError('y contains non binary labels.') self.prior = self.scale * np.log(pos / neg) def predict(self, X): y = np.empty((X.shape[0], 1), dtype=np.float64) y.fill(self.prior) return y class ScaledLogOddsEstimator(LogOddsEstimator): """Log odds ratio scaled by 0.5 -- for exponential loss. """ scale = 0.5 class PriorProbabilityEstimator(BaseEstimator): """An estimator predicting the probability of each class in the training data. 
""" def fit(self, X, y, sample_weight=None): if sample_weight is None: sample_weight = np.ones_like(y, dtype=np.float) class_counts = np.bincount(y, weights=sample_weight) self.priors = class_counts / class_counts.sum() def predict(self, X): y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64) y[:] = self.priors return y class ZeroEstimator(BaseEstimator): """An estimator that simply predicts zero. """ def fit(self, X, y, sample_weight=None): if np.issubdtype(y.dtype, int): # classification self.n_classes = np.unique(y).shape[0] if self.n_classes == 2: self.n_classes = 1 else: # regression self.n_classes = 1 def predict(self, X): y = np.empty((X.shape[0], self.n_classes), dtype=np.float64) y.fill(0.0) return y class LossFunction(six.with_metaclass(ABCMeta, object)): """Abstract base class for various loss functions. Attributes ---------- K : int The number of regression trees to be induced; 1 for regression and binary classification; ``n_classes`` for multi-class classification. """ is_multi_class = False def __init__(self, n_classes): self.K = n_classes def init_estimator(self): """Default ``init`` estimator for loss function. """ raise NotImplementedError() @abstractmethod def __call__(self, y, pred, sample_weight=None): """Compute the loss of prediction ``pred`` and ``y``. """ @abstractmethod def negative_gradient(self, y, y_pred, **kargs): """Compute the negative gradient. Parameters --------- y : np.ndarray, shape=(n,) The target labels. y_pred : np.ndarray, shape=(n,): The predictions. """ def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Update the terminal regions (=leaves) of the given tree and updates the current predictions of the model. Traverses tree and invokes template method `_update_terminal_region`. Parameters ---------- tree : tree.Tree The tree object. X : np.ndarray, shape=(n, m) The data array. y : np.ndarray, shape=(n,) The target labels. residual : np.ndarray, shape=(n,) The residuals (usually the negative gradient). y_pred : np.ndarray, shape=(n,): The predictions. sample_weight np.ndarray, shape=(n,): The weight of each sample. """ # compute leaf for each sample in ``X``. terminal_regions = tree.apply(X) # mask all which are not in sample mask. masked_terminal_regions = terminal_regions.copy() masked_terminal_regions[~sample_mask] = -1 # update each leaf (= perform line search) for leaf in np.where(tree.children_left == TREE_LEAF)[0]: self._update_terminal_region(tree, masked_terminal_regions, leaf, X, y, residual, y_pred[:, k], sample_weight) # update predictions (both in-bag and out-of-bag) y_pred[:, k] += (learning_rate * tree.value[:, 0, 0].take(terminal_regions, axis=0)) @abstractmethod def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Template method for updating terminal regions (=leaves). """ class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for regression loss functions. """ def __init__(self, n_classes): if n_classes != 1: raise ValueError("``n_classes`` must be 1 for regression but " "was %r" % n_classes) super(RegressionLossFunction, self).__init__(n_classes) class LeastSquaresError(RegressionLossFunction): """Loss function for least squares (LS) estimation. Terminal regions need not to be updated for least squares. 
""" def init_estimator(self): return MeanEstimator() def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.mean((y - pred.ravel()) ** 2.0) else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)) def negative_gradient(self, y, pred, **kargs): return y - pred.ravel() def update_terminal_regions(self, tree, X, y, residual, y_pred, sample_weight, sample_mask, learning_rate=1.0, k=0): """Least squares does not need to update terminal regions. But it has to update the predictions. """ # update predictions y_pred[:, k] += learning_rate * tree.predict(X).ravel() def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): pass class LeastAbsoluteError(RegressionLossFunction): """Loss function for least absolute deviation (LAD) regression. """ def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): if sample_weight is None: return np.abs(y - pred.ravel()).mean() else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * np.abs(y - pred.ravel())) def negative_gradient(self, y, pred, **kargs): """1.0 if y - pred > 0.0 else -1.0""" pred = pred.ravel() return 2.0 * (y - pred > 0.0) - 1.0 def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """LAD updates terminal regions to median estimates. """ terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0) tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50) class HuberLossFunction(RegressionLossFunction): """Huber loss function for robust regression. M-Regression proposed in Friedman 2001. See --- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. 
""" def __init__(self, n_classes, alpha=0.9): super(HuberLossFunction, self).__init__(n_classes) self.alpha = alpha self.gamma = None def init_estimator(self): return QuantileEstimator(alpha=0.5) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred gamma = self.gamma if gamma is None: if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma if sample_weight is None: sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / y.shape[0] else: sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0) lin_loss = np.sum(gamma * sample_weight[~gamma_mask] * (np.abs(diff[~gamma_mask]) - gamma / 2.0)) loss = (sq_loss + lin_loss) / sample_weight.sum() return loss def negative_gradient(self, y, pred, sample_weight=None, **kargs): pred = pred.ravel() diff = y - pred if sample_weight is None: gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100) else: gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100) gamma_mask = np.abs(diff) <= gamma residual = np.zeros((y.shape[0],), dtype=np.float64) residual[gamma_mask] = diff[gamma_mask] residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask]) self.gamma = gamma return residual def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] sample_weight = sample_weight.take(terminal_region, axis=0) gamma = self.gamma diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) median = _weighted_percentile(diff, sample_weight, percentile=50) diff_minus_median = diff - median tree.value[leaf, 0] = median + np.mean( np.sign(diff_minus_median) * np.minimum(np.abs(diff_minus_median), gamma)) class QuantileLossFunction(RegressionLossFunction): """Loss function for quantile regression. Quantile regression allows to estimate the percentiles of the conditional distribution of the target. """ def __init__(self, n_classes, alpha=0.9): super(QuantileLossFunction, self).__init__(n_classes) assert 0 < alpha < 1.0 self.alpha = alpha self.percentile = alpha * 100.0 def init_estimator(self): return QuantileEstimator(self.alpha) def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() diff = y - pred alpha = self.alpha mask = y > pred if sample_weight is None: loss = (alpha * diff[mask].sum() + (1.0 - alpha) * diff[~mask].sum()) / y.shape[0] else: loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) + (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) / sample_weight.sum()) return loss def negative_gradient(self, y, pred, **kargs): alpha = self.alpha pred = pred.ravel() mask = y > pred return (alpha * mask) - ((1.0 - alpha) * ~mask) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] diff = (y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)) sample_weight = sample_weight.take(terminal_region, axis=0) val = _weighted_percentile(diff, sample_weight, self.percentile) tree.value[leaf, 0] = val class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)): """Base class for classification loss functions. 
""" def _score_to_proba(self, score): """Template method to convert scores to probabilities. If the loss does not support probabilites raises AttributeError. """ raise TypeError('%s does not support predict_proba' % type(self).__name__) @abstractmethod def _score_to_decision(self, score): """Template method to convert scores to decisions. Returns int arrays. """ class BinomialDeviance(ClassificationLossFunction): """Binomial deviance loss function for binary classification. Binary classification is a special case; here, we only need to fit one tree instead of ``n_classes`` trees. """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(BinomialDeviance, self).__init__(1) def init_estimator(self): return LogOddsEstimator() def __call__(self, y, pred, sample_weight=None): """Compute the deviance (= 2 * negative log-likelihood). """ # logaddexp(0, v) == log(1.0 + exp(v)) pred = pred.ravel() if sample_weight is None: return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred)) else: return (-2.0 / sample_weight.sum() * np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred)))) def negative_gradient(self, y, pred, **kargs): """Compute the residual (= negative gradient). """ return y - 1.0 / (1.0 + np.exp(-pred.ravel())) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. our node estimate is given by: sum(w * (y - prob)) / sum(w * prob * (1 - prob)) we take advantage that: y - prob = residual """ terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel())) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class MultinomialDeviance(ClassificationLossFunction): """Multinomial deviance loss function for multi-class classification. For multi-class classification we need to fit ``n_classes`` trees at each stage. """ is_multi_class = True def __init__(self, n_classes): if n_classes < 3: raise ValueError("{0:s} requires more than 2 classes.".format( self.__class__.__name__)) super(MultinomialDeviance, self).__init__(n_classes) def init_estimator(self): return PriorProbabilityEstimator() def __call__(self, y, pred, sample_weight=None): # create one-hot label encoding Y = np.zeros((y.shape[0], self.K), dtype=np.float64) for k in range(self.K): Y[:, k] = y == k if sample_weight is None: return np.sum(-1 * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) else: return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) + logsumexp(pred, axis=1)) def negative_gradient(self, y, pred, k=0, **kwargs): """Compute negative gradient for the ``k``-th class. """ return y - np.nan_to_num(np.exp(pred[:, k] - logsumexp(pred, axis=1))) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): """Make a single Newton-Raphson step. 
""" terminal_region = np.where(terminal_regions == leaf)[0] residual = residual.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) numerator = np.sum(sample_weight * residual) numerator *= (self.K - 1) / self.K denominator = np.sum(sample_weight * (y - residual) * (1.0 - y + residual)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): return np.nan_to_num( np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis]))) def _score_to_decision(self, score): proba = self._score_to_proba(score) return np.argmax(proba, axis=1) class ExponentialLoss(ClassificationLossFunction): """Exponential loss function for binary classification. Same loss as AdaBoost. See --- Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007 """ def __init__(self, n_classes): if n_classes != 2: raise ValueError("{0:s} requires 2 classes.".format( self.__class__.__name__)) # we only need to fit one tree for binary clf. super(ExponentialLoss, self).__init__(1) def init_estimator(self): return ScaledLogOddsEstimator() def __call__(self, y, pred, sample_weight=None): pred = pred.ravel() if sample_weight is None: return np.mean(np.exp(-(2. * y - 1.) * pred)) else: return (1.0 / sample_weight.sum()) * \ np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)) def negative_gradient(self, y, pred, **kargs): y_ = -(2. * y - 1.) return y_ * np.exp(y_ * pred.ravel()) def _update_terminal_region(self, tree, terminal_regions, leaf, X, y, residual, pred, sample_weight): terminal_region = np.where(terminal_regions == leaf)[0] pred = pred.take(terminal_region, axis=0) y = y.take(terminal_region, axis=0) sample_weight = sample_weight.take(terminal_region, axis=0) y_ = 2. * y - 1. numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred)) denominator = np.sum(sample_weight * np.exp(-y_ * pred)) if denominator == 0.0: tree.value[leaf, 0, 0] = 0.0 else: tree.value[leaf, 0, 0] = numerator / denominator def _score_to_proba(self, score): proba = np.ones((score.shape[0], 2), dtype=np.float64) proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel())) proba[:, 0] -= proba[:, 1] return proba def _score_to_decision(self, score): return (score.ravel() >= 0.0).astype(np.int) LOSS_FUNCTIONS = {'ls': LeastSquaresError, 'lad': LeastAbsoluteError, 'huber': HuberLossFunction, 'quantile': QuantileLossFunction, 'deviance': None, # for both, multinomial and binomial 'exponential': ExponentialLoss, } INIT_ESTIMATORS = {'zero': ZeroEstimator} class VerboseReporter(object): """Reports verbose output to stdout. If ``verbose==1`` output is printed once in a while (when iteration mod verbose_mod is zero).; if larger than 1 then output is printed for each update. """ def __init__(self, verbose): self.verbose = verbose def init(self, est, begin_at_stage=0): # header fields and line format str header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}'] # do oob? 
if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr:>16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time:>16s}') # print the header line print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) # plot verbose info each time i % verbose_mod == 0 self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage def update(self, j, est): """Update reporter with new iteration. """ do_oob = est.subsample < 1 # we need to take into account if we fit additional estimators. i = j - self.begin_at_stage # iteration relative to the start iter if (i + 1) % self.verbose_mod == 0: oob_impr = est.oob_improvement_[j] if do_oob else 0 remaining_time = ((est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)) if remaining_time > 60: remaining_time = '{0:.2f}m'.format(remaining_time / 60.0) else: remaining_time = '{0:.2f}s'.format(remaining_time) print(self.verbose_fmt.format(iter=j + 1, train_score=est.train_score_[j], oob_impr=oob_impr, remaining_time=remaining_time)) if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0): # adjust verbose frequency (powers of 10) self.verbose_mod *= 10 class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble, _LearntSelectorMixin)): """Abstract base class for Gradient Boosting. """ @abstractmethod def __init__(self, loss, learning_rate, n_estimators, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, init, subsample, max_features, random_state, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False): self.n_estimators = n_estimators self.learning_rate = learning_rate self.loss = loss self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.subsample = subsample self.max_features = max_features self.max_depth = max_depth self.init = init self.random_state = random_state self.alpha = alpha self.verbose = verbose self.max_leaf_nodes = max_leaf_nodes self.warm_start = warm_start self.estimators_ = np.empty((0, 0), dtype=np.object) def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask, criterion, splitter, random_state): """Fit another stage of ``n_classes_`` trees to the boosting model. """ assert sample_mask.dtype == np.bool loss = self.loss_ original_y = y for k in range(loss.K): if loss.is_multi_class: y = np.array(original_y == k, dtype=np.float64) residual = loss.negative_gradient(y, y_pred, k=k, sample_weight=sample_weight) # induce regression tree on residuals tree = DecisionTreeRegressor( criterion=criterion, splitter=splitter, max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, max_features=self.max_features, max_leaf_nodes=self.max_leaf_nodes, random_state=random_state) if self.subsample < 1.0: # no inplace multiplication! sample_weight = sample_weight * sample_mask.astype(np.float64) tree.fit(X, residual, sample_weight=sample_weight, check_input=False) # update tree leaves loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred, sample_weight, sample_mask, self.learning_rate, k=k) # add tree to ensemble self.estimators_[i, k] = tree return y_pred def _check_params(self): """Check validity of parameters and raise ValueError if not valid. 
""" if self.n_estimators <= 0: raise ValueError("n_estimators must be greater than 0 but " "was %r" % self.n_estimators) if self.learning_rate <= 0.0: raise ValueError("learning_rate must be greater than 0 but " "was %r" % self.learning_rate) if (self.loss not in self._SUPPORTED_LOSS or self.loss not in LOSS_FUNCTIONS): raise ValueError("Loss '{0:s}' not supported. ".format(self.loss)) if self.loss == 'deviance': loss_class = (MultinomialDeviance if len(self.classes_) > 2 else BinomialDeviance) else: loss_class = LOSS_FUNCTIONS[self.loss] if self.loss in ('huber', 'quantile'): self.loss_ = loss_class(self.n_classes_, self.alpha) else: self.loss_ = loss_class(self.n_classes_) if not (0.0 < self.subsample <= 1.0): raise ValueError("subsample must be in (0,1] but " "was %r" % self.subsample) if self.init is not None: if isinstance(self.init, six.string_types): if self.init not in INIT_ESTIMATORS: raise ValueError('init="%s" is not supported' % self.init) else: if (not hasattr(self.init, 'fit') or not hasattr(self.init, 'predict')): raise ValueError("init=%r must be valid BaseEstimator " "and support both fit and " "predict" % self.init) if not (0.0 < self.alpha < 1.0): raise ValueError("alpha must be in (0.0, 1.0) but " "was %r" % self.alpha) if isinstance(self.max_features, six.string_types): if self.max_features == "auto": # if is_classification if self.n_classes_ > 1: max_features = max(1, int(np.sqrt(self.n_features))) else: # is regression max_features = self.n_features elif self.max_features == "sqrt": max_features = max(1, int(np.sqrt(self.n_features))) elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features))) else: raise ValueError("Invalid value for max_features: %r. " "Allowed string values are 'auto', 'sqrt' " "or 'log2'." % self.max_features) elif self.max_features is None: max_features = self.n_features elif isinstance(self.max_features, (numbers.Integral, np.integer)): max_features = self.max_features else: # float max_features = int(self.max_features * self.n_features) self.max_features_ = max_features def _init_state(self): """Initialize model state and allocate model state data structures. """ if self.init is None: self.init_ = self.loss_.init_estimator() elif isinstance(self.init, six.string_types): self.init_ = INIT_ESTIMATORS[self.init]() else: self.init_ = self.init self.estimators_ = np.empty((self.n_estimators, self.loss_.K), dtype=np.object) self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64) # do oob? if self.subsample < 1.0: self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64) def _clear_state(self): """Clear the state of the gradient boosting model. """ if hasattr(self, 'estimators_'): self.estimators_ = np.empty((0, 0), dtype=np.object) if hasattr(self, 'train_score_'): del self.train_score_ if hasattr(self, 'oob_improvement_'): del self.oob_improvement_ if hasattr(self, 'init_'): del self.init_ def _resize_state(self): """Add additional ``n_estimators`` entries to all attributes. 
""" # self.n_estimators is the number of additional est to fit total_n_estimators = self.n_estimators if total_n_estimators < self.estimators_.shape[0]: raise ValueError('resize with smaller n_estimators %d < %d' % (total_n_estimators, self.estimators_[0])) self.estimators_.resize((total_n_estimators, self.loss_.K)) self.train_score_.resize(total_n_estimators) if (self.subsample < 1 or hasattr(self, 'oob_improvement_')): # if do oob resize arrays or create new if not available if hasattr(self, 'oob_improvement_'): self.oob_improvement_.resize(total_n_estimators) else: self.oob_improvement_ = np.zeros((total_n_estimators,), dtype=np.float64) def _is_initialized(self): return len(getattr(self, 'estimators_', [])) > 0 def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes ``0, 1, ..., n_classes_-1`` sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. 
""" # if not warmstart - clear the estimator state if not self.warm_start: self._clear_state() # Check input X, y = check_X_y(X, y, dtype=DTYPE) n_samples, n_features = X.shape if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float32) else: sample_weight = column_or_1d(sample_weight, warn=True) if y.shape[0] != n_samples: raise ValueError('Shape mismatch of X and y: %d != %d' % (n_samples, y.shape[0])) if n_samples != sample_weight.shape[0]: raise ValueError('Shape mismatch of sample_weight: %d != %d' % (sample_weight.shape[0], n_samples)) self.n_features = n_features random_state = check_random_state(self.random_state) self._check_params() if not self._is_initialized(): # init state self._init_state() # fit initial model - FIXME make sample_weight optional self.init_.fit(X, y, sample_weight) # init predictions y_pred = self.init_.predict(X) begin_at_stage = 0 else: # add more estimators to fitted model # invariant: warm_start = True if self.n_estimators < self.estimators_.shape[0]: raise ValueError('n_estimators=%d must be larger or equal to ' 'estimators_.shape[0]=%d when ' 'warm_start==True' % (self.n_estimators, self.estimators_.shape[0])) begin_at_stage = self.estimators_.shape[0] y_pred = self._decision_function(X) self._resize_state() # fit the boosting stages n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state, begin_at_stage, monitor) # change shape of arrays after fit (early-stopping or additional ests) if n_stages != self.estimators_.shape[0]: self.estimators_ = self.estimators_[:n_stages] self.train_score_ = self.train_score_[:n_stages] if hasattr(self, 'oob_improvement_'): self.oob_improvement_ = self.oob_improvement_[:n_stages] return self def _fit_stages(self, X, y, y_pred, sample_weight, random_state, begin_at_stage=0, monitor=None): """Iteratively fits the stages. For each stage it computes the progress (OOB, train score) and delegates to ``_fit_stage``. Returns the number of stages fit; might differ from ``n_estimators`` due to early stopping. 
""" n_samples = X.shape[0] do_oob = self.subsample < 1.0 sample_mask = np.ones((n_samples, ), dtype=np.bool) n_inbag = max(1, int(self.subsample * n_samples)) loss_ = self.loss_ # init criterion and splitter criterion = FriedmanMSE(1) splitter = PresortBestSplitter(criterion, self.max_features_, self.min_samples_leaf, self.min_weight_fraction_leaf, random_state) if self.verbose: verbose_reporter = VerboseReporter(self.verbose) verbose_reporter.init(self, begin_at_stage) # perform boosting iterations i = begin_at_stage for i in range(begin_at_stage, self.n_estimators): # subsampling if do_oob: sample_mask = _random_sample_mask(n_samples, n_inbag, random_state) # OOB score before adding this stage old_oob_score = loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask]) # fit next stage of trees y_pred = self._fit_stage(i, X, y, y_pred, sample_weight, sample_mask, criterion, splitter, random_state) # track deviance (= loss) if do_oob: self.train_score_[i] = loss_(y[sample_mask], y_pred[sample_mask], sample_weight[sample_mask]) self.oob_improvement_[i] = (old_oob_score - loss_(y[~sample_mask], y_pred[~sample_mask], sample_weight[~sample_mask])) else: # no need to fancy index w/ no subsampling self.train_score_[i] = loss_(y, y_pred, sample_weight) if self.verbose > 0: verbose_reporter.update(i, self) if monitor is not None: early_stopping = monitor(i, self, locals()) if early_stopping: break return i + 1 def _make_estimator(self, append=True): # we don't need _make_estimator raise NotImplementedError() def _init_decision_function(self, X): """Check input and compute prediction of ``init``. """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, call `fit` " "before making predictions`.") if X.shape[1] != self.n_features: raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format( self.n_features, X.shape[1])) score = self.init_.predict(X).astype(np.float64) return score def _decision_function(self, X): # for use in inner loop, not raveling the output in single-class case, # not doing input validation. score = self._init_decision_function(X) predict_stages(self.estimators_, X, self.learning_rate, score) return score def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : array, shape = [n_samples, n_classes] or [n_samples] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification produce an array of shape [n_samples]. """ X = check_array(X, dtype=DTYPE, order="C") score = self._decision_function(X) if score.shape[1] == 1: return score.ravel() return score def staged_decision_function(self, X): """Compute decision function of ``X`` for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- score : generator of array, shape = [n_samples, k] The decision function of the input samples. The order of the classes corresponds to that in the attribute `classes_`. Regression and binary classification are special cases with ``k == 1``, otherwise ``k==n_classes``. 
""" X = check_array(X, dtype=DTYPE, order="C") score = self._init_decision_function(X) for i in range(self.estimators_.shape[0]): predict_stage(self.estimators_, i, X, self.learning_rate, score) yield score @property def feature_importances_(self): """Return the feature importances (the higher, the more important the feature). Returns ------- feature_importances_ : array, shape = [n_features] """ if self.estimators_ is None or len(self.estimators_) == 0: raise ValueError("Estimator not fitted, " "call `fit` before `feature_importances_`.") total_sum = np.zeros((self.n_features, ), dtype=np.float64) for stage in self.estimators_: stage_sum = sum(tree.feature_importances_ for tree in stage) / len(stage) total_sum += stage_sum importances = total_sum / len(self.estimators_) return importances class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin): """Gradient Boosting for classification. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage ``n_classes_`` regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function. Binary classification is a special case where only a single regression tree is induced. Parameters ---------- loss : {'deviance', 'exponential'}, optional (default='deviance') loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recoveres the AdaBoost algorithm. learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. Ignored if ``max_samples_leaf`` is not None. min_samples_split : integer, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : integer, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=sqrt(n_features)`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. 
Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. If not None then ``max_depth`` will be ignored. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. ``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. `init` : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : list of DecisionTreeRegressor The collection of fitted sub-estimators. See also -------- sklearn.tree.DecisionTreeClassifier, RandomForestClassifier AdaBoostClassifier References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('deviance', 'exponential') def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, init=None, random_state=None, max_features=None, verbose=0, max_leaf_nodes=None, warm_start=False): super(GradientBoostingClassifier, self).__init__( loss, learning_rate, n_estimators, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, init, subsample, max_features, random_state, verbose=verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start) def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes ``0, 1, ..., n_classes_-1``. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. 
Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. """ y = column_or_1d(y, warn=True) self.classes_, y = np.unique(y, return_inverse=True) self.n_classes_ = len(self.classes_) return super(GradientBoostingClassifier, self).fit(X, y, sample_weight, monitor) def predict_proba(self, X): """Predict class probabilities for X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Raises ------ AttributeError If the ``loss`` does not support probabilities. Returns ------- p : array of shape = [n_samples] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ score = self.decision_function(X) try: return self.loss_._score_to_proba(score) except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) def staged_predict_proba(self, X): """Predict class probabilities at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted value of the input samples. """ try: for score in self.staged_decision_function(X): yield self.loss_._score_to_proba(score) except AttributeError: raise AttributeError('loss=%r does not support predict_proba' % self.loss) def predict(self, X): """Predict class for X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted classes. """ score = self.decision_function(X) decisions = self.loss_._score_to_decision(score) return self.classes_.take(decisions, axis=0) def staged_predict(self, X): """Predict classes at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y : array of shape = [n_samples] The predicted value of the input samples. """ for score in self.staged_decision_function(X): decisions = self.loss_._score_to_decision(score) yield self.classes_.take(decisions, axis=0) class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin): """Gradient Boosting for regression. GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage a regression tree is fit on the negative gradient of the given loss function. Parameters ---------- loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls') loss function to be optimized. 'ls' refers to least squares regression. 'lad' (least absolute deviation) is a highly robust loss function solely based on order information of the input variables. 
'huber' is a combination of the two. 'quantile' allows quantile regression (use `alpha` to specify the quantile). learning_rate : float, optional (default=0.1) learning rate shrinks the contribution of each tree by `learning_rate`. There is a trade-off between learning_rate and n_estimators. n_estimators : int (default=100) The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance. max_depth : integer, optional (default=3) maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables. min_samples_split : integer, optional (default=2) The minimum number of samples required to split an internal node. min_samples_leaf : integer, optional (default=1) The minimum number of samples required to be at a leaf node. min_weight_fraction_leaf : float, optional (default=0.) The minimum weighted fraction of the input samples required to be at a leaf node. subsample : float, optional (default=1.0) The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. `subsample` interacts with the parameter `n_estimators`. Choosing `subsample < 1.0` leads to a reduction of variance and an increase in bias. max_features : int, float, string or None, optional (default=None) The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a percentage and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Choosing `max_features < n_features` leads to a reduction of variance and an increase in bias. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. max_leaf_nodes : int or None, optional (default=None) Grow trees with ``max_leaf_nodes`` in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes. alpha : float (default=0.9) The alpha-quantile of the huber loss function and the quantile loss function. Only if ``loss='huber'`` or ``loss='quantile'``. init : BaseEstimator, None, optional (default=None) An estimator object that is used to compute the initial predictions. ``init`` has to provide ``fit`` and ``predict``. If None it uses ``loss.init_estimator``. verbose : int, default: 0 Enable verbose output. If 1 then it prints progress and performance once in a while (the more trees the lower the frequency). If greater than 1 then it prints progress and performance for every tree. warm_start : bool, default: False When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just erase the previous solution. Attributes ---------- feature_importances_ : array, shape = [n_features] The feature importances (the higher, the more important the feature). oob_improvement_ : array, shape = [n_estimators] The improvement in loss (= deviance) on the out-of-bag samples relative to the previous iteration. 
``oob_improvement_[0]`` is the improvement in loss of the first stage over the ``init`` estimator. train_score_ : array, shape = [n_estimators] The i-th score ``train_score_[i]`` is the deviance (= loss) of the model at iteration ``i`` on the in-bag sample. If ``subsample == 1`` this is the deviance on the training data. loss_ : LossFunction The concrete ``LossFunction`` object. `init` : BaseEstimator The estimator that provides the initial predictions. Set via the ``init`` argument or ``loss.init_estimator``. estimators_ : list of DecisionTreeRegressor The collection of fitted sub-estimators. See also -------- DecisionTreeRegressor, RandomForestRegressor References ---------- J. Friedman, Greedy Function Approximation: A Gradient Boosting Machine, The Annals of Statistics, Vol. 29, No. 5, 2001. J. Friedman, Stochastic Gradient Boosting, 1999 T. Hastie, R. Tibshirani and J. Friedman. Elements of Statistical Learning Ed. 2, Springer, 2009. """ _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile') def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0., max_depth=3, init=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False): super(GradientBoostingRegressor, self).__init__( loss, learning_rate, n_estimators, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_depth, init, subsample, max_features, random_state, alpha, verbose, max_leaf_nodes=max_leaf_nodes, warm_start=warm_start) def fit(self, X, y, sample_weight=None, monitor=None): """Fit the gradient boosting model. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values (integers in classification, real numbers in regression) For classification, labels must correspond to classes ``0, 1, ..., n_classes_-1``. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. monitor : callable, optional The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of ``_fit_stages`` as keyword arguments ``callable(i, self, locals())``. If the callable returns ``True`` the fitting procedure is stopped. The monitor can be used for various things such as computing held-out estimates, early stopping, model introspect, and snapshoting. Returns ------- self : object Returns self. """ self.n_classes_ = 1 return super(GradientBoostingRegressor, self).fit(X, y, sample_weight, monitor) def predict(self, X): """Predict regression target for X. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. Returns ------- y: array of shape = [n_samples] The predicted values. """ return self.decision_function(X).ravel() def staged_predict(self, X): """Predict regression target at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. 
Returns ------- y : array of shape = [n_samples] The predicted value of the input samples. """ for y in self.staged_decision_function(X): yield y.ravel()
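# --- Hedged usage sketch (not part of the original module): a minimal
# example of the estimators defined above, assuming the package's relative
# imports resolve (i.e. scikit-learn is installed). The data, parameter
# values, and printed quantities are illustrative assumptions only.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = np.sin(6.0 * X[:, 0]) + 0.1 * rng.randn(200)

    # Least-squares gradient boosting with stochastic subsampling.
    est = GradientBoostingRegressor(loss='ls', n_estimators=100,
                                    learning_rate=0.1, max_depth=3,
                                    subsample=0.8, random_state=0)
    est.fit(X, y)

    print(est.train_score_[-1])   # in-bag deviance of the final stage
    print(est.oob_improvement_[:3])  # OOB improvement (available because subsample < 1)
    print(est.predict(X[:5]))     # predictions for the first five samples

    # staged_predict yields predictions after each boosting stage, which is
    # useful for monitoring generalization error or early stopping.
    for i, y_pred in enumerate(est.staged_predict(X[:5])):
        if i >= 2:
            break
        print(i, y_pred)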
bsd-3-clause
rs2/pandas
pandas/tests/indexes/test_base.py
1
93051
from collections import defaultdict from datetime import datetime, timedelta from io import StringIO import math import operator import re import numpy as np import pytest import pandas._config.config as cf from pandas._libs.tslib import Timestamp from pandas.compat.numpy import np_datetime64_compat from pandas.util._test_decorators import async_mark from pandas.core.dtypes.generic import ABCIndex import pandas as pd from pandas import ( CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index, PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range, isna, period_range, ) import pandas._testing as tm from pandas.core.indexes.api import ( Index, MultiIndex, _get_combined_index, ensure_index, ensure_index_from_sequences, ) from pandas.tests.indexes.common import Base class TestIndex(Base): _holder = Index def create_index(self) -> Index: return Index(list("abcde")) def test_can_hold_identifiers(self): index = self.create_index() key = index[0] assert index._can_hold_identifiers_and_holds_name(key) is True @pytest.mark.parametrize("index", ["datetime"], indirect=True) def test_new_axis(self, index): with tm.assert_produces_warning(FutureWarning): # GH#30588 multi-dimensional indexing deprecated new_index = index[None, :] assert new_index.ndim == 2 assert isinstance(new_index, np.ndarray) def test_constructor_regular(self, index): tm.assert_contains_all(index, index) @pytest.mark.parametrize("index", ["string"], indirect=True) def test_constructor_casting(self, index): # casting arr = np.array(index) new_index = Index(arr) tm.assert_contains_all(arr, new_index) tm.assert_index_equal(index, new_index) @pytest.mark.parametrize("index", ["string"], indirect=True) def test_constructor_copy(self, index): # copy # index = self.create_index() arr = np.array(index) new_index = Index(arr, copy=True, name="name") assert isinstance(new_index, Index) assert new_index.name == "name" tm.assert_numpy_array_equal(arr, new_index.values) arr[0] = "SOMEBIGLONGSTRING" assert new_index[0] != "SOMEBIGLONGSTRING" # FIXME: dont leave commented-out # what to do here? # arr = np.array(5.) 
# pytest.raises(Exception, arr.view, Index) @pytest.mark.parametrize("cast_as_obj", [True, False]) @pytest.mark.parametrize( "index", [ pd.date_range( "2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern", name="Green Eggs & Ham", ), # DTI with tz pd.date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz pd.timedelta_range("1 days", freq="D", periods=3), # td pd.period_range("2015-01-01", freq="D", periods=3), # period ], ) def test_constructor_from_index_dtlike(self, cast_as_obj, index): if cast_as_obj: result = pd.Index(index.astype(object)) else: result = pd.Index(index) tm.assert_index_equal(result, index) if isinstance(index, pd.DatetimeIndex): assert result.tz == index.tz if cast_as_obj: # GH#23524 check that Index(dti, dtype=object) does not # incorrectly raise ValueError, and that nanoseconds are not # dropped index += pd.Timedelta(nanoseconds=50) result = pd.Index(index, dtype=object) assert result.dtype == np.object_ assert list(result) == list(index) @pytest.mark.parametrize( "index,has_tz", [ ( pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), True, ), # datetimetz (pd.timedelta_range("1 days", freq="D", periods=3), False), # td (pd.period_range("2015-01-01", freq="D", periods=3), False), # period ], ) def test_constructor_from_series_dtlike(self, index, has_tz): result = pd.Index(pd.Series(index)) tm.assert_index_equal(result, index) if has_tz: assert result.tz == index.tz def test_constructor_from_series_freq(self): # GH 6273 # create from a series, passing a freq dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] expected = DatetimeIndex(dts, freq="MS") s = Series(pd.to_datetime(dts)) result = DatetimeIndex(s, freq="MS") tm.assert_index_equal(result, expected) def test_constructor_from_frame_series_freq(self): # GH 6273 # create from a series, passing a freq dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] expected = DatetimeIndex(dts, freq="MS") df = pd.DataFrame(np.random.rand(5, 3)) df["date"] = dts result = DatetimeIndex(df["date"], freq="MS") assert df["date"].dtype == object expected.name = "date" tm.assert_index_equal(result, expected) expected = pd.Series(dts, name="date") tm.assert_series_equal(df["date"], expected) # GH 6274 # infer freq of same freq = pd.infer_freq(df["date"]) assert freq == "MS" @pytest.mark.parametrize( "array", [ np.arange(5), np.array(["a", "b", "c"]), date_range("2000-01-01", periods=3).values, ], ) def test_constructor_ndarray_like(self, array): # GH 5460#issuecomment-44474502 # it should be possible to convert any object that satisfies the numpy # ndarray interface directly into an Index class ArrayLike: def __init__(self, array): self.array = array def __array__(self, dtype=None) -> np.ndarray: return self.array expected = pd.Index(array) result = pd.Index(ArrayLike(array)) tm.assert_index_equal(result, expected) def test_constructor_int_dtype_nan(self): # see gh-15187 data = [np.nan] expected = Float64Index(data) result = Index(data, dtype="float") tm.assert_index_equal(result, expected) @pytest.mark.parametrize("dtype", ["int64", "uint64"]) def test_constructor_int_dtype_nan_raises(self, dtype): # see gh-15187 data = [np.nan] msg = "cannot convert" with pytest.raises(ValueError, match=msg): Index(data, dtype=dtype) def test_constructor_no_pandas_array(self): ser = pd.Series([1, 2, 3]) result = pd.Index(ser.array) expected = pd.Index([1, 2, 3]) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "klass,dtype,na_val", [ (pd.Float64Index, np.float64, 
np.nan), (pd.DatetimeIndex, "datetime64[ns]", pd.NaT), ], ) def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): # GH 13467 na_list = [na_val, na_val] expected = klass(na_list) assert expected.dtype == dtype result = Index(na_list) tm.assert_index_equal(result, expected) result = Index(np.array(na_list)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "vals,dtype", [ ([1, 2, 3, 4, 5], "int"), ([1.1, np.nan, 2.2, 3.0], "float"), (["A", "B", "C", np.nan], "obj"), ], ) def test_constructor_simple_new(self, vals, dtype): index = Index(vals, name=dtype) result = index._simple_new(index.values, dtype) tm.assert_index_equal(result, index) @pytest.mark.parametrize( "vals", [ [1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int), # below should coerce [1.0, 2.0, 3.0], np.array([1.0, 2.0, 3.0], dtype=float), ], ) def test_constructor_dtypes_to_int64(self, vals): index = Index(vals, dtype=int) assert isinstance(index, Int64Index) @pytest.mark.parametrize( "vals", [ [1, 2, 3], [1.0, 2.0, 3.0], np.array([1.0, 2.0, 3.0]), np.array([1, 2, 3], dtype=int), np.array([1.0, 2.0, 3.0], dtype=float), ], ) def test_constructor_dtypes_to_float64(self, vals): index = Index(vals, dtype=float) assert isinstance(index, Float64Index) @pytest.mark.parametrize( "vals", [ [1, 2, 3], np.array([1, 2, 3], dtype=int), np.array( [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")] ), [datetime(2011, 1, 1), datetime(2011, 1, 2)], ], ) def test_constructor_dtypes_to_categorical(self, vals): index = Index(vals, dtype="category") assert isinstance(index, CategoricalIndex) @pytest.mark.parametrize("cast_index", [True, False]) @pytest.mark.parametrize( "vals", [ Index( np.array( [ np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02"), ] ) ), Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), ], ) def test_constructor_dtypes_to_datetime(self, cast_index, vals): if cast_index: index = Index(vals, dtype=object) assert isinstance(index, Index) assert index.dtype == object else: index = Index(vals) assert isinstance(index, DatetimeIndex) @pytest.mark.parametrize("cast_index", [True, False]) @pytest.mark.parametrize( "vals", [ np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), [timedelta(1), timedelta(1)], ], ) def test_constructor_dtypes_to_timedelta(self, cast_index, vals): if cast_index: index = Index(vals, dtype=object) assert isinstance(index, Index) assert index.dtype == object else: index = Index(vals) assert isinstance(index, TimedeltaIndex) @pytest.mark.parametrize("attr", ["values", "asi8"]) @pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex]) def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass): # Test constructing with a datetimetz dtype # .values produces numpy datetimes, so these are considered naive # .asi8 produces integers, so these are considered epoch timestamps # ^the above will be true in a later version. Right now we `.view` # the i8 values as NS_DTYPE, effectively treating them as wall times. 
        index = pd.date_range("2011-01-01", periods=5)
        arg = getattr(index, attr)
        index = index.tz_localize(tz_naive_fixture)
        dtype = index.dtype

        if attr == "asi8":
            result = pd.DatetimeIndex(arg).tz_localize(tz_naive_fixture)
        else:
            result = klass(arg, tz=tz_naive_fixture)
        tm.assert_index_equal(result, index)

        if attr == "asi8":
            result = pd.DatetimeIndex(arg).astype(dtype)
        else:
            result = klass(arg, dtype=dtype)
        tm.assert_index_equal(result, index)

        if attr == "asi8":
            result = pd.DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture)
        else:
            result = klass(list(arg), tz=tz_naive_fixture)
        tm.assert_index_equal(result, index)

        if attr == "asi8":
            result = pd.DatetimeIndex(list(arg)).astype(dtype)
        else:
            result = klass(list(arg), dtype=dtype)
        tm.assert_index_equal(result, index)

    @pytest.mark.parametrize("attr", ["values", "asi8"])
    @pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
    def test_constructor_dtypes_timedelta(self, attr, klass):
        index = pd.timedelta_range("1 days", periods=5)
        index = index._with_freq(None)  # won't be preserved by constructors
        dtype = index.dtype
        values = getattr(index, attr)

        result = klass(values, dtype=dtype)
        tm.assert_index_equal(result, index)

        result = klass(list(values), dtype=dtype)
        tm.assert_index_equal(result, index)

    @pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])])
    @pytest.mark.parametrize(
        "klass",
        [
            Index,
            Float64Index,
            Int64Index,
            UInt64Index,
            CategoricalIndex,
            DatetimeIndex,
            TimedeltaIndex,
        ],
    )
    def test_constructor_empty(self, value, klass):
        empty = klass(value)
        assert isinstance(empty, klass)
        assert not len(empty)

    @pytest.mark.parametrize(
        "empty,klass",
        [
            (PeriodIndex([], freq="B"), PeriodIndex),
            (PeriodIndex(iter([]), freq="B"), PeriodIndex),
            (PeriodIndex((_ for _ in []), freq="B"), PeriodIndex),
            (RangeIndex(step=1), pd.RangeIndex),
            (MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex),
        ],
    )
    def test_constructor_empty_special(self, empty, klass):
        assert isinstance(empty, klass)
        assert not len(empty)

    def test_constructor_overflow_int64(self):
        # see gh-15832
        msg = (
            "The elements provided in the data cannot "
            "all be casted to the dtype int64"
        )
        with pytest.raises(OverflowError, match=msg):
            Index([np.iinfo(np.uint64).max - 1], dtype="int64")

    @pytest.mark.parametrize(
        "index",
        [
            "datetime", "float", "int", "period", "range",
            "repeats", "timedelta", "tuples", "uint",
        ],
        indirect=True,
    )
    def test_view_with_args(self, index):
        index.view("i8")

    @pytest.mark.parametrize(
        "index",
        [
            "unicode",
            "string",
            pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")),
            "bool",
            "empty",
        ],
        indirect=True,
    )
    def test_view_with_args_object_array_raises(self, index):
        msg = "Cannot change data-type for object array"
        with pytest.raises(TypeError, match=msg):
            index.view("i8")

    @pytest.mark.parametrize("index", ["int", "range"], indirect=True)
    def test_astype(self, index):
        casted = index.astype("i8")

        # it works!
casted.get_loc(5) # pass on name index.name = "foobar" casted = index.astype("i8") assert casted.name == "foobar" def test_equals_object(self): # same assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"])) @pytest.mark.parametrize( "comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]] ) def test_not_equals_object(self, comp): assert not Index(["a", "b", "c"]).equals(comp) def test_insert_missing(self, nulls_fixture): # GH 22295 # test there is no mangling of NA values expected = Index(["a", nulls_fixture, "b", "c"]) result = Index(list("abc")).insert(1, nulls_fixture) tm.assert_index_equal(result, expected) def test_delete_raises(self): index = Index(["a", "b", "c", "d"], name="index") msg = "index 5 is out of bounds for axis 0 with size 4" with pytest.raises(IndexError, match=msg): index.delete(5) def test_identical(self): # index i1 = Index(["a", "b", "c"]) i2 = Index(["a", "b", "c"]) assert i1.identical(i2) i1 = i1.rename("foo") assert i1.equals(i2) assert not i1.identical(i2) i2 = i2.rename("foo") assert i1.identical(i2) i3 = Index([("a", "a"), ("a", "b"), ("b", "a")]) i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False) assert not i3.identical(i4) def test_is_(self): ind = Index(range(10)) assert ind.is_(ind) assert ind.is_(ind.view().view().view().view()) assert not ind.is_(Index(range(10))) assert not ind.is_(ind.copy()) assert not ind.is_(ind.copy(deep=False)) assert not ind.is_(ind[:]) assert not ind.is_(np.array(range(10))) # quasi-implementation dependent assert ind.is_(ind.view()) ind2 = ind.view() ind2.name = "bob" assert ind.is_(ind2) assert ind2.is_(ind) # doesn't matter if Indices are *actually* views of underlying data, assert not ind.is_(Index(ind.values)) arr = np.array(range(1, 11)) ind1 = Index(arr, copy=False) ind2 = Index(arr, copy=False) assert not ind1.is_(ind2) @pytest.mark.parametrize("index", ["datetime"], indirect=True) def test_asof(self, index): d = index[0] assert index.asof(d) == d assert isna(index.asof(d - timedelta(1))) d = index[-1] assert index.asof(d + timedelta(1)) == d d = index[0].to_pydatetime() assert isinstance(index.asof(d), Timestamp) def test_asof_datetime_partial(self): index = pd.date_range("2010-01-01", periods=2, freq="m") expected = Timestamp("2010-02-28") result = index.asof("2010-02") assert result == expected assert not isinstance(result, Index) def test_nanosecond_index_access(self): s = Series([Timestamp("20130101")]).values.view("i8")[0] r = DatetimeIndex([s + 50 + i for i in range(100)]) x = Series(np.random.randn(100), index=r) first_value = x.asof(x.index[0]) # this does not yet work, as parsing strings is done via dateutil # assert first_value == x['2013-01-01 00:00:00.000000050+0000'] expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns") assert first_value == x[Timestamp(expected_ts)] @pytest.mark.parametrize("index", ["string"], indirect=True) def test_booleanindex(self, index): bool_index = np.ones(len(index), dtype=bool) bool_index[5:30:2] = False sub_index = index[bool_index] for i, val in enumerate(sub_index): assert sub_index.get_loc(val) == i sub_index = index[list(bool_index)] for i, val in enumerate(sub_index): assert sub_index.get_loc(val) == i def test_fancy(self): index = self.create_index() sl = index[[1, 2, 3]] for i in sl: assert i == sl[sl.get_loc(i)] @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) @pytest.mark.parametrize("dtype", [np.int_, np.bool_]) def test_empty_fancy(self, index, dtype): empty_arr = np.array([], 
dtype=dtype) empty_index = type(index)([]) assert index[[]].identical(empty_index) assert index[empty_arr].identical(empty_index) @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) def test_empty_fancy_raises(self, index): # pd.DatetimeIndex is excluded, because it overrides getitem and should # be tested separately. empty_farr = np.array([], dtype=np.float_) empty_index = type(index)([]) assert index[[]].identical(empty_index) # np.ndarray only accepts ndarray of int & bool dtypes, so should Index msg = r"arrays used as indices must be of integer \(or boolean\) type" with pytest.raises(IndexError, match=msg): index[empty_farr] @pytest.mark.parametrize("index", ["string"], indirect=True) def test_intersection(self, index, sort): first = index[:20] second = index[:10] intersect = first.intersection(second, sort=sort) if sort is None: tm.assert_index_equal(intersect, second.sort_values()) assert tm.equalContents(intersect, second) # Corner cases inter = first.intersection(first, sort=sort) assert inter is first @pytest.mark.parametrize( "index2,keeps_name", [ (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names (Index([3, 4, 5, 6, 7]), False), ], ) def test_intersection_name_preservation(self, index2, keeps_name, sort): index1 = Index([1, 2, 3, 4, 5], name="index") expected = Index([3, 4, 5]) result = index1.intersection(index2, sort) if keeps_name: expected.name = "index" assert result.name == expected.name tm.assert_index_equal(result, expected) @pytest.mark.parametrize("index", ["string"], indirect=True) @pytest.mark.parametrize( "first_name,second_name,expected_name", [("A", "A", "A"), ("A", "B", None), (None, "B", None)], ) def test_intersection_name_preservation2( self, index, first_name, second_name, expected_name, sort ): first = index[5:20] second = index[:10] first.name = first_name second.name = second_name intersect = first.intersection(second, sort=sort) assert intersect.name == expected_name @pytest.mark.parametrize( "index2,keeps_name", [ (Index([4, 7, 6, 5, 3], name="index"), True), (Index([4, 7, 6, 5, 3], name="other"), False), ], ) def test_intersection_monotonic(self, index2, keeps_name, sort): index1 = Index([5, 3, 2, 4, 1], name="index") expected = Index([5, 3, 4]) if keeps_name: expected.name = "index" result = index1.intersection(index2, sort=sort) if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "index2,expected_arr", [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])], ) def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort): # non-monotonic non-unique index1 = Index(["A", "B", "A", "C"]) expected = Index(expected_arr, dtype="object") result = index1.intersection(index2, sort=sort) if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) def test_intersect_str_dates(self, sort): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] i1 = Index(dt_dates, dtype=object) i2 = Index(["aa"], dtype=object) result = i2.intersection(i1, sort=sort) assert len(result) == 0 @pytest.mark.xfail(reason="Not implemented") def test_intersection_equal_sort_true(self): # TODO decide on True behaviour idx = pd.Index(["c", "a", "b"]) sorted_ = pd.Index(["a", "b", "c"]) tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) def test_chained_union(self, sort): # Chained unions handles names correctly i1 = Index([1, 2], name="i1") 
        i2 = Index([5, 6], name="i2")
        i3 = Index([3, 4], name="i3")
        union = i1.union(i2.union(i3, sort=sort), sort=sort)

        expected = i1.union(i2, sort=sort).union(i3, sort=sort)
        tm.assert_index_equal(union, expected)

        j1 = Index([1, 2], name="j1")
        j2 = Index([], name="j2")
        j3 = Index([], name="j3")
        union = j1.union(j2.union(j3, sort=sort), sort=sort)

        expected = j1.union(j2, sort=sort).union(j3, sort=sort)
        tm.assert_index_equal(union, expected)

    @pytest.mark.parametrize("index", ["string"], indirect=True)
    def test_union(self, index, sort):
        first = index[5:20]
        second = index[:10]
        everything = index[:20]

        union = first.union(second, sort=sort)
        if sort is None:
            tm.assert_index_equal(union, everything.sort_values())
        assert tm.equalContents(union, everything)

    @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
    def test_union_sort_other_special(self, slice_):
        # https://github.com/pandas-dev/pandas/issues/24959

        idx = pd.Index([1, 0, 2])
        # default, sort=None
        other = idx[slice_]
        tm.assert_index_equal(idx.union(other), idx)
        tm.assert_index_equal(other.union(idx), idx)

        # sort=False
        tm.assert_index_equal(idx.union(other, sort=False), idx)

    @pytest.mark.xfail(reason="Not implemented")
    @pytest.mark.parametrize("slice_", [slice(None), slice(0)])
    def test_union_sort_special_true(self, slice_):
        # TODO decide on True behaviour
        # sort=True
        idx = pd.Index([1, 0, 2])
        # default, sort=None
        other = idx[slice_]
        result = idx.union(other, sort=True)
        expected = pd.Index([0, 1, 2])
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("klass", [np.array, Series, list])
    @pytest.mark.parametrize("index", ["string"], indirect=True)
    def test_union_from_iterables(self, index, klass, sort):
        # GH 10149
        first = index[5:20]
        second = index[:10]
        everything = index[:20]

        case = klass(second.values)
        result = first.union(case, sort=sort)
        if sort is None:
            tm.assert_index_equal(result, everything.sort_values())
        assert tm.equalContents(result, everything)

    @pytest.mark.parametrize("index", ["string"], indirect=True)
    def test_union_identity(self, index, sort):
        first = index[5:20]

        union = first.union(first, sort=sort)
        # i.e. identity is not preserved when sort is True
        assert (union is first) is (not sort)

        # This should no longer be the same object, since [] is not consistent,
        # both objects will be recast to dtype('O')
        union = first.union([], sort=sort)
        assert (union is first) is (not sort)

        union = Index([]).union(first, sort=sort)
        assert (union is first) is (not sort)

    @pytest.mark.parametrize("first_list", [list("ba"), list()])
    @pytest.mark.parametrize("second_list", [list("ab"), list()])
    @pytest.mark.parametrize(
        "first_name, second_name, expected_name",
        [("A", "B", None), (None, "B", None), ("A", None, None)],
    )
    def test_union_name_preservation(
        self, first_list, second_list, first_name, second_name, expected_name, sort
    ):
        first = Index(first_list, name=first_name)
        second = Index(second_list, name=second_name)
        union = first.union(second, sort=sort)

        vals = set(first_list).union(second_list)

        if sort is None and len(first_list) > 0 and len(second_list) > 0:
            expected = Index(sorted(vals), name=expected_name)
            tm.assert_index_equal(union, expected)
        else:
            expected = Index(vals, name=expected_name)
            assert tm.equalContents(union, expected)

    def test_union_dt_as_obj(self, sort):
        # TODO: Replace with fixture result
        index = self.create_index()
        date_index = pd.date_range("2019-01-01", periods=10)
        first_cat = index.union(date_index)
        second_cat = index.union(index)

        if date_index.dtype == np.object_:
            appended = np.append(index, date_index)
        else:
            appended = np.append(index, date_index.astype("O"))

        assert tm.equalContents(first_cat, appended)
        assert tm.equalContents(second_cat, index)
        tm.assert_contains_all(index, first_cat)
        tm.assert_contains_all(index, second_cat)
        tm.assert_contains_all(date_index, first_cat)

    def test_map_identity_mapping(self, index):
        # GH 12766
        tm.assert_index_equal(index, index.map(lambda x: x))

    def test_map_with_tuples(self):
        # GH 12766

        # Test that returning a single tuple from an Index
        # returns an Index.
        index = tm.makeIntIndex(3)
        result = tm.makeIntIndex(3).map(lambda x: (x,))
        expected = Index([(i,) for i in index])
        tm.assert_index_equal(result, expected)

        # Test that returning a tuple from a map of a single index
        # returns a MultiIndex object.
        result = index.map(lambda x: (x, x == 1))
        expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
        tm.assert_index_equal(result, expected)

    def test_map_with_tuples_mi(self):
        # Test that returning a single object from a MultiIndex
        # returns an Index.
first_level = ["foo", "bar", "baz"] multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3])) reduced_index = multi_index.map(lambda x: x[0]) tm.assert_index_equal(reduced_index, Index(first_level)) @pytest.mark.parametrize( "attr", ["makeDateIndex", "makePeriodIndex", "makeTimedeltaIndex"] ) def test_map_tseries_indices_return_index(self, attr): index = getattr(tm, attr)(10) expected = Index([1] * 10) result = index.map(lambda x: 1) tm.assert_index_equal(expected, result) def test_map_tseries_indices_accsr_return_index(self): date_index = tm.makeDateIndex(24, freq="h", name="hourly") expected = Index(range(24), name="hourly") tm.assert_index_equal(expected, date_index.map(lambda x: x.hour)) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index), ], ) def test_map_dictlike_simple(self, mapper): # GH 12756 expected = Index(["foo", "bar", "baz"]) index = tm.makeIntIndex(3) result = index.map(mapper(expected.values, index)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index), ], ) def test_map_dictlike(self, index, mapper): # GH 12756 if isinstance(index, CategoricalIndex): # Tested in test_categorical return elif not index.is_unique: # Cannot map duplicated index return if index.empty: # to match proper result coercion for uints expected = Index([]) else: expected = Index(np.arange(len(index), 0, -1)) result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", [Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}], ) def test_map_with_non_function_missing_values(self, mapper): # GH 12756 expected = Index([2.0, np.nan, "foo"]) result = Index([2, 1, 0]).map(mapper) tm.assert_index_equal(expected, result) def test_map_na_exclusion(self): index = Index([1.5, np.nan, 3, np.nan, 5]) result = index.map(lambda x: x * 2, na_action="ignore") expected = index * 2 tm.assert_index_equal(result, expected) def test_map_defaultdict(self): index = Index([1, 2, 3]) default_dict = defaultdict(lambda: "blank") default_dict[1] = "stuff" result = index.map(default_dict) expected = Index(["stuff", "blank", "blank"]) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)]) def test_append_empty_preserve_name(self, name, expected): left = Index([], name="foo") right = Index([1, 2, 3], name=name) result = left.append(right) assert result.name == expected @pytest.mark.parametrize("index", ["string"], indirect=True) @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")]) def test_difference_name_preservation(self, index, second_name, expected, sort): first = index[5:20] second = index[:10] answer = index[10:20] first.name = "name" second.name = second_name result = first.difference(second, sort=sort) assert tm.equalContents(result, answer) if expected is None: assert result.name is None else: assert result.name == expected @pytest.mark.parametrize("index", ["string"], indirect=True) def test_difference_empty_arg(self, index, sort): first = index[5:20] first.name = "name" result = first.difference([], sort) assert tm.equalContents(result, first) assert result.name == first.name @pytest.mark.parametrize("index", ["string"], indirect=True) def test_difference_identity(self, index, sort): first = index[5:20] first.name = "name" 
result = first.difference(first, sort) assert len(result) == 0 assert result.name == first.name @pytest.mark.parametrize("index", ["string"], indirect=True) def test_difference_sort(self, index, sort): first = index[5:20] second = index[:10] result = first.difference(second, sort) expected = index[10:20] if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) def test_symmetric_difference(self, sort): # smoke index1 = Index([5, 2, 3, 4], name="index1") index2 = Index([2, 3, 4, 1]) result = index1.symmetric_difference(index2, sort=sort) expected = Index([5, 1]) assert tm.equalContents(result, expected) assert result.name is None if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) # __xor__ syntax expected = index1 ^ index2 assert tm.equalContents(result, expected) assert result.name is None @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) def test_difference_incomparable(self, opname): a = pd.Index([3, pd.Timestamp("2000"), 1]) b = pd.Index([2, pd.Timestamp("1999"), 1]) op = operator.methodcaller(opname, b) # sort=None, the default result = op(a) expected = pd.Index([3, pd.Timestamp("2000"), 2, pd.Timestamp("1999")]) if opname == "difference": expected = expected[:2] tm.assert_index_equal(result, expected) # sort=False op = operator.methodcaller(opname, b, sort=False) result = op(a) tm.assert_index_equal(result, expected) @pytest.mark.xfail(reason="Not implemented") @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) def test_difference_incomparable_true(self, opname): # TODO decide on True behaviour # # sort=True, raises a = pd.Index([3, pd.Timestamp("2000"), 1]) b = pd.Index([2, pd.Timestamp("1999"), 1]) op = operator.methodcaller(opname, b, sort=True) with pytest.raises(TypeError, match="Cannot compare"): op(a) def test_symmetric_difference_mi(self, sort): index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])) index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)]) result = index1.symmetric_difference(index2, sort=sort) expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)]) if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @pytest.mark.parametrize( "index2,expected", [ (Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])), (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])), ], ) def test_symmetric_difference_missing(self, index2, expected, sort): # GH 13514 change: {nan} - {nan} == {} # (GH 6444, sorting of nans, is no longer an issue) index1 = Index([1, np.nan, 2, 3]) result = index1.symmetric_difference(index2, sort=sort) if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) def test_symmetric_difference_non_index(self, sort): index1 = Index([1, 2, 3, 4], name="index1") index2 = np.array([2, 3, 4, 5]) expected = Index([1, 5]) result = index1.symmetric_difference(index2, sort=sort) assert tm.equalContents(result, expected) assert result.name == "index1" result = index1.symmetric_difference(index2, result_name="new_name", sort=sort) assert tm.equalContents(result, expected) assert result.name == "new_name" def test_difference_type(self, index, sort): # GH 20040 # If taking difference of a set and itself, it # needs to preserve the type of the index if not index.is_unique: return result = index.difference(index, sort=sort) expected = index.drop(index) tm.assert_index_equal(result, expected) def 
test_intersection_difference(self, index, sort): # GH 20040 # Test that the intersection of an index with an # empty index produces the same index as the difference # of an index with itself. Test for all types if not index.is_unique: return inter = index.intersection(index.drop(index)) diff = index.difference(index, sort=sort) tm.assert_index_equal(inter, diff) def test_is_mixed_deprecated(self): # GH#32922 index = self.create_index() with tm.assert_produces_warning(FutureWarning): index.is_mixed() @pytest.mark.parametrize( "index, expected", [ ("string", False), ("bool", False), ("categorical", False), ("int", True), ("datetime", False), ("float", True), ], indirect=["index"], ) def test_is_numeric(self, index, expected): assert index.is_numeric() is expected @pytest.mark.parametrize( "index, expected", [ ("string", True), ("bool", True), ("categorical", False), ("int", False), ("datetime", False), ("float", False), ], indirect=["index"], ) def test_is_object(self, index, expected): assert index.is_object() is expected @pytest.mark.parametrize( "index, expected", [ ("string", False), ("bool", False), ("categorical", False), ("int", False), ("datetime", True), ("float", False), ], indirect=["index"], ) def test_is_all_dates(self, index, expected): assert index.is_all_dates is expected def test_summary(self, index): self._check_method_works(Index._summary, index) def test_summary_bug(self): # GH3869` ind = Index(["{other}%s", "~:{range}:0"], name="A") result = ind._summary() # shouldn't be formatted accidentally. assert "~:{range}:0" in result assert "{other}%s" in result def test_format_different_scalar_lengths(self): # GH35439 idx = Index(["aaaaaaaaa", "b"]) expected = ["aaaaaaaaa", "b"] assert idx.format() == expected def test_format_bug(self): # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't # include us since the default for Timestamp shows these but Index # formatting does not we are skipping) now = datetime.now() if not str(now).endswith("000"): index = Index([now]) formatted = index.format() expected = [str(index[0])] assert formatted == expected Index([]).format() @pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]]) def test_format_missing(self, vals, nulls_fixture): # 2845 vals = list(vals) # Copy for each iteration vals.append(nulls_fixture) index = Index(vals) formatted = index.format() expected = [str(index[0]), str(index[1]), str(index[2]), "NaN"] assert formatted == expected assert index[3] is nulls_fixture def test_format_with_name_time_info(self): # bug I fixed 12/20/2011 dates = date_range("2011-01-01 04:00:00", periods=10, name="something") formatted = dates.format(name=True) assert formatted[0] == "something" def test_format_datetime_with_time(self): t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) result = t.format() expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] assert len(result) == 2 assert result == expected @pytest.mark.parametrize("op", ["any", "all"]) def test_logical_compat(self, op): index = self.create_index() assert getattr(index, op)() == getattr(index.values, op)() def _check_method_works(self, method, index): method(index) def test_get_indexer(self): index1 = Index([1, 2, 3, 4, 5]) index2 = Index([2, 4, 6]) r1 = index1.get_indexer(index2) e1 = np.array([1, 3, -1], dtype=np.intp) tm.assert_almost_equal(r1, e1) @pytest.mark.parametrize("reverse", [True, False]) @pytest.mark.parametrize( "expected,method", [ (np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"), (np.array([-1, 0, 0, 1, 
1], dtype=np.intp), "ffill"), (np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"), (np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"), ], ) def test_get_indexer_methods(self, reverse, expected, method): index1 = Index([1, 2, 3, 4, 5]) index2 = Index([2, 4, 6]) if reverse: index1 = index1[::-1] expected = expected[::-1] result = index2.get_indexer(index1, method=method) tm.assert_almost_equal(result, expected) def test_get_indexer_invalid(self): # GH10411 index = Index(np.arange(10)) with pytest.raises(ValueError, match="tolerance argument"): index.get_indexer([1, 0], tolerance=1) with pytest.raises(ValueError, match="limit argument"): index.get_indexer([1, 0], limit=1) @pytest.mark.parametrize( "method, tolerance, indexer, expected", [ ("pad", None, [0, 5, 9], [0, 5, 9]), ("backfill", None, [0, 5, 9], [0, 5, 9]), ("nearest", None, [0, 5, 9], [0, 5, 9]), ("pad", 0, [0, 5, 9], [0, 5, 9]), ("backfill", 0, [0, 5, 9], [0, 5, 9]), ("nearest", 0, [0, 5, 9], [0, 5, 9]), ("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]), ("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]), ("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]), ("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]), ("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]), ("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]), ("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]), ("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]), ("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]), ], ) def test_get_indexer_nearest(self, method, tolerance, indexer, expected): index = Index(np.arange(10)) actual = index.get_indexer(indexer, method=method, tolerance=tolerance) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) @pytest.mark.parametrize("listtype", [list, tuple, Series, np.array]) @pytest.mark.parametrize( "tolerance, expected", list( zip( [[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]], [[0, 2, -1], [0, -1, -1], [-1, 2, 9]], ) ), ) def test_get_indexer_nearest_listlike_tolerance( self, tolerance, expected, listtype ): index = Index(np.arange(10)) actual = index.get_indexer( [0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance) ) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) def test_get_indexer_nearest_error(self): index = Index(np.arange(10)) with pytest.raises(ValueError, match="limit argument"): index.get_indexer([1, 0], method="nearest", limit=1) with pytest.raises(ValueError, match="tolerance size must match"): index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3]) @pytest.mark.parametrize( "method,expected", [("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])], ) def test_get_indexer_nearest_decreasing(self, method, expected): index = Index(np.arange(10))[::-1] actual = index.get_indexer([0, 5, 9], method=method) tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp)) actual = index.get_indexer([0.2, 1.8, 8.5], method=method) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) @pytest.mark.parametrize( "method,expected", [ ("pad", np.array([-1, 0, 1, 1], dtype=np.intp)), ("backfill", np.array([0, 0, 1, -1], dtype=np.intp)), ], ) def test_get_indexer_strings(self, method, expected): index = pd.Index(["b", "c"]) actual = index.get_indexer(["a", "b", "c", "d"], method=method) tm.assert_numpy_array_equal(actual, expected) def test_get_indexer_strings_raises(self): index = pd.Index(["b", "c"]) msg = r"unsupported operand type\(s\) for -: 'str' and 'str'" with pytest.raises(TypeError, match=msg): index.get_indexer(["a", "b", "c", "d"], method="nearest") with pytest.raises(TypeError, 
match=msg): index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2) with pytest.raises(TypeError, match=msg): index.get_indexer( ["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2] ) @pytest.mark.parametrize("idx_class", [Int64Index, RangeIndex, Float64Index]) def test_get_indexer_numeric_index_boolean_target(self, idx_class): # GH 16877 numeric_index = idx_class(RangeIndex(4)) result = numeric_index.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) def test_get_indexer_with_NA_values( self, unique_nulls_fixture, unique_nulls_fixture2 ): # GH 22332 # check pairwise, that no pair of na values # is mangled if unique_nulls_fixture is unique_nulls_fixture2: return # skip it, values are not unique arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) index = pd.Index(arr, dtype=object) result = index.get_indexer( [unique_nulls_fixture, unique_nulls_fixture2, "Unknown"] ) expected = np.array([0, 1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"]) def test_get_loc(self, method): index = pd.Index([0, 1, 2]) assert index.get_loc(1, method=method) == 1 if method: assert index.get_loc(1, method=method, tolerance=0) == 1 @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"]) def test_get_loc_raises_bad_label(self, method): index = pd.Index([0, 1, 2]) if method: msg = "not supported between" else: msg = "invalid key" with pytest.raises(TypeError, match=msg): index.get_loc([1, 2], method=method) @pytest.mark.parametrize( "method,loc", [("pad", 1), ("backfill", 2), ("nearest", 1)] ) def test_get_loc_tolerance(self, method, loc): index = pd.Index([0, 1, 2]) assert index.get_loc(1.1, method) == loc assert index.get_loc(1.1, method, tolerance=1) == loc @pytest.mark.parametrize("method", ["pad", "backfill", "nearest"]) def test_get_loc_outside_tolerance_raises(self, method): index = pd.Index([0, 1, 2]) with pytest.raises(KeyError, match="1.1"): index.get_loc(1.1, method, tolerance=0.05) def test_get_loc_bad_tolerance_raises(self): index = pd.Index([0, 1, 2]) with pytest.raises(ValueError, match="must be numeric"): index.get_loc(1.1, "nearest", tolerance="invalid") def test_get_loc_tolerance_no_method_raises(self): index = pd.Index([0, 1, 2]) with pytest.raises(ValueError, match="tolerance .* valid if"): index.get_loc(1.1, tolerance=1) def test_get_loc_raises_missized_tolerance(self): index = pd.Index([0, 1, 2]) with pytest.raises(ValueError, match="tolerance size must match"): index.get_loc(1.1, "nearest", tolerance=[1, 1]) def test_get_loc_raises_object_nearest(self): index = pd.Index(["a", "c"]) with pytest.raises(TypeError, match="unsupported operand type"): index.get_loc("a", method="nearest") def test_get_loc_raises_object_tolerance(self): index = pd.Index(["a", "c"]) with pytest.raises(TypeError, match="unsupported operand type"): index.get_loc("a", method="pad", tolerance="invalid") @pytest.mark.parametrize("dtype", [int, float]) def test_slice_locs(self, dtype): index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) n = len(index) assert index.slice_locs(start=2) == (2, n) assert index.slice_locs(start=3) == (3, n) assert index.slice_locs(3, 8) == (3, 6) assert index.slice_locs(5, 10) == (3, n) assert index.slice_locs(end=8) == (0, 6) assert index.slice_locs(end=9) == (0, 7) # reversed index2 = index[::-1] assert index2.slice_locs(8, 2) == (2, 6) assert 
index2.slice_locs(7, 3) == (2, 5) @pytest.mark.parametrize("dtype", [int, float]) def test_slice_float_locs(self, dtype): index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) n = len(index) assert index.slice_locs(5.0, 10.0) == (3, n) assert index.slice_locs(4.5, 10.5) == (3, 8) index2 = index[::-1] assert index2.slice_locs(8.5, 1.5) == (2, 6) assert index2.slice_locs(10.5, -1) == (0, n) def test_slice_locs_dup(self): index = Index(["a", "a", "b", "c", "d", "d"]) assert index.slice_locs("a", "d") == (0, 6) assert index.slice_locs(end="d") == (0, 6) assert index.slice_locs("a", "c") == (0, 4) assert index.slice_locs("b", "d") == (2, 6) index2 = index[::-1] assert index2.slice_locs("d", "a") == (0, 6) assert index2.slice_locs(end="a") == (0, 6) assert index2.slice_locs("d", "b") == (0, 4) assert index2.slice_locs("c", "a") == (2, 6) @pytest.mark.parametrize("dtype", [int, float]) def test_slice_locs_dup_numeric(self, dtype): index = Index(np.array([10, 12, 12, 14], dtype=dtype)) assert index.slice_locs(12, 12) == (1, 3) assert index.slice_locs(11, 13) == (1, 3) index2 = index[::-1] assert index2.slice_locs(12, 12) == (1, 3) assert index2.slice_locs(13, 11) == (1, 3) def test_slice_locs_na(self): index = Index([np.nan, 1, 2]) assert index.slice_locs(1) == (1, 3) assert index.slice_locs(np.nan) == (0, 3) index = Index([0, np.nan, np.nan, 1, 2]) assert index.slice_locs(np.nan) == (1, 5) def test_slice_locs_na_raises(self): index = Index([np.nan, 1, 2]) with pytest.raises(KeyError, match=""): index.slice_locs(start=1.5) with pytest.raises(KeyError, match=""): index.slice_locs(end=1.5) @pytest.mark.parametrize( "in_slice,expected", [ # error: Slice index must be an integer or None (pd.IndexSlice[::-1], "yxdcb"), (pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc] (pd.IndexSlice["b"::-1], "b"), # type: ignore[misc] (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc] (pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc] (pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc] (pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc] # absent labels (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc] (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc] (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc] (pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc] (pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc] (pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc] (pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc] (pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc] (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] ], ) def test_slice_locs_negative_step(self, in_slice, expected): index = Index(list("bcdxy")) s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step) result = index[s_start : s_stop : in_slice.step] expected = pd.Index(list(expected)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) def test_drop_by_str_label(self, index): n = len(index) drop = index[list(range(5, 10))] dropped = index.drop(drop) expected = index[list(range(5)) + list(range(10, n))] tm.assert_index_equal(dropped, expected) dropped = index.drop(index[0]) expected = index[1:] tm.assert_index_equal(dropped, expected) @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) @pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]]) def test_drop_by_str_label_raises_missing_keys(self, index, keys): with pytest.raises(KeyError, match=""): index.drop(keys) @pytest.mark.parametrize("index", 
["string", "int", "float"], indirect=True) def test_drop_by_str_label_errors_ignore(self, index): n = len(index) drop = index[list(range(5, 10))] mixed = drop.tolist() + ["foo"] dropped = index.drop(mixed, errors="ignore") expected = index[list(range(5)) + list(range(10, n))] tm.assert_index_equal(dropped, expected) dropped = index.drop(["foo", "bar"], errors="ignore") expected = index[list(range(n))] tm.assert_index_equal(dropped, expected) def test_drop_by_numeric_label_loc(self): # TODO: Parametrize numeric and str tests after self.strIndex fixture index = Index([1, 2, 3]) dropped = index.drop(1) expected = Index([2, 3]) tm.assert_index_equal(dropped, expected) def test_drop_by_numeric_label_raises_missing_keys(self): index = Index([1, 2, 3]) with pytest.raises(KeyError, match=""): index.drop([3, 4]) @pytest.mark.parametrize( "key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))] ) def test_drop_by_numeric_label_errors_ignore(self, key, expected): index = Index([1, 2, 3]) dropped = index.drop(key, errors="ignore") tm.assert_index_equal(dropped, expected) @pytest.mark.parametrize( "values", [["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]], ) @pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]]) def test_drop_tuple(self, values, to_drop): # GH 18304 index = pd.Index(values) expected = pd.Index(["b"]) result = index.drop(to_drop) tm.assert_index_equal(result, expected) removed = index.drop(to_drop[0]) for drop_me in to_drop[1], [to_drop[1]]: result = removed.drop(drop_me) tm.assert_index_equal(result, expected) removed = index.drop(to_drop[1]) msg = fr"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\"" for drop_me in to_drop[1], [to_drop[1]]: with pytest.raises(KeyError, match=msg): removed.drop(drop_me) @pytest.mark.parametrize( "method,expected,sort", [ ( "intersection", np.array( [(1, "A"), (2, "A"), (1, "B"), (2, "B")], dtype=[("num", int), ("let", "a1")], ), False, ), ( "intersection", np.array( [(1, "A"), (1, "B"), (2, "A"), (2, "B")], dtype=[("num", int), ("let", "a1")], ), None, ), ( "union", np.array( [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")], dtype=[("num", int), ("let", "a1")], ), None, ), ], ) def test_tuple_union_bug(self, method, expected, sort): index1 = Index( np.array( [(1, "A"), (2, "A"), (1, "B"), (2, "B")], dtype=[("num", int), ("let", "a1")], ) ) index2 = Index( np.array( [(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")], dtype=[("num", int), ("let", "a1")], ) ) result = getattr(index1, method)(index2, sort=sort) assert result.ndim == 1 expected = Index(expected) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "attr", [ "is_monotonic_increasing", "is_monotonic_decreasing", "_is_strictly_monotonic_increasing", "_is_strictly_monotonic_decreasing", ], ) def test_is_monotonic_incomparable(self, attr): index = Index([5, datetime.now(), 7]) assert not getattr(index, attr) def test_set_value_deprecated(self): # GH 28621 idx = self.create_index() arr = np.array([1, 2, 3]) with tm.assert_produces_warning(FutureWarning): idx.set_value(arr, idx[1], 80) assert arr[1] == 80 @pytest.mark.parametrize( "index", ["string", "int", "datetime", "timedelta"], indirect=True ) def test_get_value(self, index): # TODO: Remove function? 
GH 19728 values = np.random.randn(100) value = index[67] with pytest.raises(AttributeError, match="has no attribute '_values'"): # Index.get_value requires a Series, not an ndarray with tm.assert_produces_warning(FutureWarning): index.get_value(values, value) with tm.assert_produces_warning(FutureWarning): result = index.get_value(Series(values, index=values), value) tm.assert_almost_equal(result, values[67]) @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}]) @pytest.mark.parametrize( "index,expected", [ (Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])), (Index([]), np.array([], dtype=bool)), # empty ], ) def test_isin(self, values, index, expected): result = index.isin(values) tm.assert_numpy_array_equal(result, expected) def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2): # Test cartesian product of null fixtures and ensure that we don't # mangle the various types (save a corner case with PyPy) # all nans are the same if ( isinstance(nulls_fixture, float) and isinstance(nulls_fixture2, float) and math.isnan(nulls_fixture) and math.isnan(nulls_fixture2) ): tm.assert_numpy_array_equal( Index(["a", nulls_fixture]).isin([nulls_fixture2]), np.array([False, True]), ) elif nulls_fixture is nulls_fixture2: # should preserve NA type tm.assert_numpy_array_equal( Index(["a", nulls_fixture]).isin([nulls_fixture2]), np.array([False, True]), ) else: tm.assert_numpy_array_equal( Index(["a", nulls_fixture]).isin([nulls_fixture2]), np.array([False, False]), ) def test_isin_nan_common_float64(self, nulls_fixture): if nulls_fixture is pd.NaT: pytest.skip("pd.NaT not compatible with Float64Index") # Float64Index overrides isin, so must be checked separately if nulls_fixture is pd.NA: pytest.xfail("Float64Index cannot contain pd.NA") tm.assert_numpy_array_equal( Float64Index([1.0, nulls_fixture]).isin([np.nan]), np.array([False, True]) ) # we cannot compare NaT with NaN tm.assert_numpy_array_equal( Float64Index([1.0, nulls_fixture]).isin([pd.NaT]), np.array([False, False]) ) @pytest.mark.parametrize("level", [0, -1]) @pytest.mark.parametrize( "index", [ Index(["qux", "baz", "foo", "bar"]), # Float64Index overrides isin, so must be checked separately Float64Index([1.0, 2.0, 3.0, 4.0]), ], ) def test_isin_level_kwarg(self, level, index): values = index.tolist()[-2:] + ["nonexisting"] expected = np.array([False, False, True, True]) tm.assert_numpy_array_equal(expected, index.isin(values, level=level)) index.name = "foobar" tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar")) def test_isin_level_kwarg_bad_level_raises(self, index): for level in [10, index.nlevels, -(index.nlevels + 1)]: with pytest.raises(IndexError, match="Too many levels"): index.isin([], level=level) @pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan]) def test_isin_level_kwarg_bad_label_raises(self, label, index): if isinstance(index, MultiIndex): index = index.rename(["foo", "bar"] + index.names[2:]) msg = f"'Level {label} not found'" else: index = index.rename("foo") msg = fr"Requested level \({label}\) does not match index name \(foo\)" with pytest.raises(KeyError, match=msg): index.isin([], level=label) @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])]) def test_isin_empty(self, empty): # see gh-16991 index = Index(["a", "b"]) expected = np.array([False, False]) result = index.isin(empty) tm.assert_numpy_array_equal(expected, result) @pytest.mark.parametrize( "values", [ [1, 2, 3, 4], [1.0, 2.0, 3.0, 
4.0], [True, True, True, True], ["foo", "bar", "baz", "qux"], pd.date_range("2018-01-01", freq="D", periods=4), ], ) def test_boolean_cmp(self, values): index = Index(values) result = index == values expected = np.array([True, True, True, True], dtype=bool) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("index", ["string"], indirect=True) @pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")]) def test_get_level_values(self, index, name, level): expected = index.copy() if name: expected.name = name result = expected.get_level_values(level) tm.assert_index_equal(result, expected) def test_slice_keep_name(self): index = Index(["a", "b"], name="asdf") assert index.name == index[1:].name @pytest.mark.parametrize( "index", ["unicode", "string", "datetime", "int", "uint", "float"], indirect=True, ) def test_join_self(self, index, join_type): joined = index.join(index, how=join_type) assert index is joined @pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"]) def test_str_attribute(self, method): # GH9068 index = Index([" jack", "jill ", " jesse ", "frank"]) expected = Index([getattr(str, method)(x) for x in index.values]) result = getattr(index.str, method)() tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "index", [ Index(range(5)), tm.makeDateIndex(10), MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]), period_range(start="2000", end="2010", freq="A"), ], ) def test_str_attribute_raises(self, index): with pytest.raises(AttributeError, match="only use .str accessor"): index.str.repeat(2) @pytest.mark.parametrize( "expand,expected", [ (None, Index([["a", "b", "c"], ["d", "e"], ["f"]])), (False, Index([["a", "b", "c"], ["d", "e"], ["f"]])), ( True, MultiIndex.from_tuples( [("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)] ), ), ], ) def test_str_split(self, expand, expected): index = Index(["a b c", "d e", "f"]) if expand is not None: result = index.str.split(expand=expand) else: result = index.str.split() tm.assert_index_equal(result, expected) def test_str_bool_return(self): # test boolean case, should return np.array instead of boolean Index index = Index(["a1", "a2", "b1", "b2"]) result = index.str.startswith("a") expected = np.array([True, True, False, False]) tm.assert_numpy_array_equal(result, expected) assert isinstance(result, np.ndarray) def test_str_bool_series_indexing(self): index = Index(["a1", "a2", "b1", "b2"]) s = Series(range(4), index=index) result = s[s.index.str.startswith("a")] expected = Series(range(2), index=["a1", "a2"]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)] ) def test_tab_completion(self, index, expected): # GH 9910 result = "str" in dir(index) assert result == expected def test_indexing_doesnt_change_class(self): index = Index([1, 2, 3, "a", "b", "c"]) assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_)) assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_)) def test_outer_join_sort(self): left_index = Index(np.random.permutation(15)) right_index = tm.makeDateIndex(10) with tm.assert_produces_warning(RuntimeWarning): result = left_index.join(right_index, how="outer") # right_index in this case because DatetimeIndex has join precedence # over Int64Index with tm.assert_produces_warning(RuntimeWarning): expected = right_index.astype(object).union(left_index.astype(object)) tm.assert_index_equal(result, expected) def test_nan_first_take_datetime(self): index = Index([pd.NaT, 
Timestamp("20130101"), Timestamp("20130102")]) result = index.take([-1, 0, 1]) expected = Index([index[-1], index[0], index[1]]) tm.assert_index_equal(result, expected) def test_take_fill_value(self): # GH 12631 index = pd.Index(list("ABC"), name="xxx") result = index.take(np.array([1, 0, -1])) expected = pd.Index(list("BAC"), name="xxx") tm.assert_index_equal(result, expected) # fill_value result = index.take(np.array([1, 0, -1]), fill_value=True) expected = pd.Index(["B", "A", np.nan], name="xxx") tm.assert_index_equal(result, expected) # allow_fill=False result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.Index(["B", "A", "C"], name="xxx") tm.assert_index_equal(result, expected) def test_take_fill_value_none_raises(self): index = pd.Index(list("ABC"), name="xxx") msg = ( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) with pytest.raises(ValueError, match=msg): index.take(np.array([1, 0, -2]), fill_value=True) with pytest.raises(ValueError, match=msg): index.take(np.array([1, 0, -5]), fill_value=True) def test_take_bad_bounds_raises(self): index = pd.Index(list("ABC"), name="xxx") with pytest.raises(IndexError, match="out of bounds"): index.take(np.array([1, -5])) @pytest.mark.parametrize("name", [None, "foobar"]) @pytest.mark.parametrize( "labels", [ [], np.array([]), ["A", "B", "C"], ["C", "B", "A"], np.array(["A", "B", "C"]), np.array(["C", "B", "A"]), # Must preserve name even if dtype changes pd.date_range("20130101", periods=3).values, pd.date_range("20130101", periods=3).tolist(), ], ) def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels): # GH6552 index = pd.Index([0, 1, 2]) index.name = name assert index.reindex(labels)[0].name == name @pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)]) def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels): # GH7774 index = pd.Index(list("abc")) assert index.reindex(labels)[0].dtype.type == np.object_ @pytest.mark.parametrize( "labels,dtype", [ (pd.Int64Index([]), np.int64), (pd.Float64Index([]), np.float64), (pd.DatetimeIndex([]), np.datetime64), ], ) def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype): # GH7774 index = pd.Index(list("abc")) assert index.reindex(labels)[0].dtype.type == dtype def test_reindex_no_type_preserve_target_empty_mi(self): index = pd.Index(list("abc")) result = index.reindex( pd.MultiIndex([pd.Int64Index([]), pd.Float64Index([])], [[], []]) )[0] assert result.levels[0].dtype.type == np.int64 assert result.levels[1].dtype.type == np.float64 def test_groupby(self): index = Index(range(5)) result = index.groupby(np.array([1, 1, 2, 2, 2])) expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} tm.assert_dict_equal(result, expected) @pytest.mark.parametrize( "mi,expected", [ (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])), (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])), ], ) def test_equals_op_multiindex(self, mi, expected): # GH9785 # test comparisons of multiindex df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1]) result = df.index == mi tm.assert_numpy_array_equal(result, expected) def test_equals_op_multiindex_identify(self): df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1]) result = df.index == df.index expected = np.array([True, True]) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( "index", [ MultiIndex.from_tuples([(1, 2), (4, 5), 
(8, 9)]), Index(["foo", "bar", "baz"]), ], ) def test_equals_op_mismatched_multiindex_raises(self, index): df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1]) with pytest.raises(ValueError, match="Lengths must match"): df.index == index def test_equals_op_index_vs_mi_same_length(self): mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) index = Index(["foo", "bar", "baz"]) result = mi == index expected = np.array([False, False, False]) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("dt_conv", [pd.to_datetime, pd.to_timedelta]) def test_dt_conversion_preserves_name(self, dt_conv): # GH 10875 index = pd.Index(["01:02:03", "01:02:04"], name="label") assert index.name == dt_conv(index).name @pytest.mark.parametrize( "index,expected", [ # ASCII # short ( pd.Index(["a", "bb", "ccc"]), """Index(['a', 'bb', 'ccc'], dtype='object')""", ), # multiple lines ( pd.Index(["a", "bb", "ccc"] * 10), """\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], dtype='object')""", ), # truncated ( pd.Index(["a", "bb", "ccc"] * 100), """\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], dtype='object', length=300)""", ), # Non-ASCII # short ( pd.Index(["あ", "いい", "ううう"]), """Index(['あ', 'いい', 'ううう'], dtype='object')""", ), # multiple lines ( pd.Index(["あ", "いい", "ううう"] * 10), ( "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" " 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" " 'あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう'],\n" " dtype='object')" ), ), # truncated ( pd.Index(["あ", "いい", "ううう"] * 100), ( "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " "'あ', 'いい', 'ううう', 'あ',\n" " ...\n" " 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう', 'あ', 'いい', 'ううう'],\n" " dtype='object', length=300)" ), ), ], ) def test_string_index_repr(self, index, expected): result = repr(index) assert result == expected @pytest.mark.parametrize( "index,expected", [ # short ( pd.Index(["あ", "いい", "ううう"]), ("Index(['あ', 'いい', 'ううう'], dtype='object')"), ), # multiple lines ( pd.Index(["あ", "いい", "ううう"] * 10), ( "Index(['あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう', 'あ', 'いい', 'ううう',\n" " 'あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう', 'あ', 'いい', 'ううう',\n" " 'あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう', 'あ', 'いい', 'ううう',\n" " 'あ', 'いい', 'ううう'],\n" " dtype='object')" "" ), ), # truncated ( pd.Index(["あ", "いい", "ううう"] * 100), ( "Index(['あ', 'いい', 'ううう', 'あ', 'いい', " "'ううう', 'あ', 'いい', 'ううう',\n" " 'あ',\n" " ...\n" " 'ううう', 'あ', 'いい', 'ううう', 'あ', " "'いい', 'ううう', 'あ', 'いい',\n" " 'ううう'],\n" " dtype='object', length=300)" ), ), ], ) def test_string_index_repr_with_unicode_option(self, index, expected): # Enable Unicode option ----------------------------------------- with cf.option_context("display.unicode.east_asian_width", True): result = repr(index) assert result == expected def test_cached_properties_not_settable(self): index = pd.Index([1, 2, 3]) with pytest.raises(AttributeError, match="Can't set attribute"): index.is_unique = False @async_mark() async def test_tab_complete_warning(self, ip): # https://github.com/pandas-dev/pandas/issues/16409 pytest.importorskip("IPython", minversion="6.0.0") from IPython.core.completer import provisionalcompleter code = "import pandas as pd; idx = pd.Index([1, 2])" await ip.run_code(code) # GH 31324 newer jedi version 
raises Deprecation warning import jedi if jedi.__version__ < "0.16.0": warning = tm.assert_produces_warning(None) else: warning = tm.assert_produces_warning( DeprecationWarning, check_stacklevel=False ) with warning: with provisionalcompleter("ignore"): list(ip.Completer.completions("idx.", 4)) def test_contains_method_removed(self, index): # GH#30103 method removed for all types except IntervalIndex if isinstance(index, pd.IntervalIndex): index.contains(1) else: msg = f"'{type(index).__name__}' object has no attribute 'contains'" with pytest.raises(AttributeError, match=msg): index.contains(1) class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ # in py2 and py3 because ints and strings are uncomparable in py3 # (GH 13514) _holder = Index @pytest.fixture(params=[[0, "a", 1, "b", 2, "c"]], ids=["mixedIndex"]) def index(self, request): return Index(request.param) def create_index(self) -> Index: return Index([0, "a", 1, "b", 2, "c"]) def test_argsort(self): index = self.create_index() with pytest.raises(TypeError, match="'>|<' not supported"): index.argsort() def test_numpy_argsort(self): index = self.create_index() with pytest.raises(TypeError, match="'>|<' not supported"): np.argsort(index) def test_copy_name(self): # Check that "name" argument passed at initialization is honoured # GH12309 index = self.create_index() first = type(index)(index, copy=True, name="mario") second = type(first)(first, copy=False) # Even though "copy=False", we want a new object. assert first is not second tm.assert_index_equal(first, second) assert first.name == "mario" assert second.name == "mario" s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) s3 = s1 * s2 assert s3.index.name == "mario" def test_copy_name2(self): # Check that adding a "name" parameter to the copy is honored # GH14302 index = pd.Index([1, 2], name="MyName") index1 = index.copy() tm.assert_index_equal(index, index1) index2 = index.copy(name="NewName") tm.assert_index_equal(index, index2, check_names=False) assert index.name == "MyName" assert index2.name == "NewName" index3 = index.copy(names=["NewName"]) tm.assert_index_equal(index, index3, check_names=False) assert index.name == "MyName" assert index.names == ["MyName"] assert index3.name == "NewName" assert index3.names == ["NewName"] def test_unique_na(self): idx = pd.Index([2, np.nan, 2, 1], name="my_index") expected = pd.Index([2, np.nan, 1], name="my_index") result = idx.unique() tm.assert_index_equal(result, expected) def test_logical_compat(self): index = self.create_index() assert index.all() == index.values.all() assert index.any() == index.values.any() @pytest.mark.parametrize("how", ["any", "all"]) @pytest.mark.parametrize("dtype", [None, object, "category"]) @pytest.mark.parametrize( "vals,expected", [ ([1, 2, 3], [1, 2, 3]), ([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]), ([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]), (["A", "B", "C"], ["A", "B", "C"]), (["A", np.nan, "B", "C"], ["A", "B", "C"]), ], ) def test_dropna(self, how, dtype, vals, expected): # GH 6194 index = pd.Index(vals, dtype=dtype) result = index.dropna(how=how) expected = pd.Index(expected, dtype=dtype) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("how", ["any", "all"]) @pytest.mark.parametrize( "index,expected", [ ( pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), ), ( pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]), pd.DatetimeIndex(["2011-01-01", 
"2011-01-02", "2011-01-03"]), ), ( pd.TimedeltaIndex(["1 days", "2 days", "3 days"]), pd.TimedeltaIndex(["1 days", "2 days", "3 days"]), ), ( pd.TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]), pd.TimedeltaIndex(["1 days", "2 days", "3 days"]), ), ( pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), ), ( pd.PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"), pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), ), ], ) def test_dropna_dt_like(self, how, index, expected): result = index.dropna(how=how) tm.assert_index_equal(result, expected) def test_dropna_invalid_how_raises(self): msg = "invalid how option: xxx" with pytest.raises(ValueError, match=msg): pd.Index([1, 2, 3]).dropna(how="xxx") def test_get_combined_index(self): result = _get_combined_index([]) expected = Index([]) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "index", [ pd.Index([np.nan]), pd.Index([np.nan, 1]), pd.Index([1, 2, np.nan]), pd.Index(["a", "b", np.nan]), pd.to_datetime(["NaT"]), pd.to_datetime(["NaT", "2000-01-01"]), pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]), pd.to_timedelta(["1 day", "NaT"]), ], ) def test_is_monotonic_na(self, index): assert index.is_monotonic_increasing is False assert index.is_monotonic_decreasing is False assert index._is_strictly_monotonic_increasing is False assert index._is_strictly_monotonic_decreasing is False def test_repr_summary(self): with cf.option_context("display.max_seq_items", 10): result = repr(pd.Index(np.arange(1000))) assert len(result) < 200 assert "..." in result @pytest.mark.parametrize("klass", [Series, DataFrame]) def test_int_name_format(self, klass): index = Index(["a", "b", "c"], name=0) result = klass(list(range(3)), index=index) assert "0" in repr(result) def test_str_to_bytes_raises(self): # GH 26447 index = Index([str(x) for x in range(10)]) msg = "^'str' object cannot be interpreted as an integer$" with pytest.raises(TypeError, match=msg): bytes(index) def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] index1 = Index(dt_dates, dtype=object) index2 = Index(["aa"], dtype=object) result = index2.intersection(index1) expected = Index([], dtype=object) tm.assert_index_equal(result, expected) def test_index_repr_bool_nan(self): # GH32146 arr = Index([True, False, np.nan], dtype=object) exp1 = arr.format() out1 = ["True", "False", "NaN"] assert out1 == exp1 exp2 = repr(arr) out2 = "Index([True, False, nan], dtype='object')" assert out2 == exp2 @pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning") def test_index_with_tuple_bool(self): # GH34123 # TODO: remove tupleize_cols=False once correct behaviour is restored # TODO: also this op right now produces FutureWarning from numpy idx = Index([("a", "b"), ("b", "c"), ("c", "a")], tupleize_cols=False) result = idx == ("c", "a") expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) class TestIndexUtils: @pytest.mark.parametrize( "data, names, expected", [ ([[1, 2, 3]], None, Index([1, 2, 3])), ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")), ( [["a", "a"], ["c", "d"]], None, MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]), ), ( [["a", "a"], ["c", "d"]], ["L1", "L2"], MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]), ), ], ) def test_ensure_index_from_sequences(self, data, names, expected): result = ensure_index_from_sequences(data, names) 
tm.assert_index_equal(result, expected) def test_ensure_index_mixed_closed_intervals(self): # GH27172 intervals = [ pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right"), pd.Interval(2, 3, closed="neither"), pd.Interval(3, 4, closed="both"), ] result = ensure_index(intervals) expected = Index(intervals, dtype=object) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "opname", [ "eq", "ne", "le", "lt", "ge", "gt", "add", "radd", "sub", "rsub", "mul", "rmul", "truediv", "rtruediv", "floordiv", "rfloordiv", "pow", "rpow", "mod", "divmod", ], ) def test_generated_op_names(opname, index): if isinstance(index, ABCIndex) and opname == "rsub": # pd.Index.__rsub__ does not exist; though the method does exist # for subclasses. see GH#19723 return opname = f"__{opname}__" method = getattr(index, opname) assert method.__name__ == opname @pytest.mark.parametrize("index_maker", tm.index_subclass_makers_generator()) def test_index_subclass_constructor_wrong_kwargs(index_maker): # GH #19348 with pytest.raises(TypeError, match="unexpected keyword argument"): index_maker(foo="bar") def test_deprecated_fastpath(): msg = "[Uu]nexpected keyword argument" with pytest.raises(TypeError, match=msg): pd.Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True) with pytest.raises(TypeError, match=msg): pd.Int64Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True) with pytest.raises(TypeError, match=msg): pd.RangeIndex(0, 5, 2, name="test", fastpath=True) with pytest.raises(TypeError, match=msg): pd.CategoricalIndex(["a", "b", "c"], name="test", fastpath=True) def test_shape_of_invalid_index(): # Currently, it is possible to create "invalid" index objects backed by # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125 # about this). 
    # However, as long as this is not solved in general, this test ensures
    # that the returned shape is consistent with this underlying array for
    # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
    idx = pd.Index([0, 1, 2, 3])

    with tm.assert_produces_warning(FutureWarning):
        # GH#30588 multi-dimensional indexing deprecated
        assert idx[:, None].shape == (4, 1)


def test_validate_1d_input():
    # GH#27125 check that we do not have >1-dimensional input
    msg = "Index data must be 1-dimensional"

    arr = np.arange(8).reshape(2, 2, 2)
    with pytest.raises(ValueError, match=msg):
        pd.Index(arr)

    with pytest.raises(ValueError, match=msg):
        pd.Float64Index(arr.astype(np.float64))

    with pytest.raises(ValueError, match=msg):
        pd.Int64Index(arr.astype(np.int64))

    with pytest.raises(ValueError, match=msg):
        pd.UInt64Index(arr.astype(np.uint64))

    df = pd.DataFrame(arr.reshape(4, 2))
    with pytest.raises(ValueError, match=msg):
        pd.Index(df)

    # GH#13601 trying to assign a multi-dimensional array to an index is not
    # allowed
    ser = pd.Series(0, range(4))
    with pytest.raises(ValueError, match=msg):
        ser.index = np.array([[2, 3]] * 4)


def test_convert_almost_null_slice(index):
    # slice with None at both ends, but not step
    key = slice(None, None, "foo")

    if isinstance(index, pd.IntervalIndex):
        msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
        with pytest.raises(ValueError, match=msg):
            index._convert_slice_indexer(key, "loc")
    else:
        msg = "'>=' not supported between instances of 'str' and 'int'"
        with pytest.raises(TypeError, match=msg):
            index._convert_slice_indexer(key, "loc")


dtlike_dtypes = [
    np.dtype("timedelta64[ns]"),
    np.dtype("datetime64[ns]"),
    pd.DatetimeTZDtype("ns", "Asia/Tokyo"),
    pd.PeriodDtype("ns"),
]


@pytest.mark.parametrize("ldtype", dtlike_dtypes)
@pytest.mark.parametrize("rdtype", dtlike_dtypes)
def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):

    vals = np.tile(3600 * 10 ** 9 * np.arange(3), 2)

    def construct(dtype):
        if dtype is dtlike_dtypes[-1]:
            # PeriodArray will try to cast ints to strings
            return pd.DatetimeIndex(vals).astype(dtype)
        return pd.Index(vals, dtype=dtype)

    left = construct(ldtype)
    right = construct(rdtype)

    result = left.get_indexer_non_unique(right)

    if ldtype is rdtype:
        ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)
        ex2 = np.array([], dtype=np.intp)
        tm.assert_numpy_array_equal(result[0], ex1)
        tm.assert_numpy_array_equal(result[1], ex2)
    else:
        no_matches = np.array([-1] * 6, dtype=np.intp)
        tm.assert_numpy_array_equal(result[0], no_matches)
        tm.assert_numpy_array_equal(result[1], no_matches)
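

# A minimal, self-contained sketch of the behaviour exercised by
# test_get_indexer_non_unique_wrong_dtype above. The helper name
# ``_demo_get_indexer_dtype_mismatch`` is hypothetical (it is not part of the
# test suite) and assumes ``np``/``pd`` are imported at module level, as they
# are used elsewhere in this file: indexes backed by different datetime-like
# dtypes never match, so every target label maps to -1.
def _demo_get_indexer_dtype_mismatch():
    vals = np.tile(3600 * 10 ** 9 * np.arange(3), 2)
    left = pd.Index(vals, dtype=np.dtype("datetime64[ns]"))
    right = pd.Index(vals, dtype=np.dtype("timedelta64[ns]"))
    indexer, _ = left.get_indexer_non_unique(right)
    # Mirrors the ``no_matches`` expectation in the parametrized test above.
    return indexer  # array([-1, -1, -1, -1, -1, -1])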
bsd-3-clause
olafhauk/mne-python
mne/utils/numerics.py
4
36095
# -*- coding: utf-8 -*- """Some utility functions.""" # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) from contextlib import contextmanager import hashlib from io import BytesIO, StringIO from math import sqrt import numbers import operator import os import os.path as op from math import ceil import shutil import sys from datetime import datetime, timedelta, timezone import numpy as np from scipy import sparse from ._logging import logger, warn, verbose from .check import check_random_state, _ensure_int, _validate_type from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd from .docs import fill_doc def split_list(v, n, idx=False): """Split list in n (approx) equal pieces, possibly giving indices.""" n = int(n) tot = len(v) sz = tot // n start = stop = 0 for i in range(n - 1): stop += sz yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop] start += sz yield (np.arange(start, tot), v[start:]) if idx else v[start] def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1): """Do what numpy.array_split does, but add indices.""" # this only works for indices_or_sections as int indices_or_sections = _ensure_int(indices_or_sections) ary_split = np.array_split(ary, indices_or_sections, axis=axis) idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections) idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split) for sp in idx_split) return zip(idx_split, ary_split) def create_chunks(sequence, size): """Generate chunks from a sequence. Parameters ---------- sequence : iterable Any iterable object size : int The chunksize to be returned """ return (sequence[p:p + size] for p in range(0, len(sequence), size)) def sum_squared(X): """Compute norm of an array. Parameters ---------- X : array Data whose norm must be found. Returns ------- value : float Sum of squares of the input array X. """ X_flat = X.ravel(order='F' if np.isfortran(X) else 'C') return np.dot(X_flat, X_flat) def _compute_row_norms(data): """Compute scaling based on estimated norm.""" norms = np.sqrt(np.sum(data ** 2, axis=1)) norms[norms == 0] = 1.0 return norms def _reg_pinv(x, reg=0, rank='full', rcond=1e-15): """Compute a regularized pseudoinverse of Hermitian matrices. Regularization is performed by adding a constant value to each diagonal element of the matrix before inversion. This is known as "diagonal loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``. The pseudo-inverse is computed through SVD decomposition and inverting the singular values. When the matrix is rank deficient, some singular values will be close to zero and will not be used during the inversion. The number of singular values to use can either be manually specified or automatically estimated. Parameters ---------- x : ndarray, shape (..., n, n) Square, Hermitian matrices to invert. reg : float Regularization parameter. Defaults to 0. rank : int | None | 'full' This controls the effective rank of the covariance matrix when computing the inverse. The rank can be set explicitly by specifying an integer value. If ``None``, the rank will be automatically estimated. Since applying regularization will always make the covariance matrix full rank, the rank is estimated before regularization in this case. If 'full', the rank will be estimated after regularization and hence will mean using the full rank, unless ``reg=0`` is used. Defaults to 'full'. 
rcond : float | 'auto' Cutoff for detecting small singular values when attempting to estimate the rank of the matrix (``rank='auto'``). Singular values smaller than the cutoff are set to zero. When set to 'auto', a cutoff based on floating point precision will be used. Defaults to 1e-15. Returns ------- x_inv : ndarray, shape (..., n, n) The inverted matrix. loading_factor : float Value added to the diagonal of the matrix during regularization. rank : int If ``rank`` was set to an integer value, this value is returned, else the estimated rank of the matrix, before regularization, is returned. """ from ..rank import _estimate_rank_from_s if rank is not None and rank != 'full': rank = int(operator.index(rank)) if x.ndim < 2 or x.shape[-2] != x.shape[-1]: raise ValueError('Input matrix must be square.') if not np.allclose(x, x.conj().swapaxes(-2, -1)): raise ValueError('Input matrix must be Hermitian (symmetric)') assert x.ndim >= 2 and x.shape[-2] == x.shape[-1] n = x.shape[-1] # Decompose the matrix, not necessarily positive semidefinite from mne.fixes import svd U, s, Vh = svd(x, hermitian=True) # Estimate the rank before regularization tol = 'auto' if rcond == 'auto' else rcond * s[..., :1] rank_before = _estimate_rank_from_s(s, tol) # Decompose the matrix again after regularization loading_factor = reg * np.mean(s, axis=-1) if reg: U, s, Vh = svd( x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n), hermitian=True) # Estimate the rank after regularization tol = 'auto' if rcond == 'auto' else rcond * s[..., :1] rank_after = _estimate_rank_from_s(s, tol) # Warn the user if both all parameters were kept at their defaults and the # matrix is rank deficient. if (rank_after < n).any() and reg == 0 and \ rank == 'full' and rcond == 1e-15: warn('Covariance matrix is rank-deficient and no regularization is ' 'done.') elif isinstance(rank, int) and rank > n: raise ValueError('Invalid value for the rank parameter (%d) given ' 'the shape of the input matrix (%d x %d).' % (rank, x.shape[0], x.shape[1])) # Pick the requested number of singular values mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,)) if rank is None: cmp = ret = rank_before elif rank == 'full': cmp = rank_after ret = rank_before else: cmp = ret = rank mask = mask < np.asarray(cmp)[..., np.newaxis] mask &= s > 0 # Invert only non-zero singular values s_inv = np.zeros(s.shape) s_inv[mask] = 1. 
/ s[mask] # Compute the pseudo inverse x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh) return x_inv, loading_factor, ret def _gen_events(n_epochs): """Generate event structure from number of epochs.""" events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int), np.ones(n_epochs, int)] return events def _reject_data_segments(data, reject, flat, decim, info, tstep): """Reject data segments using peak-to-peak amplitude.""" from ..epochs import _is_good from ..io.pick import channel_indices_by_type data_clean = np.empty_like(data) idx_by_type = channel_indices_by_type(info) step = int(ceil(tstep * info['sfreq'])) if decim is not None: step = int(ceil(step / float(decim))) this_start = 0 this_stop = 0 drop_inds = [] for first in range(0, data.shape[1], step): last = first + step data_buffer = data[:, first:last] if data_buffer.shape[1] < (last - first): break # end of the time segment if _is_good(data_buffer, info['ch_names'], idx_by_type, reject, flat, ignore_chs=info['bads']): this_stop = this_start + data_buffer.shape[1] data_clean[:, this_start:this_stop] = data_buffer this_start += data_buffer.shape[1] else: logger.info("Artifact detected in [%d, %d]" % (first, last)) drop_inds.append((first, last)) data = data_clean[:, :this_stop] if not data.any(): raise RuntimeError('No clean segment found. Please ' 'consider updating your rejection ' 'thresholds.') return data, drop_inds def _get_inst_data(inst): """Get data view from MNE object instance like Raw, Epochs or Evoked.""" from ..io.base import BaseRaw from ..epochs import BaseEpochs from .. import Evoked from ..time_frequency.tfr import _BaseTFR _validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance") if not inst.preload: inst.load_data() return inst._data def compute_corr(x, y): """Compute pearson correlations between a vector and a matrix.""" if len(x) == 0 or len(y) == 0: raise ValueError('x or y has zero length') X = np.array(x, float) Y = np.array(y, float) X -= X.mean(0) Y -= Y.mean(0) x_sd = X.std(0, ddof=1) # if covariance matrix is fully expanded, Y needs a # transpose / broadcasting else Y is correct y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis] return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd) @fill_doc def random_permutation(n_samples, random_state=None): """Emulate the randperm matlab function. It returns a vector containing a random permutation of the integers between 0 and n_samples-1. It returns the same random numbers than randperm matlab function whenever the random_state is the same as the matlab's random seed. This function is useful for comparing against matlab scripts which use the randperm function. Note: the randperm(n_samples) matlab function generates a random sequence between 1 and n_samples, whereas random_permutation(n_samples, random_state) function generates a random sequence between 0 and n_samples-1, that is: randperm(n_samples) = random_permutation(n_samples, random_state) - 1 Parameters ---------- n_samples : int End point of the sequence to be permuted (excluded, i.e., the end point is equal to n_samples-1) %(random_state)s Returns ------- randperm : ndarray, int Randomly permuted sequence between 0 and n-1. 
""" rng = check_random_state(random_state) # This can't just be rng.permutation(n_samples) because it's not identical # to what MATLAB produces idx = rng.uniform(size=n_samples) randperm = np.argsort(idx) return randperm @verbose def _apply_scaling_array(data, picks_list, scalings, verbose=None): """Scale data type-dependently for estimation.""" scalings = _check_scaling_inputs(data, picks_list, scalings) if isinstance(scalings, dict): logger.debug(' Scaling using mapping %s.' % (scalings,)) picks_dict = dict(picks_list) scalings = [(picks_dict[k], v) for k, v in scalings.items() if k in picks_dict] for idx, scaling in scalings: data[idx, :] *= scaling # F - order else: logger.debug(' Scaling using computed norms.') data *= scalings[:, np.newaxis] # F - order def _invert_scalings(scalings): if isinstance(scalings, dict): scalings = {k: 1. / v for k, v in scalings.items()} elif isinstance(scalings, np.ndarray): scalings = 1. / scalings return scalings def _undo_scaling_array(data, picks_list, scalings): scalings = _invert_scalings(_check_scaling_inputs(data, picks_list, scalings)) return _apply_scaling_array(data, picks_list, scalings, verbose=False) @contextmanager def _scaled_array(data, picks_list, scalings): """Scale, use, unscale array.""" _apply_scaling_array(data, picks_list=picks_list, scalings=scalings) try: yield finally: _undo_scaling_array(data, picks_list=picks_list, scalings=scalings) def _apply_scaling_cov(data, picks_list, scalings): """Scale resulting data after estimation.""" scalings = _check_scaling_inputs(data, picks_list, scalings) scales = None if isinstance(scalings, dict): n_channels = len(data) covinds = list(zip(*picks_list))[1] assert len(data) == sum(len(k) for k in covinds) assert list(sorted(np.concatenate(covinds))) == list(range(len(data))) scales = np.zeros(n_channels) for ch_t, idx in picks_list: scales[idx] = scalings[ch_t] elif isinstance(scalings, np.ndarray): if len(scalings) != len(data): raise ValueError('Scaling factors and data are of incompatible ' 'shape') scales = scalings elif scalings is None: pass else: raise RuntimeError('Arff...') if scales is not None: assert np.sum(scales == 0.) == 0 data *= (scales[None, :] * scales[:, None]) def _undo_scaling_cov(data, picks_list, scalings): scalings = _invert_scalings(_check_scaling_inputs(data, picks_list, scalings)) return _apply_scaling_cov(data, picks_list, scalings) def _check_scaling_inputs(data, picks_list, scalings): """Aux function.""" rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6) scalings_ = None if isinstance(scalings, str) and scalings == 'norm': scalings_ = 1. / _compute_row_norms(data) elif isinstance(scalings, dict): rescale_dict_.update(scalings) scalings_ = rescale_dict_ elif isinstance(scalings, np.ndarray): scalings_ = scalings elif scalings is None: pass else: raise NotImplementedError("No way! That's not a rescaling " 'option: %s' % scalings) return scalings_ def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20 """Calculate the hash for a file. Parameters ---------- fname : str Filename. block_size : int Block size to use when reading. Returns ------- hash_ : str The hexadecimal digest of the hash. 
""" if hash_type == "md5": hasher = hashlib.md5() elif hash_type == "sha1": hasher = hashlib.sha1() with open(fname, 'rb') as fid: while True: data = fid.read(block_size) if not data: break hasher.update(data) return hasher.hexdigest() def _replace_md5(fname): """Replace a file based on MD5sum.""" # adapted from sphinx-gallery assert fname.endswith('.new') fname_old = fname[:-4] if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old): os.remove(fname) else: shutil.move(fname, fname_old) def create_slices(start, stop, step=None, length=1): """Generate slices of time indexes. Parameters ---------- start : int Index where first slice should start. stop : int Index where last slice should maximally end. length : int Number of time sample included in a given slice. step: int | None Number of time samples separating two slices. If step = None, step = length. Returns ------- slices : list List of slice objects. """ # default parameters if step is None: step = length # slicing slices = [slice(t, t + length, 1) for t in range(start, stop - length + 1, step)] return slices def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True, include_tmax=True): """Safely find sample boundaries.""" orig_tmin = tmin orig_tmax = tmax tmin = -np.inf if tmin is None else tmin tmax = np.inf if tmax is None else tmax if not np.isfinite(tmin): tmin = times[0] if not np.isfinite(tmax): tmax = times[-1] include_tmax = True # ignore this param when tmax is infinite if sfreq is not None: # Push to a bit past the nearest sample boundary first sfreq = float(sfreq) tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq tmax = int(round(tmax * sfreq)) / sfreq tmax += (0.5 if include_tmax else -0.5) / sfreq else: assert include_tmax # can only be used when sfreq is known if raise_error and tmin > tmax: raise ValueError('tmin (%s) must be less than or equal to tmax (%s)' % (orig_tmin, orig_tmax)) mask = (times >= tmin) mask &= (times <= tmax) if raise_error and not mask.any(): extra = '' if include_tmax else 'when include_tmax=False ' raise ValueError('No samples remain when using tmin=%s and tmax=%s %s' '(original time bounds are [%s, %s])' % (orig_tmin, orig_tmax, extra, times[0], times[-1])) return mask def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True): """Safely find frequency boundaries.""" orig_fmin = fmin orig_fmax = fmax fmin = -np.inf if fmin is None else fmin fmax = np.inf if fmax is None else fmax if not np.isfinite(fmin): fmin = freqs[0] if not np.isfinite(fmax): fmax = freqs[-1] if sfreq is None: raise ValueError('sfreq can not be None') # Push 0.5/sfreq past the nearest frequency boundary first sfreq = float(sfreq) fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq if raise_error and fmin > fmax: raise ValueError('fmin (%s) must be less than or equal to fmax (%s)' % (orig_fmin, orig_fmax)) mask = (freqs >= fmin) mask &= (freqs <= fmax) if raise_error and not mask.any(): raise ValueError('No frequencies remain when using fmin=%s and ' 'fmax=%s (original frequency bounds are [%s, %s])' % (orig_fmin, orig_fmax, freqs[0], freqs[-1])) return mask def grand_average(all_inst, interpolate_bads=True, drop_bads=True): """Make grand average of a list of Evoked or AverageTFR data. For :class:`mne.Evoked` data, the function interpolates bad channels based on the ``interpolate_bads`` parameter. 
If ``interpolate_bads`` is True, the grand average file will contain good channels and the bad channels interpolated from the good MEG/EEG channels. For :class:`mne.time_frequency.AverageTFR` data, the function takes the subset of channels not marked as bad in any of the instances. The ``grand_average.nave`` attribute will be equal to the number of evoked datasets used to calculate the grand average. .. note:: A grand average evoked should not be used for source localization. Parameters ---------- all_inst : list of Evoked or AverageTFR The evoked datasets. interpolate_bads : bool If True, bad MEG and EEG channels are interpolated. Ignored for AverageTFR. drop_bads : bool If True, drop all bad channels marked as bad in any data set. If neither interpolate_bads nor drop_bads is True, in the output file, every channel marked as bad in at least one of the input files will be marked as bad, but no interpolation or dropping will be performed. Returns ------- grand_average : Evoked | AverageTFR The grand average data. Same type as input. Notes ----- .. versionadded:: 0.11.0 """ # check if all elements in the given list are evoked data from ..evoked import Evoked from ..time_frequency import AverageTFR from ..channels.channels import equalize_channels if not all_inst: raise ValueError('Please pass a list of Evoked or AverageTFR objects.') elif len(all_inst) == 1: warn('Only a single dataset was passed to mne.grand_average().') inst_type = type(all_inst[0]) _validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements') for inst in all_inst: _validate_type(inst, inst_type, 'All elements', 'of the same type') # Copy channels to leave the original evoked datasets intact. all_inst = [inst.copy() for inst in all_inst] # Interpolates if necessary if isinstance(all_inst[0], Evoked): if interpolate_bads: all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0 else inst for inst in all_inst] from ..evoked import combine_evoked as combine else: # isinstance(all_inst[0], AverageTFR): from ..time_frequency.tfr import combine_tfr as combine if drop_bads: bads = list({b for inst in all_inst for b in inst.info['bads']}) if bads: for inst in all_inst: inst.drop_channels(bads) equalize_channels(all_inst, copy=False) # make grand_average object using combine_[evoked/tfr] grand_average = combine(all_inst, weights='equal') # change the grand_average.nave to the number of Evokeds grand_average.nave = len(all_inst) # change comment field grand_average.comment = "Grand average (n = %d)" % grand_average.nave return grand_average def object_hash(x, h=None): """Hash a reasonable python object. Parameters ---------- x : object Object to hash. Can be anything comprised of nested versions of: {dict, list, tuple, ndarray, str, bytes, float, int, None}. h : hashlib HASH object | None Optional, object to add the hash to. None creates an MD5 hash. Returns ------- digest : int The digest resulting from the hash. 
""" if h is None: h = hashlib.md5() if hasattr(x, 'keys'): # dict-like types keys = _sort_keys(x) for key in keys: object_hash(key, h) object_hash(x[key], h) elif isinstance(x, bytes): # must come before "str" below h.update(x) elif isinstance(x, (str, float, int, type(None))): h.update(str(type(x)).encode('utf-8')) h.update(str(x).encode('utf-8')) elif isinstance(x, (np.ndarray, np.number, np.bool_)): x = np.asarray(x) h.update(str(x.shape).encode('utf-8')) h.update(str(x.dtype).encode('utf-8')) h.update(x.tobytes()) elif isinstance(x, datetime): object_hash(_dt_to_stamp(x)) elif hasattr(x, '__len__'): # all other list-like types h.update(str(type(x)).encode('utf-8')) for xx in x: object_hash(xx, h) else: raise RuntimeError('unsupported type: %s (%s)' % (type(x), x)) return int(h.hexdigest(), 16) def object_size(x, memo=None): """Estimate the size of a reasonable python object. Parameters ---------- x : object Object to approximate the size of. Can be anything comprised of nested versions of: {dict, list, tuple, ndarray, str, bytes, float, int, None}. memo : dict | None The memodict. Returns ------- size : int The estimated size in bytes of the object. """ # Note: this will not process object arrays properly (since those only) # hold references if memo is None: memo = dict() id_ = id(x) if id_ in memo: return 0 # do not add already existing ones if isinstance(x, (bytes, str, int, float, type(None))): size = sys.getsizeof(x) elif isinstance(x, np.ndarray): # On newer versions of NumPy, just doing sys.getsizeof(x) works, # but on older ones you always get something small :( size = sys.getsizeof(np.array([])) if x.base is None or id(x.base) not in memo: size += x.nbytes elif isinstance(x, np.generic): size = x.nbytes elif isinstance(x, dict): size = sys.getsizeof(x) for key, value in x.items(): size += object_size(key, memo) size += object_size(value, memo) elif isinstance(x, (list, tuple)): size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x) elif isinstance(x, datetime): size = object_size(_dt_to_stamp(x), memo) elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x): size = sum(sys.getsizeof(xx) for xx in [x, x.data, x.indices, x.indptr]) else: raise RuntimeError('unsupported type: %s (%s)' % (type(x), x)) memo[id_] = size return size def _sort_keys(x): """Sort and return keys of dict.""" keys = list(x.keys()) # note: not thread-safe idx = np.argsort([str(k) for k in keys]) keys = [keys[ii] for ii in idx] return keys def _array_equal_nan(a, b): try: np.testing.assert_array_equal(a, b) except AssertionError: return False return True def object_diff(a, b, pre=''): """Compute all differences between two python variables. Parameters ---------- a : object Currently supported: dict, list, tuple, ndarray, int, str, bytes, float, StringIO, BytesIO. b : object Must be same type as ``a``. pre : str String to prepend to each line. Returns ------- diffs : str A string representation of the differences. 
""" out = '' if type(a) != type(b): # Deal with NamedInt and NamedFloat for sub in (int, float): if isinstance(a, sub) and isinstance(b, sub): break else: return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b)) if isinstance(a, dict): k1s = _sort_keys(a) k2s = _sort_keys(b) m1 = set(k2s) - set(k1s) if len(m1): out += pre + ' left missing keys %s\n' % (m1) for key in k1s: if key not in k2s: out += pre + ' right missing key %s\n' % key else: out += object_diff(a[key], b[key], pre=(pre + '[%s]' % repr(key))) elif isinstance(a, (list, tuple)): if len(a) != len(b): out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b)) else: for ii, (xx1, xx2) in enumerate(zip(a, b)): out += object_diff(xx1, xx2, pre + '[%s]' % ii) elif isinstance(a, float): if not _array_equal_nan(a, b): out += pre + ' value mismatch (%s, %s)\n' % (a, b) elif isinstance(a, (str, int, bytes, np.generic)): if a != b: out += pre + ' value mismatch (%s, %s)\n' % (a, b) elif a is None: if b is not None: out += pre + ' left is None, right is not (%s)\n' % (b) elif isinstance(a, np.ndarray): if not _array_equal_nan(a, b): out += pre + ' array mismatch\n' elif isinstance(a, (StringIO, BytesIO)): if a.getvalue() != b.getvalue(): out += pre + ' StringIO mismatch\n' elif isinstance(a, datetime): if (a - b).total_seconds() != 0: out += pre + ' datetime mismatch\n' elif sparse.isspmatrix(a): # sparsity and sparse type of b vs a already checked above by type() if b.shape != a.shape: out += pre + (' sparse matrix a and b shape mismatch' '(%s vs %s)' % (a.shape, b.shape)) else: c = a - b c.eliminate_zeros() if c.nnz > 0: out += pre + (' sparse matrix a and b differ on %s ' 'elements' % c.nnz) elif hasattr(a, '__getstate__'): out += object_diff(a.__getstate__(), b.__getstate__(), pre) else: raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a)) return out class _PCA(object): """Principal component analysis (PCA).""" # Adapted from sklearn and stripped down to just use linalg.svd # and make it easier to later provide a "center" option if we want def __init__(self, n_components=None, whiten=False): self.n_components = n_components self.whiten = whiten def fit_transform(self, X, y=None): X = X.copy() U, S, _ = self._fit(X) U = U[:, :self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[:self.n_components_] return U def _fit(self, X): if self.n_components is None: n_components = min(X.shape) else: n_components = self.n_components n_samples, n_features = X.shape if n_components == 'mle': if n_samples < n_features: raise ValueError("n_components='mle' is only supported " "if n_samples >= n_features") elif not 0 <= n_components <= min(n_samples, n_features): raise ValueError("n_components=%r must be between 0 and " "min(n_samples, n_features)=%r with " "svd_solver='full'" % (n_components, min(n_samples, n_features))) elif n_components >= 1: if not isinstance(n_components, (numbers.Integral, np.integer)): raise ValueError("n_components=%r must be of type int " "when greater than or equal to 1, " "was of type=%r" % (n_components, type(n_components))) self.mean_ = np.mean(X, axis=0) X -= self.mean_ U, S, V = _safe_svd(X, full_matrices=False) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = 
explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. self.n_samples_, self.n_features_ = n_samples, n_features self.components_ = components_[:n_components] self.n_components_ = n_components self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] self.singular_values_ = singular_values_[:n_components] return U, S, V def _mask_to_onsets_offsets(mask): """Group boolean mask into contiguous onset:offset pairs.""" assert mask.dtype == bool and mask.ndim == 1 mask = mask.astype(int) diff = np.diff(mask) onsets = np.where(diff > 0)[0] + 1 if mask[0]: onsets = np.concatenate([[0], onsets]) offsets = np.where(diff < 0)[0] + 1 if mask[-1]: offsets = np.concatenate([offsets, [len(mask)]]) assert len(onsets) == len(offsets) return onsets, offsets def _julian_to_dt(jd): """Convert Julian integer to a datetime object. Parameters ---------- jd : int Julian date - number of days since julian day 0 Julian day number 0 assigned to the day starting at noon on January 1, 4713 BC, proleptic Julian calendar November 24, 4714 BC, in the proleptic Gregorian calendar Returns ------- jd_date : datetime Datetime representation of jd """ # https://aa.usno.navy.mil/data/docs/JulianDate.php # Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000 jd_t0 = 2440588 datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc) dt = timedelta(days=(jd - jd_t0)) return datetime_t0 + dt def _dt_to_julian(jd_date): """Convert datetime object to a Julian integer. Parameters ---------- jd_date : datetime Returns ------- jd : float Julian date corresponding to jd_date - number of days since julian day 0 Julian day number 0 assigned to the day starting at noon on January 1, 4713 BC, proleptic Julian calendar November 24, 4714 BC, in the proleptic Gregorian calendar """ # https://aa.usno.navy.mil/data/docs/JulianDate.php # Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000 jd_t0 = 2440588 datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc) dt = jd_date - datetime_t0 return jd_t0 + dt.days def _cal_to_julian(year, month, day): """Convert calendar date (year, month, day) to a Julian integer. Parameters ---------- year : int Year as an integer. month : int Month as an integer. day : int Day as an integer. Returns ------- jd: int Julian date. """ return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0, tzinfo=timezone.utc))) def _julian_to_cal(jd): """Convert calendar date (year, month, day) to a Julian integer. Parameters ---------- jd: int, float Julian date. Returns ------- year : int Year as an integer. month : int Month as an integer. day : int Day as an integer. 
""" tmp_date = _julian_to_dt(jd) return tmp_date.year, tmp_date.month, tmp_date.day def _check_dt(dt): if not isinstance(dt, datetime) or dt.tzinfo is None or \ dt.tzinfo is not timezone.utc: raise ValueError('Date must be datetime object in UTC: %r' % (dt,)) def _dt_to_stamp(inp_date): """Convert a datetime object to a timestamp.""" _check_dt(inp_date) return int(inp_date.timestamp() // 1), inp_date.microsecond def _stamp_to_dt(utc_stamp): """Convert timestamp to datetime object in Windows-friendly way.""" # The min on windows is 86400 stamp = [int(s) for s in utc_stamp] if len(stamp) == 1: # In case there is no microseconds information stamp.append(0) return (datetime.fromtimestamp(0, tz=timezone.utc) + timedelta(0, stamp[0], stamp[1])) # day, sec, µs class _ReuseCycle(object): """Cycle over a variable, preferring to reuse earlier indices. Requires the values in ``x`` to be hashable and unique. This holds nicely for matplotlib's color cycle, which gives HTML hex color strings. """ def __init__(self, x): self.indices = list() self.popped = dict() assert len(x) > 0 self.x = x def __iter__(self): while True: yield self.__next__() def __next__(self): if not len(self.indices): self.indices = list(range(len(self.x))) self.popped = dict() idx = self.indices.pop(0) val = self.x[idx] assert val not in self.popped self.popped[val] = idx return val def restore(self, val): try: idx = self.popped.pop(val) except KeyError: warn('Could not find value: %s' % (val,)) else: loc = np.searchsorted(self.indices, idx) self.indices.insert(loc, idx)
bsd-3-clause
cwu2011/seaborn
doc/sphinxext/ipython_directive.py
37
37557
# -*- coding: utf-8 -*- """ Sphinx directive to support embedded IPython code. This directive allows pasting of entire interactive IPython sessions, prompts and all, and their code will actually get re-executed at doc build time, with all prompts renumbered sequentially. It also allows you to input code as a pure python input by giving the argument python to the directive. The output looks like an interactive ipython section. To enable this directive, simply list it in your Sphinx ``conf.py`` file (making sure the directory where you placed it is visible to sphinx, as is needed for all Sphinx directives). For example, to enable syntax highlighting and the IPython directive:: extensions = ['IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive'] The IPython directive outputs code-blocks with the language 'ipython'. So if you do not have the syntax highlighting extension enabled as well, then all rendered code-blocks will be uncolored. By default this directive assumes that your prompts are unchanged IPython ones, but this can be customized. The configurable options that can be placed in conf.py are: ipython_savefig_dir: The directory in which to save the figures. This is relative to the Sphinx source directory. The default is `html_static_path`. ipython_rgxin: The compiled regular expression to denote the start of IPython input lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_rgxout: The compiled regular expression to denote the start of IPython output lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_promptin: The string to represent the IPython input prompt in the generated ReST. The default is 'In [%d]:'. This expects that the line numbers are used in the prompt. ipython_promptout: The string to represent the IPython prompt in the generated ReST. The default is 'Out [%d]:'. This expects that the line numbers are used in the prompt. ipython_mplbackend: The string which specifies if the embedded Sphinx shell should import Matplotlib and set the backend. The value specifies a backend that is passed to `matplotlib.use()` before any lines in `ipython_execlines` are executed. If not specified in conf.py, then the default value of 'agg' is used. To use the IPython directive without matplotlib as a dependency, set the value to `None`. It may end up that matplotlib is still imported if the user specifies so in `ipython_execlines` or makes use of the @savefig pseudo decorator. ipython_execlines: A list of strings to be exec'd in the embedded Sphinx shell. Typical usage is to make certain packages always available. Set this to an empty list if you wish to have no imports always available. If specified in conf.py as `None`, then it has the effect of making no imports available. If omitted from conf.py altogether, then the default value of ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. ipython_holdcount When the @suppress pseudo-decorator is used, the execution count can be incremented or not. The default behavior is to hold the execution count, corresponding to a value of `True`. Set this to `False` to increment the execution count after each suppressed command. As an example, to use the IPython directive when `matplotlib` is not available, one sets the backend to `None`:: ipython_mplbackend = None An example usage of the directive is: .. code-block:: rst .. 
ipython:: In [1]: x = 1 In [2]: y = x**2 In [3]: print(y) See http://matplotlib.org/sampledoc/ipython_directive.html for additional documentation. ToDo ---- - Turn the ad-hoc test() function into a real test suite. - Break up ipython-specific functionality from matplotlib stuff into better separated code. Authors ------- - John D Hunter: orignal author. - Fernando Perez: refactoring, documentation, cleanups, port to 0.11. - VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. - Skipper Seabold, refactoring, cleanups, pure python addition """ from __future__ import print_function from __future__ import unicode_literals #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Stdlib import os import re import sys import tempfile import ast from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO import warnings # To keep compatibility with various python versions try: from hashlib import md5 except ImportError: from md5 import md5 # Third-party import sphinx from docutils.parsers.rst import directives from docutils import nodes from sphinx.util.compat import Directive # Our own from IPython import Config, InteractiveShell from IPython.core.profiledir import ProfileDir from IPython.utils import io from IPython.utils.py3compat import PY3 if PY3: from io import StringIO text_type = str else: from StringIO import StringIO text_type = unicode #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- # for tokenizing blocks COMMENT, INPUT, OUTPUT = range(3) #----------------------------------------------------------------------------- # Functions and class declarations #----------------------------------------------------------------------------- def block_parser(part, rgxin, rgxout, fmtin, fmtout): """ part is a string of ipython text, comprised of at most one input, one ouput, comments, and blank lines. The block parser parses the text into a list of:: blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and data is, depending on the type of token:: COMMENT : the comment string INPUT: the (DECORATOR, INPUT_LINE, REST) where DECORATOR: the input decorator (or None) INPUT_LINE: the input as string (possibly multi-line) REST : any stdout generated by the input line (not OUTPUT) OUTPUT: the output string, possibly multi-line """ block = [] lines = part.split('\n') N = len(lines) i = 0 decorator = None while 1: if i==N: # nothing left to parse -- the last line break line = lines[i] i += 1 line_stripped = line.strip() if line_stripped.startswith('#'): block.append((COMMENT, line)) continue if line_stripped.startswith('@'): # we're assuming at most one decorator -- may need to # rethink decorator = line_stripped continue # does this look like an input line? matchin = rgxin.match(line) if matchin: lineno, inputline = int(matchin.group(1)), matchin.group(2) # the ....: continuation string continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) Nc = len(continuation) # input lines can continue on for more than one line, if # we have a '\' line continuation char or a function call # echo line 'print'. 
The input line can only be # terminated by the end of the block or an output line, so # we parse out the rest of the input line if it is # multiline as well as any echo text rest = [] while i<N: # look ahead; if the next line is blank, or a comment, or # an output line, we're done nextline = lines[i] matchout = rgxout.match(nextline) #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation)) if matchout or nextline.startswith('#'): break elif nextline.startswith(continuation): nextline = nextline[Nc:] if nextline and nextline[0] == ' ': nextline = nextline[1:] inputline += '\n' + nextline else: rest.append(nextline) i+= 1 block.append((INPUT, (decorator, inputline, '\n'.join(rest)))) continue # if it looks like an output line grab all the text to the end # of the block matchout = rgxout.match(line) if matchout: lineno, output = int(matchout.group(1)), matchout.group(2) if i<N-1: output = '\n'.join([output] + lines[i:]) block.append((OUTPUT, output)) break return block class DecodingStringIO(StringIO, object): def __init__(self,buf='',encodings=('utf8',), *args, **kwds): super(DecodingStringIO, self).__init__(buf, *args, **kwds) self.set_encodings(encodings) def set_encodings(self, encodings): self.encodings = encodings def write(self,data): if isinstance(data, text_type): return super(DecodingStringIO, self).write(data) else: for enc in self.encodings: try: data = data.decode(enc) return super(DecodingStringIO, self).write(data) except : pass # default to brute utf8 if no encoding succeded return super(DecodingStringIO, self).write(data.decode('utf8', 'replace')) class EmbeddedSphinxShell(object): """An embedded IPython instance to run inside Sphinx""" def __init__(self, exec_lines=None,state=None): self.cout = DecodingStringIO(u'') if exec_lines is None: exec_lines = [] self.state = state # Create config object for IPython config = Config() config.InteractiveShell.autocall = False config.InteractiveShell.autoindent = False config.InteractiveShell.colors = 'NoColor' # create a profile so instance history isn't saved tmp_profile_dir = tempfile.mkdtemp(prefix='profile_') profname = 'auto_profile_sphinx_build' pdir = os.path.join(tmp_profile_dir,profname) profile = ProfileDir.create_profile_dir(pdir) # Create and initialize global ipython, but don't start its mainloop. # This will persist across different EmbededSphinxShell instances. IP = InteractiveShell.instance(config=config, profile_dir=profile) # io.stdout redirect must be done after instantiating InteractiveShell io.stdout = self.cout io.stderr = self.cout # For debugging, so we can see normal output, use this: #from IPython.utils.io import Tee #io.stdout = Tee(self.cout, channel='stdout') # dbg #io.stderr = Tee(self.cout, channel='stderr') # dbg # Store a few parts of IPython we'll need. self.IP = IP self.user_ns = self.IP.user_ns self.user_global_ns = self.IP.user_global_ns self.input = '' self.output = '' self.is_verbatim = False self.is_doctest = False self.is_suppress = False # Optionally, provide more detailed information to shell. self.directive = None # on the first call to the savefig decorator, we'll import # pyplot as plt so we can make a call to the plt.gcf().savefig self._pyplot_imported = False # Prepopulate the namespace. 
for line in exec_lines: self.process_input_line(line, store_history=False) def clear_cout(self): self.cout.seek(0) self.cout.truncate(0) def process_input_line(self, line, store_history=True): """process the input, capturing stdout""" stdout = sys.stdout splitter = self.IP.input_splitter try: sys.stdout = self.cout splitter.push(line) more = splitter.push_accepts_more() if not more: try: source_raw = splitter.source_raw_reset()[1] except: # recent ipython #4504 source_raw = splitter.raw_reset() self.IP.run_cell(source_raw, store_history=store_history) finally: sys.stdout = stdout def process_image(self, decorator): """ # build out an image directive like # .. image:: somefile.png # :width 4in # # from an input like # savefig somefile.png width=4in """ savefig_dir = self.savefig_dir source_dir = self.source_dir saveargs = decorator.split(' ') filename = saveargs[1] # insert relative path to image file in source outfile = os.path.relpath(os.path.join(savefig_dir,filename), source_dir) imagerows = ['.. image:: %s'%outfile] for kwarg in saveargs[2:]: arg, val = kwarg.split('=') arg = arg.strip() val = val.strip() imagerows.append(' :%s: %s'%(arg, val)) image_file = os.path.basename(outfile) # only return file name image_directive = '\n'.join(imagerows) return image_file, image_directive # Callbacks for each type of token def process_input(self, data, input_prompt, lineno): """ Process data block for INPUT token. """ decorator, input, rest = data image_file = None image_directive = None is_verbatim = decorator=='@verbatim' or self.is_verbatim is_doctest = (decorator is not None and \ decorator.startswith('@doctest')) or self.is_doctest is_suppress = decorator=='@suppress' or self.is_suppress is_okexcept = decorator=='@okexcept' or self.is_okexcept is_okwarning = decorator=='@okwarning' or self.is_okwarning is_savefig = decorator is not None and \ decorator.startswith('@savefig') # set the encodings to be used by DecodingStringIO # to convert the execution output into unicode if # needed. this attrib is set by IpythonDirective.run() # based on the specified block options, defaulting to ['ut self.cout.set_encodings(self.output_encoding) input_lines = input.split('\n') if len(input_lines) > 1: if input_lines[-1] != "": input_lines.append('') # make sure there's a blank line # so splitter buffer gets reset continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) if is_savefig: image_file, image_directive = self.process_image(decorator) ret = [] is_semicolon = False # Hold the execution count, if requested to do so. 
if is_suppress and self.hold_count: store_history = False else: store_history = True # Note: catch_warnings is not thread safe with warnings.catch_warnings(record=True) as ws: for i, line in enumerate(input_lines): if line.endswith(';'): is_semicolon = True if i == 0: # process the first input line if is_verbatim: self.process_input_line('') self.IP.execution_count += 1 # increment it anyway else: # only submit the line in non-verbatim mode self.process_input_line(line, store_history=store_history) formatted_line = '%s %s'%(input_prompt, line) else: # process a continuation line if not is_verbatim: self.process_input_line(line, store_history=store_history) formatted_line = '%s %s'%(continuation, line) if not is_suppress: ret.append(formatted_line) if not is_suppress and len(rest.strip()) and is_verbatim: # the "rest" is the standard output of the # input, which needs to be added in # verbatim mode ret.append(rest) self.cout.seek(0) output = self.cout.read() if not is_suppress and not is_semicolon: ret.append(output) elif is_semicolon: # get spacing right ret.append('') # context information filename = self.state.document.current_source lineno = self.state.document.current_line # output any exceptions raised during execution to stdout # unless :okexcept: has been specified. if not is_okexcept and "Traceback" in output: s = "\nException in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) sys.stdout.write(output) sys.stdout.write('<<<' + ('-' * 73) + '\n\n') # output any warning raised during execution to stdout # unless :okwarning: has been specified. if not is_okwarning: for w in ws: s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) sys.stdout.write('-' * 76 + '\n') s=warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line) sys.stdout.write(s) sys.stdout.write('<<<' + ('-' * 73) + '\n') self.cout.truncate(0) return (ret, input_lines, output, is_doctest, decorator, image_file, image_directive) def process_output(self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file): """ Process data block for OUTPUT token. """ TAB = ' ' * 4 if is_doctest and output is not None: found = output found = found.strip() submitted = data.strip() if self.directive is None: source = 'Unavailable' content = 'Unavailable' else: source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. content = '\n'.join([TAB + line for line in content]) # Make sure the output contains the output prompt. ind = found.find(output_prompt) if ind < 0: e = ('output does not contain output prompt\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'Input line(s):\n{TAB}{2}\n\n' 'Output line(s):\n{TAB}{3}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), TAB=TAB) raise RuntimeError(e) found = found[len(output_prompt):].strip() # Handle the actual doctest comparison. 
if decorator.strip() == '@doctest': # Standard doctest if found != submitted: e = ('doctest failure\n\n' 'Document source: {0}\n\n' 'Raw content: \n{1}\n\n' 'On input line(s):\n{TAB}{2}\n\n' 'we found output:\n{TAB}{3}\n\n' 'instead of the expected:\n{TAB}{4}\n\n') e = e.format(source, content, '\n'.join(input_lines), repr(found), repr(submitted), TAB=TAB) raise RuntimeError(e) else: self.custom_doctest(decorator, input_lines, found, submitted) def process_comment(self, data): """Process data fPblock for COMMENT token.""" if not self.is_suppress: return [data] def save_image(self, image_file): """ Saves the image file to disk. """ self.ensure_pyplot() command = ('plt.gcf().savefig("%s", bbox_inches="tight", ' 'dpi=100)' % image_file) #print 'SAVEFIG', command # dbg self.process_input_line('bookmark ipy_thisdir', store_history=False) self.process_input_line('cd -b ipy_savedir', store_history=False) self.process_input_line(command, store_history=False) self.process_input_line('cd -b ipy_thisdir', store_history=False) self.process_input_line('bookmark -d ipy_thisdir', store_history=False) self.clear_cout() def process_block(self, block): """ process block from the block_parser and return a list of processed lines """ ret = [] output = None input_lines = None lineno = self.IP.execution_count input_prompt = self.promptin % lineno output_prompt = self.promptout % lineno image_file = None image_directive = None for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: (out_data, input_lines, output, is_doctest, decorator, image_file, image_directive) = \ self.process_input(data, input_prompt, lineno) elif token == OUTPUT: out_data = \ self.process_output(data, output_prompt, input_lines, output, is_doctest, decorator, image_file) if out_data: ret.extend(out_data) # save the image files if image_file is not None: self.save_image(image_file) return ret, image_directive def ensure_pyplot(self): """ Ensures that pyplot has been imported into the embedded IPython shell. Also, makes sure to set the backend appropriately if not set already. """ # We are here if the @figure pseudo decorator was used. Thus, it's # possible that we could be here even if python_mplbackend were set to # `None`. That's also strange and perhaps worthy of raising an # exception, but for now, we just set the backend to 'agg'. if not self._pyplot_imported: if 'matplotlib.backends' not in sys.modules: # Then ipython_matplotlib was set to None but there was a # call to the @figure decorator (and ipython_execlines did # not set a backend). #raise Exception("No backend was set, but @figure was used!") import matplotlib matplotlib.use('agg') # Always import pyplot into embedded shell. self.process_input_line('import matplotlib.pyplot as plt', store_history=False) self._pyplot_imported = True def process_pure_python(self, content): """ content is a list of strings. 
it is unedited directive content This runs it line by line in the InteractiveShell, prepends prompts as needed capturing stderr and stdout, then returns the content as a list as if it were ipython code """ output = [] savefig = False # keep up with this to clear figure multiline = False # to handle line continuation multiline_start = None fmtin = self.promptin ct = 0 for lineno, line in enumerate(content): line_stripped = line.strip() if not len(line): output.append(line) continue # handle decorators if line_stripped.startswith('@'): output.extend([line]) if 'savefig' in line: savefig = True # and need to clear figure continue # handle comments if line_stripped.startswith('#'): output.extend([line]) continue # deal with lines checking for multiline continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2)) if not multiline: modified = u"%s %s" % (fmtin % ct, line_stripped) output.append(modified) ct += 1 try: ast.parse(line_stripped) output.append(u'') except Exception: # on a multiline multiline = True multiline_start = lineno else: # still on a multiline modified = u'%s %s' % (continuation, line) output.append(modified) # if the next line is indented, it should be part of multiline if len(content) > lineno + 1: nextline = content[lineno + 1] if len(nextline) - len(nextline.lstrip()) > 3: continue try: mod = ast.parse( '\n'.join(content[multiline_start:lineno+1])) if isinstance(mod.body[0], ast.FunctionDef): # check to see if we have the whole function for element in mod.body[0].body: if isinstance(element, ast.Return): multiline = False else: output.append(u'') multiline = False except Exception: pass if savefig: # clear figure if plotted self.ensure_pyplot() self.process_input_line('plt.clf()', store_history=False) self.clear_cout() savefig = False return output def custom_doctest(self, decorator, input_lines, found, submitted): """ Perform a specialized doctest. """ from .custom_doctests import doctests args = decorator.split() doctest_type = args[1] if doctest_type in doctests: doctests[doctest_type](self, args, input_lines, found, submitted) else: e = "Invalid option to @doctest: {0}".format(doctest_type) raise Exception(e) class IPythonDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 4 # python, suppress, verbatim, doctest final_argumuent_whitespace = True option_spec = { 'python': directives.unchanged, 'suppress' : directives.flag, 'verbatim' : directives.flag, 'doctest' : directives.flag, 'okexcept': directives.flag, 'okwarning': directives.flag, 'output_encoding': directives.unchanged_required } shell = None seen_docs = set() def get_config_options(self): # contains sphinx configuration variables config = self.state.document.settings.env.config # get config variables to set figure output directory confdir = self.state.document.settings.env.app.confdir savefig_dir = config.ipython_savefig_dir source_dir = os.path.dirname(self.state.document.current_source) if savefig_dir is None: savefig_dir = config.html_static_path if isinstance(savefig_dir, list): savefig_dir = savefig_dir[0] # safe to assume only one path? 
savefig_dir = os.path.join(confdir, savefig_dir) # get regex and prompt stuff rgxin = config.ipython_rgxin rgxout = config.ipython_rgxout promptin = config.ipython_promptin promptout = config.ipython_promptout mplbackend = config.ipython_mplbackend exec_lines = config.ipython_execlines hold_count = config.ipython_holdcount return (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count) def setup(self): # Get configuration values. (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count) = self.get_config_options() if self.shell is None: # We will be here many times. However, when the # EmbeddedSphinxShell is created, its interactive shell member # is the same for each instance. if mplbackend: import matplotlib # Repeated calls to use() will not hurt us since `mplbackend` # is the same each time. matplotlib.use(mplbackend) # Must be called after (potentially) importing matplotlib and # setting its backend since exec_lines might import pylab. self.shell = EmbeddedSphinxShell(exec_lines, self.state) # Store IPython directive to enable better error messages self.shell.directive = self # reset the execution count if we haven't processed this doc #NOTE: this may be borked if there are multiple seen_doc tmp files #check time stamp? if not self.state.document.current_source in self.seen_docs: self.shell.IP.history_manager.reset() self.shell.IP.execution_count = 1 self.shell.IP.prompt_manager.width = 0 self.seen_docs.add(self.state.document.current_source) # and attach to shell so we don't have to pass them around self.shell.rgxin = rgxin self.shell.rgxout = rgxout self.shell.promptin = promptin self.shell.promptout = promptout self.shell.savefig_dir = savefig_dir self.shell.source_dir = source_dir self.shell.hold_count = hold_count # setup bookmark for saving figures directory self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir, store_history=False) self.shell.clear_cout() return rgxin, rgxout, promptin, promptout def teardown(self): # delete last bookmark self.shell.process_input_line('bookmark -d ipy_savedir', store_history=False) self.shell.clear_cout() def run(self): debug = False #TODO, any reason block_parser can't be a method of embeddable shell # then we wouldn't have to carry these around rgxin, rgxout, promptin, promptout = self.setup() options = self.options self.shell.is_suppress = 'suppress' in options self.shell.is_doctest = 'doctest' in options self.shell.is_verbatim = 'verbatim' in options self.shell.is_okexcept = 'okexcept' in options self.shell.is_okwarning = 'okwarning' in options self.shell.output_encoding = [options.get('output_encoding', 'utf8')] # handle pure python code if 'python' in self.arguments: content = self.content self.content = self.shell.process_pure_python(content) parts = '\n'.join(self.content).split('\n\n') lines = ['.. code-block:: ipython', ''] figures = [] for part in parts: block = block_parser(part, rgxin, rgxout, promptin, promptout) if len(block): rows, figure = self.shell.process_block(block) for row in rows: lines.extend([' %s'%line for line in row.split('\n')]) if figure is not None: figures.append(figure) for figure in figures: lines.append('') lines.extend(figure.split('\n')) lines.append('') if len(lines)>2: if debug: print('\n'.join(lines)) else: # This has to do with input, not output. But if we comment # these lines out, then no IPython code will appear in the # final output. 
self.state_machine.insert_input( lines, self.state_machine.input_lines.source(0)) # cleanup self.teardown() return [] # Enable as a proper Sphinx directive def setup(app): setup.app = app app.add_directive('ipython', IPythonDirective) app.add_config_value('ipython_savefig_dir', None, 'env') app.add_config_value('ipython_rgxin', re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env') app.add_config_value('ipython_rgxout', re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env') app.add_config_value('ipython_promptin', 'In [%d]:', 'env') app.add_config_value('ipython_promptout', 'Out[%d]:', 'env') # We could just let matplotlib pick whatever is specified as the default # backend in the matplotlibrc file, but this would cause issues if the # backend didn't work in headless environments. For this reason, 'agg' # is a good default backend choice. app.add_config_value('ipython_mplbackend', 'agg', 'env') # If the user sets this config value to `None`, then EmbeddedSphinxShell's # __init__ method will treat it as []. execlines = ['import numpy as np', 'import matplotlib.pyplot as plt'] app.add_config_value('ipython_execlines', execlines, 'env') app.add_config_value('ipython_holdcount', True, 'env') # Simple smoke test, needs to be converted to a proper automatic test. def test(): examples = [ r""" In [9]: pwd Out[9]: '/home/jdhunter/py4science/book' In [10]: cd bookdata/ /home/jdhunter/py4science/book/bookdata In [2]: from pylab import * In [2]: ion() In [3]: im = imread('stinkbug.png') @savefig mystinkbug.png width=4in In [4]: imshow(im) Out[4]: <matplotlib.image.AxesImage object at 0x39ea850> """, r""" In [1]: x = 'hello world' # string methods can be # used to alter the string @doctest In [2]: x.upper() Out[2]: 'HELLO WORLD' @verbatim In [3]: x.st<TAB> x.startswith x.strip """, r""" In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' In [131]: print url.split('&') ['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] In [60]: import urllib """, r"""\ In [133]: import numpy.random @suppress In [134]: numpy.random.seed(2358) @doctest In [135]: numpy.random.rand(10,2) Out[135]: array([[ 0.64524308, 0.59943846], [ 0.47102322, 0.8715456 ], [ 0.29370834, 0.74776844], [ 0.99539577, 0.1313423 ], [ 0.16250302, 0.21103583], [ 0.81626524, 0.1312433 ], [ 0.67338089, 0.72302393], [ 0.7566368 , 0.07033696], [ 0.22591016, 0.77731835], [ 0.0072729 , 0.34273127]]) """, r""" In [106]: print x jdh In [109]: for i in range(10): .....: print i .....: .....: 0 1 2 3 4 5 6 7 8 9 """, r""" In [144]: from pylab import * In [145]: ion() # use a semicolon to suppress the output @savefig test_hist.png width=4in In [151]: hist(np.random.randn(10000), 100); @savefig test_plot.png width=4in In [151]: plot(np.random.randn(10000), 'o'); """, r""" # use a semicolon to suppress the output In [151]: plt.clf() @savefig plot_simple.png width=4in In [151]: plot([1,2,3]) @savefig hist_simple.png width=4in In [151]: hist(np.random.randn(10000), 100); """, r""" # update the current fig In [151]: ylabel('number') In [152]: title('normal distribution') @savefig hist_with_text.png In [153]: grid(True) @doctest float In [154]: 0.1 + 0.2 Out[154]: 0.3 @doctest float In [155]: np.arange(16).reshape(4,4) Out[155]: array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) In [1]: x = np.arange(16, dtype=float).reshape(4,4) In [2]: x[0,0] = np.inf In [3]: x[0,1] = np.nan @doctest float In [4]: x Out[4]: 
array([[ inf, nan, 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) """, ] # skip local-file depending first example: examples = examples[1:] #ipython_directive.DEBUG = True # dbg #options = dict(suppress=True) # dbg options = dict() for example in examples: content = example.split('\n') IPythonDirective('debug', arguments=None, options=options, content=content, lineno=0, content_offset=None, block_text=None, state=None, state_machine=None, ) # Run test suite as a script if __name__=='__main__': if not os.path.isdir('_static'): os.mkdir('_static') test() print('All OK? Check figures in _static/')
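# Usage sketch: how the directive registered above is typically consumed from Sphinx.
# Assumptions: the extension is enabled in conf.py (e.g. extensions =
# ['IPython.sphinxext.ipython_directive'] for the copy shipped with IPython; the exact
# module path depends on where this file is installed), and the snippets below are
# illustrative, not taken from a real project. In an .rst source file:
#
#     .. ipython::
#
#        In [1]: x = 2
#
#        In [2]: x ** 3
#        Out[2]: 8
#
#     .. ipython::
#        :okexcept:
#
#        In [3]: 1 / 0
#
# The options accepted here (python, suppress, verbatim, doctest, okexcept, okwarning,
# output_encoding) are the keys of IPythonDirective.option_spec defined earlier.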
bsd-3-clause
jhamman/xray
xarray/tests/test_variable.py
1
54048
from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple from copy import copy, deepcopy from datetime import datetime, timedelta from textwrap import dedent import pytest from distutils.version import LooseVersion import numpy as np import pytz import pandas as pd from xarray import Variable, IndexVariable, Coordinate, Dataset from xarray.core import indexing from xarray.core.variable import as_variable, as_compatible_data from xarray.core.indexing import PandasIndexAdapter, LazilyIndexedArray from xarray.core.pycompat import PY3, OrderedDict from xarray.core.common import full_like, zeros_like, ones_like from . import TestCase, source_ndarray, requires_dask class VariableSubclassTestCases(object): def test_properties(self): data = 0.5 * np.arange(10) v = self.cls(['time'], data, {'foo': 'bar'}) self.assertEqual(v.dims, ('time',)) self.assertArrayEqual(v.values, data) self.assertEqual(v.dtype, float) self.assertEqual(v.shape, (10,)) self.assertEqual(v.size, 10) self.assertEqual(v.sizes, {'time': 10}) self.assertEqual(v.nbytes, 80) self.assertEqual(v.ndim, 1) self.assertEqual(len(v), 10) self.assertEqual(v.attrs, {'foo': u'bar'}) def test_attrs(self): v = self.cls(['time'], 0.5 * np.arange(10)) self.assertEqual(v.attrs, {}) attrs = {'foo': 'bar'} v.attrs = attrs self.assertEqual(v.attrs, attrs) self.assertIsInstance(v.attrs, OrderedDict) v.attrs['foo'] = 'baz' self.assertEqual(v.attrs['foo'], 'baz') def test_getitem_dict(self): v = self.cls(['x'], np.random.randn(5)) actual = v[{'x': 0}] expected = v[0] self.assertVariableIdentical(expected, actual) def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None): """Given a 1-dimensional variable, verify that the variable is indexed like a numpy.ndarray. 
""" self.assertEqual(variable[0].shape, ()) self.assertEqual(variable[0].ndim, 0) self.assertEqual(variable[0].size, 1) # test identity self.assertTrue(variable.equals(variable.copy())) self.assertTrue(variable.identical(variable.copy())) # check value is equal for both ndarray and Variable self.assertEqual(variable.values[0], expected_value0) self.assertEqual(variable[0].values, expected_value0) # check type or dtype is consistent for both ndarray and Variable if expected_dtype is None: # check output type instead of array dtype self.assertEqual(type(variable.values[0]), type(expected_value0)) self.assertEqual(type(variable[0].values), type(expected_value0)) elif expected_dtype is not False: self.assertEqual(variable.values[0].dtype, expected_dtype) self.assertEqual(variable[0].values.dtype, expected_dtype) def test_index_0d_int(self): for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]: x = self.cls(['x'], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_float(self): for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]: x = self.cls(['x'], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_string(self): for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')), (u'foo', np.dtype('U3'))]: x = self.cls(['x'], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_datetime(self): d = datetime(2000, 1, 1) x = self.cls(['x'], [d]) self._assertIndexedLikeNDArray(x, np.datetime64(d)) x = self.cls(['x'], [np.datetime64(d)]) self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]') x = self.cls(['x'], pd.DatetimeIndex([d])) self._assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]') def test_index_0d_timedelta64(self): td = timedelta(hours=1) x = self.cls(['x'], [np.timedelta64(td)]) self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]') x = self.cls(['x'], pd.to_timedelta([td])) self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]') def test_index_0d_not_a_time(self): d = np.datetime64('NaT', 'ns') x = self.cls(['x'], [d]) self._assertIndexedLikeNDArray(x, d) def test_index_0d_object(self): class HashableItemWrapper(object): def __init__(self, item): self.item = item def __eq__(self, other): return self.item == other.item def __hash__(self): return hash(self.item) def __repr__(self): return '%s(item=%r)' % (type(self).__name__, self.item) item = HashableItemWrapper((1, 2, 3)) x = self.cls('x', [item]) self._assertIndexedLikeNDArray(x, item, expected_dtype=False) def test_0d_object_array_with_list(self): listarray = np.empty((1,), dtype=object) listarray[0] = [1, 2, 3] x = self.cls('x', listarray) assert x.data == listarray assert x[0].data == listarray.squeeze() assert x.squeeze().data == listarray.squeeze() def test_index_and_concat_datetime(self): # regression test for #125 date_range = pd.date_range('2011-09-01', periods=10) for dates in [date_range, date_range.values, date_range.to_pydatetime()]: expected = self.cls('t', dates) for times in [[expected[i] for i in range(10)], [expected[i:(i + 1)] for i in range(10)], [expected[[i]] for i in range(10)]]: actual = Variable.concat(times, 't') self.assertEqual(expected.dtype, actual.dtype) self.assertArrayEqual(expected, actual) def test_0d_time_data(self): # regression test for #105 x = self.cls('time', pd.date_range('2000-01-01', periods=5)) expected = np.datetime64('2000-01-01T00Z', 'ns') self.assertEqual(x[0].values, expected) def test_datetime64_conversion(self): times = 
pd.date_range('2000-01-01', periods=3) for values, preserve_source in [ (times, True), (times.values, True), (times.values.astype('datetime64[s]'), False), (times.to_pydatetime(), False), ]: v = self.cls(['t'], values) self.assertEqual(v.dtype, np.dtype('datetime64[ns]')) self.assertArrayEqual(v.values, times.values) self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]')) same_source = source_ndarray(v.values) is source_ndarray(values) assert preserve_source == same_source def test_timedelta64_conversion(self): times = pd.timedelta_range(start=0, periods=3) for values, preserve_source in [ (times, True), (times.values, True), (times.values.astype('timedelta64[s]'), False), (times.to_pytimedelta(), False), ]: v = self.cls(['t'], values) self.assertEqual(v.dtype, np.dtype('timedelta64[ns]')) self.assertArrayEqual(v.values, times.values) self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]')) same_source = source_ndarray(v.values) is source_ndarray(values) assert preserve_source == same_source def test_object_conversion(self): data = np.arange(5).astype(str).astype(object) actual = self.cls('x', data) self.assertEqual(actual.dtype, data.dtype) def test_pandas_data(self): v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1])) self.assertVariableIdentical(v, v[[0, 1, 2]]) v = self.cls(['x'], pd.Index([0, 1, 2])) self.assertEqual(v[0].values, v.values[0]) def test_pandas_period_index(self): v = self.cls(['x'], pd.period_range(start='2000', periods=20, freq='B')) self.assertEqual(v[0], pd.Period('2000', freq='B')) assert "Period('2000-01-03', 'B')" in repr(v) def test_1d_math(self): x = 1.0 * np.arange(5) y = np.ones(5) # should we need `.to_base_variable()`? # probably a break that `+v` changes type? v = self.cls(['x'], x) base_v = v.to_base_variable() # unary ops self.assertVariableIdentical(base_v, +v) self.assertVariableIdentical(base_v, abs(v)) self.assertArrayEqual((-v).values, -x) # binary ops with numbers self.assertVariableIdentical(base_v, v + 0) self.assertVariableIdentical(base_v, 0 + v) self.assertVariableIdentical(base_v, v * 1) self.assertArrayEqual((v > 2).values, x > 2) self.assertArrayEqual((0 == v).values, 0 == x) self.assertArrayEqual((v - 1).values, x - 1) self.assertArrayEqual((1 - v).values, 1 - x) # binary ops with numpy arrays self.assertArrayEqual((v * x).values, x ** 2) self.assertArrayEqual((x * v).values, x ** 2) self.assertArrayEqual(v - y, v - 1) self.assertArrayEqual(y - v, 1 - v) # verify attributes are dropped v2 = self.cls(['x'], x, {'units': 'meters'}) self.assertVariableIdentical(base_v, +v2) # binary ops with all variables self.assertArrayEqual(v + v, 2 * v) w = self.cls(['x'], y, {'foo': 'bar'}) self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable()) self.assertArrayEqual((v * w).values, x * y) # something complicated self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x) # make sure dtype is preserved (for Index objects) self.assertEqual(float, (+v).dtype) self.assertEqual(float, (+v).values.dtype) self.assertEqual(float, (0 + v).dtype) self.assertEqual(float, (0 + v).values.dtype) # check types of returned data self.assertIsInstance(+v, Variable) self.assertNotIsInstance(+v, IndexVariable) self.assertIsInstance(0 + v, Variable) self.assertNotIsInstance(0 + v, IndexVariable) def test_1d_reduce(self): x = np.arange(5) v = self.cls(['x'], x) actual = v.sum() expected = Variable((), 10) self.assertVariableIdentical(expected, actual) self.assertIs(type(actual), Variable) def test_array_interface(self): x = 
np.arange(5) v = self.cls(['x'], x) self.assertArrayEqual(np.asarray(v), x) # test patched in methods self.assertArrayEqual(v.astype(float), x.astype(float)) # think this is a break, that argsort changes the type self.assertVariableIdentical(v.argsort(), v.to_base_variable()) self.assertVariableIdentical(v.clip(2, 3), self.cls('x', x.clip(2, 3)).to_base_variable()) # test ufuncs self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable()) self.assertIsInstance(np.sin(v), Variable) self.assertNotIsInstance(np.sin(v), IndexVariable) def example_1d_objects(self): for data in [range(3), 0.5 * np.arange(3), 0.5 * np.arange(3, dtype=np.float32), pd.date_range('2000-01-01', periods=3), np.array(['a', 'b', 'c'], dtype=object)]: yield (self.cls('x', data), data) def test___array__(self): for v, data in self.example_1d_objects(): self.assertArrayEqual(v.values, np.asarray(data)) self.assertArrayEqual(np.asarray(v), np.asarray(data)) self.assertEqual(v[0].values, np.asarray(data)[0]) self.assertEqual(np.asarray(v[0]), np.asarray(data)[0]) def test_equals_all_dtypes(self): for v, _ in self.example_1d_objects(): v2 = v.copy() self.assertTrue(v.equals(v2)) self.assertTrue(v.identical(v2)) self.assertTrue(v.no_conflicts(v2)) self.assertTrue(v[0].equals(v2[0])) self.assertTrue(v[0].identical(v2[0])) self.assertTrue(v[0].no_conflicts(v2[0])) self.assertTrue(v[:2].equals(v2[:2])) self.assertTrue(v[:2].identical(v2[:2])) self.assertTrue(v[:2].no_conflicts(v2[:2])) def test_eq_all_dtypes(self): # ensure that we don't choke on comparisons for which numpy returns # scalars expected = Variable('x', 3 * [False]) for v, _ in self.example_1d_objects(): actual = 'z' == v self.assertVariableIdentical(expected, actual) actual = ~('z' != v) self.assertVariableIdentical(expected, actual) def test_encoding_preserved(self): expected = self.cls('x', range(3), {'foo': 1}, {'bar': 2}) for actual in [expected.T, expected[...], expected.squeeze(), expected.isel(x=slice(None)), expected.set_dims({'x': 3}), expected.copy(deep=True), expected.copy(deep=False)]: self.assertVariableIdentical(expected.to_base_variable(), actual.to_base_variable()) self.assertEqual(expected.encoding, actual.encoding) def test_concat(self): x = np.arange(5) y = np.arange(5, 10) v = self.cls(['a'], x) w = self.cls(['a'], y) self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])), Variable.concat([v, w], 'b')) self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])), Variable.concat((v, w), 'b')) self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])), Variable.concat((v, w), 'b')) with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'): Variable.concat([v, Variable(['c'], y)], 'b') # test indexers actual = Variable.concat( [v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim='a') expected = Variable('a', np.array([x, y]).ravel(order='F')) self.assertVariableIdentical(expected, actual) # test concatenating along a dimension v = Variable(['time', 'x'], np.random.random((10, 8))) self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time')) self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time')) self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time')) # test dimension order self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x')) with self.assertRaisesRegexp(ValueError, 'all input arrays must have'): Variable.concat([v[:, 0], v[:, 1:]], 'x') def test_concat_attrs(self): # different or conflicting 
attributes should be removed v = self.cls('a', np.arange(5), {'foo': 'bar'}) w = self.cls('a', np.ones(5)) expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable() self.assertVariableIdentical(expected, Variable.concat([v, w], 'a')) w.attrs['foo'] = 2 self.assertVariableIdentical(expected, Variable.concat([v, w], 'a')) w.attrs['foo'] = 'bar' expected.attrs['foo'] = 'bar' self.assertVariableIdentical(expected, Variable.concat([v, w], 'a')) def test_concat_fixed_len_str(self): # regression test for #217 for kind in ['S', 'U']: x = self.cls('animal', np.array(['horse'], dtype=kind)) y = self.cls('animal', np.array(['aardvark'], dtype=kind)) actual = Variable.concat([x, y], 'animal') expected = Variable( 'animal', np.array(['horse', 'aardvark'], dtype=kind)) self.assertVariableEqual(expected, actual) def test_concat_number_strings(self): # regression test for #305 a = self.cls('x', ['0', '1', '2']) b = self.cls('x', ['3', '4']) actual = Variable.concat([a, b], dim='x') expected = Variable('x', np.arange(5).astype(str).astype(object)) self.assertVariableIdentical(expected, actual) self.assertEqual(expected.dtype, object) self.assertEqual(type(expected.values[0]), str) def test_copy(self): v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'}) for deep in [True, False]: w = v.copy(deep=deep) self.assertIs(type(v), type(w)) self.assertVariableIdentical(v, w) self.assertEqual(v.dtype, w.dtype) if self.cls is Variable: if deep: self.assertIsNot(source_ndarray(v.values), source_ndarray(w.values)) else: self.assertIs(source_ndarray(v.values), source_ndarray(w.values)) self.assertVariableIdentical(v, copy(v)) def test_copy_index(self): midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]], names=('one', 'two', 'three')) v = self.cls('x', midx) for deep in [True, False]: w = v.copy(deep=deep) self.assertIsInstance(w._data, PandasIndexAdapter) self.assertIsInstance(w.to_index(), pd.MultiIndex) self.assertArrayEqual(v._data.array, w._data.array) def test_real_and_imag(self): v = self.cls('x', np.arange(3) - 1j * np.arange(3), {'foo': 'bar'}) expected_re = self.cls('x', np.arange(3), {'foo': 'bar'}) self.assertVariableIdentical(v.real, expected_re) expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'}) self.assertVariableIdentical(v.imag, expected_im) expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable() self.assertVariableAllClose(abs(v), expected_abs) def test_aggregate_complex(self): # should skip NaNs v = self.cls('x', [1, 2j, np.nan]) expected = Variable((), 0.5 + 1j) self.assertVariableAllClose(v.mean(), expected) def test_pandas_cateogrical_dtype(self): data = pd.Categorical(np.arange(10, dtype='int64')) v = self.cls('x', data) print(v) # should not error assert v.dtype == 'int64' def test_pandas_datetime64_with_tz(self): data = pd.date_range(start='2000-01-01', tz=pytz.timezone('America/New_York'), periods=10, freq='1h') v = self.cls('x', data) print(v) # should not error if 'America/New_York' in str(data.dtype): # pandas is new enough that it has datetime64 with timezone dtype assert v.dtype == 'object' def test_multiindex(self): idx = pd.MultiIndex.from_product([list('abc'), [0, 1]]) v = self.cls('x', idx) self.assertVariableIdentical(Variable((), ('a', 0)), v[0]) self.assertVariableIdentical(v, v[:]) def test_load(self): array = self.cls('x', np.arange(5)) orig_data = array._data copied = array.copy(deep=True) array.load() assert type(array._data) is type(orig_data) assert type(copied._data) is type(orig_data) 
self.assertVariableIdentical(array, copied) class TestVariable(TestCase, VariableSubclassTestCases): cls = staticmethod(Variable) def setUp(self): self.d = np.random.random((10, 3)).astype(np.float64) def test_data_and_values(self): v = Variable(['time', 'x'], self.d) self.assertArrayEqual(v.data, self.d) self.assertArrayEqual(v.values, self.d) self.assertIs(source_ndarray(v.values), self.d) with self.assertRaises(ValueError): # wrong size v.values = np.random.random(5) d2 = np.random.random((10, 3)) v.values = d2 self.assertIs(source_ndarray(v.values), d2) d3 = np.random.random((10, 3)) v.data = d3 self.assertIs(source_ndarray(v.data), d3) def test_numpy_same_methods(self): v = Variable([], np.float32(0.0)) self.assertEqual(v.item(), 0) self.assertIs(type(v.item()), float) v = IndexVariable('x', np.arange(5)) self.assertEqual(2, v.searchsorted(2)) def test_datetime64_conversion_scalar(self): expected = np.datetime64('2000-01-01T00:00:00Z', 'ns') for values in [ np.datetime64('2000-01-01T00Z'), pd.Timestamp('2000-01-01T00'), datetime(2000, 1, 1), ]: v = Variable([], values) self.assertEqual(v.dtype, np.dtype('datetime64[ns]')) self.assertEqual(v.values, expected) self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]')) def test_timedelta64_conversion_scalar(self): expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns') for values in [ np.timedelta64(1, 'D'), pd.Timedelta('1 day'), timedelta(days=1), ]: v = Variable([], values) self.assertEqual(v.dtype, np.dtype('timedelta64[ns]')) self.assertEqual(v.values, expected) self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]')) def test_0d_str(self): v = Variable([], u'foo') self.assertEqual(v.dtype, np.dtype('U3')) self.assertEqual(v.values, 'foo') v = Variable([], np.string_('foo')) self.assertEqual(v.dtype, np.dtype('S3')) self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo') def test_0d_datetime(self): v = Variable([], pd.Timestamp('2000-01-01')) self.assertEqual(v.dtype, np.dtype('datetime64[ns]')) self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns')) def test_0d_timedelta(self): for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]: v = Variable([], td) self.assertEqual(v.dtype, np.dtype('timedelta64[ns]')) self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns')) def test_equals_and_identical(self): d = np.random.rand(10, 3) d[0, 0] = np.nan v1 = Variable(('dim1', 'dim2'), data=d, attrs={'att1': 3, 'att2': [1, 2, 3]}) v2 = Variable(('dim1', 'dim2'), data=d, attrs={'att1': 3, 'att2': [1, 2, 3]}) self.assertTrue(v1.equals(v2)) self.assertTrue(v1.identical(v2)) v3 = Variable(('dim1', 'dim3'), data=d) self.assertFalse(v1.equals(v3)) v4 = Variable(('dim1', 'dim2'), data=d) self.assertTrue(v1.equals(v4)) self.assertFalse(v1.identical(v4)) v5 = deepcopy(v1) v5.values[:] = np.random.rand(10, 3) self.assertFalse(v1.equals(v5)) self.assertFalse(v1.equals(None)) self.assertFalse(v1.equals(d)) self.assertFalse(v1.identical(None)) self.assertFalse(v1.identical(d)) def test_broadcast_equals(self): v1 = Variable((), np.nan) v2 = Variable(('x'), [np.nan, np.nan]) self.assertTrue(v1.broadcast_equals(v2)) self.assertFalse(v1.equals(v2)) self.assertFalse(v1.identical(v2)) v3 = Variable(('x'), [np.nan]) self.assertTrue(v1.broadcast_equals(v3)) self.assertFalse(v1.equals(v3)) self.assertFalse(v1.identical(v3)) self.assertFalse(v1.broadcast_equals(None)) v4 = Variable(('x'), [np.nan] * 3) self.assertFalse(v2.broadcast_equals(v4)) def test_no_conflicts(self): v1 = Variable(('x'), [1, 2, np.nan, np.nan]) v2 = 
Variable(('x'), [np.nan, 2, 3, np.nan]) self.assertTrue(v1.no_conflicts(v2)) self.assertFalse(v1.equals(v2)) self.assertFalse(v1.broadcast_equals(v2)) self.assertFalse(v1.identical(v2)) self.assertFalse(v1.no_conflicts(None)) v3 = Variable(('y'), [np.nan, 2, 3, np.nan]) self.assertFalse(v3.no_conflicts(v1)) d = np.array([1, 2, np.nan, np.nan]) self.assertFalse(v1.no_conflicts(d)) self.assertFalse(v2.no_conflicts(d)) v4 = Variable(('w', 'x'), [d]) self.assertTrue(v1.no_conflicts(v4)) def test_as_variable(self): data = np.arange(10) expected = Variable('x', data) expected_extra = Variable('x', data, attrs={'myattr': 'val'}, encoding={'scale_factor': 1}) self.assertVariableIdentical(expected, as_variable(expected)) ds = Dataset({'x': expected}) var = as_variable(ds['x']).to_base_variable() self.assertVariableIdentical(expected, var) self.assertNotIsInstance(ds['x'], Variable) self.assertIsInstance(as_variable(ds['x']), Variable) FakeVariable = namedtuple('FakeVariable', 'values dims') fake_xarray = FakeVariable(expected.values, expected.dims) self.assertVariableIdentical(expected, as_variable(fake_xarray)) FakeVariable = namedtuple('FakeVariable', 'data dims') fake_xarray = FakeVariable(expected.data, expected.dims) self.assertVariableIdentical(expected, as_variable(fake_xarray)) FakeVariable = namedtuple('FakeVariable', 'data values dims attrs encoding') fake_xarray = FakeVariable(expected_extra.data, expected_extra.values, expected_extra.dims, expected_extra.attrs, expected_extra.encoding) self.assertVariableIdentical(expected_extra, as_variable(fake_xarray)) xarray_tuple = (expected_extra.dims, expected_extra.values, expected_extra.attrs, expected_extra.encoding) self.assertVariableIdentical(expected_extra, as_variable(xarray_tuple)) with self.assertRaisesRegexp(TypeError, 'tuples to convert'): as_variable(tuple(data)) with self.assertRaisesRegexp( TypeError, 'without an explicit list of dimensions'): as_variable(data) actual = as_variable(data, name='x') self.assertVariableIdentical(expected.to_index_variable(), actual) actual = as_variable(0) expected = Variable([], 0) self.assertVariableIdentical(expected, actual) data = np.arange(9).reshape((3, 3)) expected = Variable(('x', 'y'), data) with self.assertRaisesRegexp( ValueError, 'without explicit dimension names'): as_variable(data, name='x') with self.assertRaisesRegexp( ValueError, 'has more than 1-dimension'): as_variable(expected, name='x') def test_repr(self): v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'}) expected = dedent(""" <xarray.Variable (time: 2, x: 3)> array([[1, 2, 3], [4, 5, 6]]) Attributes: foo: bar """).strip() self.assertEqual(expected, repr(v)) def test_repr_lazy_data(self): v = Variable('x', LazilyIndexedArray(np.arange(2e5))) self.assertIn('200000 values with dtype', repr(v)) self.assertIsInstance(v._data, LazilyIndexedArray) def test_items(self): data = np.random.random((10, 11)) v = Variable(['x', 'y'], data) # test slicing self.assertVariableIdentical(v, v[:]) self.assertVariableIdentical(v, v[...]) self.assertVariableIdentical(Variable(['y'], data[0]), v[0]) self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0]) self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]), v[:3, :2]) # test array indexing x = Variable(['x'], np.arange(10)) y = Variable(['y'], np.arange(11)) self.assertVariableIdentical(v, v[x.values]) self.assertVariableIdentical(v, v[x]) self.assertVariableIdentical(v[:3], v[x < 3]) self.assertVariableIdentical(v[:, 3:], v[:, y >= 3]) 
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3]) self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]]) self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)]) # test iteration for n, item in enumerate(v): self.assertVariableIdentical(Variable(['y'], data[n]), item) with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'): iter(Variable([], 0)) # test setting v.values[:] = 0 self.assertTrue(np.all(v.values == 0)) # test orthogonal setting v[range(10), range(11)] = 1 self.assertArrayEqual(v.values, np.ones((10, 11))) def test_isel(self): v = Variable(['time', 'x'], self.d) self.assertVariableIdentical(v.isel(time=slice(None)), v) self.assertVariableIdentical(v.isel(time=0), v[0]) self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3]) self.assertVariableIdentical(v.isel(x=0), v[:, 0]) with self.assertRaisesRegexp(ValueError, 'do not exist'): v.isel(not_a_dim=0) def test_index_0d_numpy_string(self): # regression test to verify our work around for indexing 0d strings v = Variable([], np.string_('asdf')) self.assertVariableIdentical(v[()], v) v = Variable([], np.unicode_(u'asdf')) self.assertVariableIdentical(v[()], v) def test_indexing_0d_unicode(self): # regression test for GH568 actual = Variable(('x'), [u'tmax'])[0][()] expected = Variable((), u'tmax') self.assertVariableIdentical(actual, expected) def test_shift(self): v = Variable('x', [1, 2, 3, 4, 5]) self.assertVariableIdentical(v, v.shift(x=0)) self.assertIsNot(v, v.shift(x=0)) expected = Variable('x', [np.nan, 1, 2, 3, 4]) self.assertVariableIdentical(expected, v.shift(x=1)) expected = Variable('x', [np.nan, np.nan, 1, 2, 3]) self.assertVariableIdentical(expected, v.shift(x=2)) expected = Variable('x', [2, 3, 4, 5, np.nan]) self.assertVariableIdentical(expected, v.shift(x=-1)) expected = Variable('x', [np.nan] * 5) self.assertVariableIdentical(expected, v.shift(x=5)) self.assertVariableIdentical(expected, v.shift(x=6)) with self.assertRaisesRegexp(ValueError, 'dimension'): v.shift(z=0) v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'}) self.assertVariableIdentical(v, v.shift(x=0)) expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'}) self.assertVariableIdentical(expected, v.shift(x=1)) def test_shift2d(self): v = Variable(('x', 'y'), [[1, 2], [3, 4]]) expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]]) self.assertVariableIdentical(expected, v.shift(x=1, y=1)) def test_roll(self): v = Variable('x', [1, 2, 3, 4, 5]) self.assertVariableIdentical(v, v.roll(x=0)) self.assertIsNot(v, v.roll(x=0)) expected = Variable('x', [5, 1, 2, 3, 4]) self.assertVariableIdentical(expected, v.roll(x=1)) self.assertVariableIdentical(expected, v.roll(x=-4)) self.assertVariableIdentical(expected, v.roll(x=6)) expected = Variable('x', [4, 5, 1, 2, 3]) self.assertVariableIdentical(expected, v.roll(x=2)) self.assertVariableIdentical(expected, v.roll(x=-3)) with self.assertRaisesRegexp(ValueError, 'dimension'): v.roll(z=0) def test_roll_consistency(self): v = Variable(('x', 'y'), np.random.randn(5, 6)) for axis, dim in [(0, 'x'), (1, 'y')]: for shift in [-3, 0, 1, 7, 11]: expected = np.roll(v.values, shift, axis=axis) actual = v.roll(**{dim: shift}).values self.assertArrayEqual(expected, actual) def test_transpose(self): v = Variable(['time', 'x'], self.d) v2 = Variable(['x', 'time'], self.d.T) self.assertVariableIdentical(v, v2.transpose()) self.assertVariableIdentical(v.transpose(), v.T) x = np.random.randn(2, 3, 4, 5) w = Variable(['a', 'b', 'c', 'd'], x) w2 = Variable(['d', 'b', 'c', 'a'], 
np.einsum('abcd->dbca', x)) self.assertEqual(w2.shape, (5, 3, 4, 2)) self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a')) self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd')) w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x)) self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd')) def test_transpose_0d(self): for value in [ 3.5, ('a', 1), np.datetime64('2000-01-01'), np.timedelta64(1, 'h'), None, object(), ]: variable = Variable([], value) actual = variable.transpose() assert actual.identical(variable) def test_squeeze(self): v = Variable(['x', 'y'], [[1]]) self.assertVariableIdentical(Variable([], 1), v.squeeze()) self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x')) self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x'])) self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y')) self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y'])) v = Variable(['x', 'y'], [[1, 2]]) self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze()) self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x')) with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'): v.squeeze('y') def test_get_axis_num(self): v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4)) self.assertEqual(v.get_axis_num('x'), 0) self.assertEqual(v.get_axis_num(['x']), (0,)) self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1)) self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0)) with self.assertRaisesRegexp(ValueError, 'not found in array dim'): v.get_axis_num('foobar') def test_set_dims(self): v = Variable(['x'], [0, 1]) actual = v.set_dims(['x', 'y']) expected = Variable(['x', 'y'], [[0], [1]]) self.assertVariableIdentical(actual, expected) actual = v.set_dims(['y', 'x']) self.assertVariableIdentical(actual, expected.T) actual = v.set_dims(OrderedDict([('x', 2), ('y', 2)])) expected = Variable(['x', 'y'], [[0, 0], [1, 1]]) self.assertVariableIdentical(actual, expected) v = Variable(['foo'], [0, 1]) actual = v.set_dims('foo') expected = v self.assertVariableIdentical(actual, expected) with self.assertRaisesRegexp(ValueError, 'must be a superset'): v.set_dims(['z']) def test_set_dims_object_dtype(self): v = Variable([], ('a', 1)) actual = v.set_dims(('x',), (3,)) exp_values = np.empty((3,), dtype=object) for i in range(3): exp_values[i] = ('a', 1) expected = Variable(['x'], exp_values) assert actual.identical(expected) def test_stack(self): v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'}) actual = v.stack(z=('x', 'y')) expected = Variable('z', [0, 1, 2, 3], v.attrs) self.assertVariableIdentical(actual, expected) actual = v.stack(z=('x',)) expected = Variable(('y', 'z'), v.data.T, v.attrs) self.assertVariableIdentical(actual, expected) actual = v.stack(z=(),) self.assertVariableIdentical(actual, v) actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y') expected = Variable(('X', 'Y'), v.data, v.attrs) self.assertVariableIdentical(actual, expected) def test_stack_errors(self): v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'}) with self.assertRaisesRegexp(ValueError, 'invalid existing dim'): v.stack(z=('x1',)) with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'): v.stack(x=('x',)) def test_unstack(self): v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'}) actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)])) expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs) self.assertVariableIdentical(actual, expected) actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)])) expected 
= Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs) self.assertVariableIdentical(actual, expected) actual = v.unstack(z=OrderedDict([('x', 4)])) expected = Variable('x', [0, 1, 2, 3], v.attrs) self.assertVariableIdentical(actual, expected) def test_unstack_errors(self): v = Variable('z', [0, 1, 2, 3]) with self.assertRaisesRegexp(ValueError, 'invalid existing dim'): v.unstack(foo={'x': 4}) with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'): v.stack(z=('z',)) with self.assertRaisesRegexp(ValueError, 'the product of the new dim'): v.unstack(z={'x': 5}) def test_unstack_2d(self): v = Variable(['x', 'y'], [[0, 1], [2, 3]]) actual = v.unstack(y={'z': 2}) expected = Variable(['x', 'z'], v.data) self.assertVariableIdentical(actual, expected) actual = v.unstack(x={'z': 2}) expected = Variable(['y', 'z'], v.data.T) self.assertVariableIdentical(actual, expected) def test_stack_unstack_consistency(self): v = Variable(['x', 'y'], [[0, 1], [2, 3]]) actual = (v.stack(z=('x', 'y')) .unstack(z=OrderedDict([('x', 2), ('y', 2)]))) self.assertVariableIdentical(actual, v) def test_broadcasting_math(self): x = np.random.randn(2, 3) v = Variable(['a', 'b'], x) # 1d to 2d broadcasting self.assertVariableIdentical( v * v, Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x))) self.assertVariableIdentical( v * v[0], Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0]))) self.assertVariableIdentical( v[0] * v, Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x))) self.assertVariableIdentical( v[0] * v[:, 0], Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0]))) # higher dim broadcasting y = np.random.randn(3, 4, 5) w = Variable(['b', 'c', 'd'], y) self.assertVariableIdentical( v * w, Variable(['a', 'b', 'c', 'd'], np.einsum('ab,bcd->abcd', x, y))) self.assertVariableIdentical( w * v, Variable(['b', 'c', 'd', 'a'], np.einsum('bcd,ab->bcda', y, x))) self.assertVariableIdentical( v * w[0], Variable(['a', 'b', 'c', 'd'], np.einsum('ab,cd->abcd', x, y[0]))) def test_broadcasting_failures(self): a = Variable(['x'], np.arange(10)) b = Variable(['x'], np.arange(5)) c = Variable(['x', 'x'], np.arange(100).reshape(10, 10)) with self.assertRaisesRegexp(ValueError, 'mismatched lengths'): a + b with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'): a + c def test_inplace_math(self): x = np.arange(5) v = Variable(['x'], x) v2 = v v2 += 1 self.assertIs(v, v2) # since we provided an ndarray for data, it is also modified in-place self.assertIs(source_ndarray(v.values), x) self.assertArrayEqual(v.values, np.arange(5) + 1) with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'): v += Variable('y', np.arange(5)) def test_reduce(self): v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'}) self.assertVariableIdentical(v.reduce(np.std, 'x'), Variable(['y'], self.d.std(axis=0))) self.assertVariableIdentical(v.reduce(np.std, axis=0), v.reduce(np.std, dim='x')) self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']), Variable([], self.d.std(axis=(0, 1)))) self.assertVariableIdentical(v.reduce(np.std), Variable([], self.d.std())) self.assertVariableIdentical( v.reduce(np.mean, 'x').reduce(np.std, 'y'), Variable([], self.d.mean(axis=0).std())) self.assertVariableAllClose(v.mean('x'), v.reduce(np.mean, 'x')) with self.assertRaisesRegexp(ValueError, 'cannot supply both'): v.mean(dim='x', axis=0) @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'), reason='requires numpy version 1.10.0 or later') def test_quantile(self): v = Variable(['x', 'y'], self.d) for q in [0.25, 
[0.50], [0.25, 0.75]]: for axis, dim in zip([None, 0, [0], [0, 1]], [None, 'x', ['x'], ['x', 'y']]): actual = v.quantile(q, dim=dim) expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis) np.testing.assert_allclose(actual.values, expected) @requires_dask def test_quantile_dask_raises(self): # regression for GH1524 v = Variable(['x', 'y'], self.d).chunk(2) with self.assertRaisesRegexp(TypeError, 'arrays stored as dask'): v.quantile(0.5, dim='x') def test_big_endian_reduce(self): # regression test for GH489 data = np.ones(5, dtype='>f4') v = Variable(['x'], data) expected = Variable([], 5) self.assertVariableIdentical(expected, v.sum()) def test_reduce_funcs(self): v = Variable('x', np.array([1, np.nan, 2, 3])) self.assertVariableIdentical(v.mean(), Variable([], 2)) self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2)) self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan)) self.assertVariableIdentical(np.mean(v), Variable([], 2)) self.assertVariableIdentical(v.prod(), Variable([], 6)) self.assertVariableIdentical(v.cumsum(axis=0), Variable('x', np.array([1, 1, 3, 6]))) self.assertVariableIdentical(v.cumprod(axis=0), Variable('x', np.array([1, 1, 2, 6]))) self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3)) if LooseVersion(np.__version__) < '1.9': with self.assertRaises(NotImplementedError): v.median() else: self.assertVariableIdentical(v.median(), Variable([], 2)) v = Variable('x', [True, False, False]) self.assertVariableIdentical(v.any(), Variable([], True)) self.assertVariableIdentical(v.all(dim='x'), Variable([], False)) v = Variable('t', pd.date_range('2000-01-01', periods=3)) with self.assertRaises(NotImplementedError): v.max(skipna=True) self.assertVariableIdentical( v.max(), Variable([], pd.Timestamp('2000-01-03'))) def test_reduce_keep_attrs(self): _attrs = {'units': 'test', 'long_name': 'testing'} v = Variable(['x', 'y'], self.d, _attrs) # Test dropped attrs vm = v.mean() self.assertEqual(len(vm.attrs), 0) self.assertEqual(vm.attrs, OrderedDict()) # Test kept attrs vm = v.mean(keep_attrs=True) self.assertEqual(len(vm.attrs), len(_attrs)) self.assertEqual(vm.attrs, _attrs) def test_count(self): expected = Variable([], 3) actual = Variable(['x'], [1, 2, 3, np.nan]).count() self.assertVariableIdentical(expected, actual) v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object)) actual = v.count() self.assertVariableIdentical(expected, actual) actual = Variable(['x'], [True, False, True]).count() self.assertVariableIdentical(expected, actual) self.assertEqual(actual.dtype, int) expected = Variable(['x'], [2, 3]) actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y') self.assertVariableIdentical(expected, actual) class TestIndexVariable(TestCase, VariableSubclassTestCases): cls = staticmethod(IndexVariable) def test_init(self): with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'): IndexVariable((), 0) def test_to_index(self): data = 0.5 * np.arange(10) v = IndexVariable(['time'], data, {'foo': 'bar'}) self.assertTrue(pd.Index(data, name='time').identical(v.to_index())) def test_multiindex_default_level_names(self): midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]]) v = IndexVariable(['x'], midx, {'foo': 'bar'}) self.assertEqual(v.to_index().names, ('x_level_0', 'x_level_1')) def test_data(self): x = IndexVariable('x', np.arange(3.0)) self.assertIsInstance(x._data, PandasIndexAdapter) self.assertIsInstance(x.data, np.ndarray) self.assertEqual(float, x.dtype) self.assertArrayEqual(np.arange(3), x) 
self.assertEqual(float, x.values.dtype) with self.assertRaisesRegexp(TypeError, 'cannot be modified'): x[:] = 0 def test_name(self): coord = IndexVariable('x', [10.0]) self.assertEqual(coord.name, 'x') with self.assertRaises(AttributeError): coord.name = 'y' def test_level_names(self): midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['level_1', 'level_2']) x = IndexVariable('x', midx) self.assertEqual(x.level_names, midx.names) self.assertIsNone(IndexVariable('y', [10.0]).level_names) def test_get_level_variable(self): midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['level_1', 'level_2']) x = IndexVariable('x', midx) level_1 = IndexVariable('x', midx.get_level_values('level_1')) self.assertVariableIdentical(x.get_level_variable('level_1'), level_1) with self.assertRaisesRegexp(ValueError, 'has no MultiIndex'): IndexVariable('y', [10.0]).get_level_variable('level') def test_concat_periods(self): periods = pd.period_range('2000-01-01', periods=10) coords = [IndexVariable('t', periods[:5]), IndexVariable('t', periods[5:])] expected = IndexVariable('t', periods) actual = IndexVariable.concat(coords, dim='t') assert actual.identical(expected) assert isinstance(actual.to_index(), pd.PeriodIndex) positions = [list(range(5)), list(range(5, 10))] actual = IndexVariable.concat(coords, dim='t', positions=positions) assert actual.identical(expected) assert isinstance(actual.to_index(), pd.PeriodIndex) def test_concat_multiindex(self): idx = pd.MultiIndex.from_product([[0, 1, 2], ['a', 'b']]) coords = [IndexVariable('x', idx[:2]), IndexVariable('x', idx[2:])] expected = IndexVariable('x', idx) actual = IndexVariable.concat(coords, dim='x') assert actual.identical(expected) assert isinstance(actual.to_index(), pd.MultiIndex) def test_coordinate_alias(self): with self.assertWarns('deprecated'): x = Coordinate('x', [1, 2, 3]) self.assertIsInstance(x, IndexVariable) class TestAsCompatibleData(TestCase): def test_unchanged_types(self): types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray) for t in types: for data in [np.arange(3), pd.date_range('2000-01-01', periods=3), pd.date_range('2000-01-01', periods=3).values]: x = t(data) self.assertIs(source_ndarray(x), source_ndarray(as_compatible_data(x))) def test_converted_types(self): for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]: actual = as_compatible_data(input_array) self.assertArrayEqual(np.asarray(input_array), actual) self.assertEqual(np.ndarray, type(actual)) self.assertEqual(np.asarray(input_array).dtype, actual.dtype) def test_masked_array(self): original = np.ma.MaskedArray(np.arange(5)) expected = np.arange(5) actual = as_compatible_data(original) self.assertArrayEqual(expected, actual) self.assertEqual(np.dtype(int), actual.dtype) original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True]) expected = np.arange(5.0) expected[-1] = np.nan actual = as_compatible_data(original) self.assertArrayEqual(expected, actual) self.assertEqual(np.dtype(float), actual.dtype) def test_datetime(self): expected = np.datetime64('2000-01-01T00Z') actual = as_compatible_data(expected) self.assertEqual(expected, actual) self.assertEqual(np.ndarray, type(actual)) self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype) expected = np.array([np.datetime64('2000-01-01T00Z')]) actual = as_compatible_data(expected) self.assertEqual(np.asarray(expected), actual) self.assertEqual(np.ndarray, type(actual)) self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype) expected = 
np.array([np.datetime64('2000-01-01T00Z', 'ns')]) actual = as_compatible_data(expected) self.assertEqual(np.asarray(expected), actual) self.assertEqual(np.ndarray, type(actual)) self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype) self.assertIs(expected, source_ndarray(np.asarray(actual))) expected = np.datetime64('2000-01-01T00Z', 'ns') actual = as_compatible_data(datetime(2000, 1, 1)) self.assertEqual(np.asarray(expected), actual) self.assertEqual(np.ndarray, type(actual)) self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype) def test_full_like(self): # For more thorough tests, see test_variable.py orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]], attrs={'foo': 'bar'}) expect = orig.copy(deep=True) expect.values = [[2.0, 2.0], [2.0, 2.0]] self.assertVariableIdentical(expect, full_like(orig, 2)) # override dtype expect.values = [[True, True], [True, True]] self.assertEquals(expect.dtype, bool) self.assertVariableIdentical(expect, full_like(orig, True, dtype=bool)) @requires_dask def test_full_like_dask(self): orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]], attrs={'foo': 'bar'}).chunk(((1, 1), (2,))) def check(actual, expect_dtype, expect_values): self.assertEqual(actual.dtype, expect_dtype) self.assertEqual(actual.shape, orig.shape) self.assertEqual(actual.dims, orig.dims) self.assertEqual(actual.attrs, orig.attrs) self.assertEqual(actual.chunks, orig.chunks) self.assertArrayEqual(actual.values, expect_values) check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2)) # override dtype check(full_like(orig, True, dtype=bool), bool, np.full_like(orig.values, True, dtype=bool)) # Check that there's no array stored inside dask # (e.g. we didn't create a numpy array and then we chunked it!) dsk = full_like(orig, 1).data.dask for v in dsk.values(): if isinstance(v, tuple): for vi in v: assert not isinstance(vi, np.ndarray) else: assert not isinstance(v, np.ndarray) def test_zeros_like(self): orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]], attrs={'foo': 'bar'}) self.assertVariableIdentical(zeros_like(orig), full_like(orig, 0)) self.assertVariableIdentical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int)) def test_ones_like(self): orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]], attrs={'foo': 'bar'}) self.assertVariableIdentical(ones_like(orig), full_like(orig, 1)) self.assertVariableIdentical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
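# A minimal sketch of the core Variable API these tests exercise (assumption: an
# environment with numpy and this version of xarray importable; the names below are
# illustrative and not defined elsewhere in this file):
#
#     import numpy as np
#     from xarray import Variable
#
#     v = Variable(('x', 'y'), np.arange(6).reshape(2, 3), attrs={'units': 'm'})
#     v.sum(dim='y')            # reduce over a named dimension -> 1-d Variable
#     v.transpose('y', 'x')     # reorder dimensions by name
#     v.isel(x=0)               # positional indexing by dimension name
#
# The suite itself is normally run with pytest, e.g.
#     pytest xarray/tests/test_variable.py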
apache-2.0
NeuralEnsemble/elephant
elephant/asset/asset.py
2
102992
# -*- coding: utf-8 -*- """ ASSET is a statistical method :cite:`asset-Torre16_e1004939` for the detection of repeating sequences of synchronous spiking events in parallel spike trains. ASSET analysis class object of finding patterns ----------------------------------------------- .. autosummary:: :toctree: _toctree/asset/ ASSET Patterns post-exploration ------------------------- .. autosummary:: :toctree: _toctree/asset/ synchronous_events_intersection synchronous_events_difference synchronous_events_identical synchronous_events_no_overlap synchronous_events_contained_in synchronous_events_contains_all synchronous_events_overlap Tutorial -------- :doc:`View tutorial <../tutorials/asset>` Run tutorial interactively: .. image:: https://mybinder.org/badge.svg :target: https://mybinder.org/v2/gh/NeuralEnsemble/elephant/master ?filepath=doc/tutorials/asset.ipynb Examples -------- In this example we * simulate two noisy synfire chains; * shuffle the neurons to destroy the visual appearance of the patterns; * run ASSET analysis to recover the original neuron arrangement. 1. Simulate two noisy synfire chains, shuffle the neurons to destroy the pattern visually, and store shuffled activations in neo.SpikeTrains. >>> import neo >>> import numpy as np >>> import quantities as pq >>> np.random.seed(10) >>> spiketrain = np.linspace(0, 50, num=10) >>> np.random.shuffle(spiketrain) >>> spiketrains = np.c_[spiketrain, spiketrain + 100] >>> spiketrains += np.random.random_sample(spiketrains.shape) * 5 >>> spiketrains = [neo.SpikeTrain(st, units='ms', t_stop=1 * pq.s) ... for st in spiketrains] 2. Create `ASSET` class object that holds spike trains. `ASSET` requires at least one argument - a list of spike trains. If `spiketrains_y` is not provided, the same spike trains are used to build an intersection matrix with. >>> from elephant import asset >>> asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms) 3. Build the intersection matrix `imat`: >>> imat = asset_obj.intersection_matrix() 4. Estimate the probability matrix `pmat`, using the analytical method: >>> pmat = asset_obj.probability_matrix_analytical(imat, ... kernel_width=50*pq.ms) 5. Compute the joint probability matrix `jmat`, using a suitable filter: >>> jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(5, 1), ... n_largest=3) 6. Create the masked version of the intersection matrix, `mmat`, from `pmat` and `jmat`: >>> mmat = asset_obj.mask_matrices([pmat, jmat], thresholds=.9) 7. Cluster significant elements of imat into diagonal structures: >>> cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=11, ... min_neighbors=3, stretch=5) 8. Extract sequences of synchronous events: >>> sses = asset_obj.extract_synchronous_events(cmat) ASSET found the following sequences of synchronous events: >>> sses {1: {(36, 2): {5}, (37, 4): {1}, (40, 6): {4}, (41, 7): {8}, (43, 9): {2}, (47, 14): {7}, (48, 15): {0}, (50, 17): {9}}} To visualize them, refer to Viziphant documentation and an example plot :func:`viziphant.asset.plot_synchronous_events`. 
""" from __future__ import division, print_function, unicode_literals import math import os import subprocess import sys import tempfile import warnings from pathlib import Path import neo import numpy as np import quantities as pq import scipy.spatial import scipy.stats from sklearn.cluster import dbscan from sklearn.metrics import pairwise_distances, pairwise_distances_chunked from tqdm import trange, tqdm import elephant.conversion as conv from elephant import spike_train_surrogates from elephant.utils import get_cuda_capability_major try: from mpi4py import MPI mpi_accelerated = True comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() except ImportError: mpi_accelerated = False size = 1 rank = 0 __all__ = [ "ASSET", "synchronous_events_intersection", "synchronous_events_difference", "synchronous_events_identical", "synchronous_events_no_overlap", "synchronous_events_contained_in", "synchronous_events_contains_all", "synchronous_events_overlap" ] # ============================================================================= # Some Utility Functions to be dealt with in some way or another # ============================================================================= def _signals_same_attribute(signals, attr_name): """ Check whether a list of signals (`neo.AnalogSignal` or `neo.SpikeTrain`) have same attribute `attr_name`. If so, return that value. Otherwise, raise ValueError. Parameters ---------- signals : list A list of signals (e.g. `neo.AnalogSignal` or `neo.SpikeTrain`) having attribute `attr_name`. Returns ------- pq.Quantity The value of the common attribute `attr_name` of the list of signals. Raises ------ ValueError If `signals` is an empty list. If `signals` have different `attr_name` attribute values. """ if len(signals) == 0: raise ValueError('Empty signals list') attribute = getattr(signals[0], attr_name) for sig in signals[1:]: if getattr(sig, attr_name) != attribute: raise ValueError( "Signals have different '{}' values".format(attr_name)) return attribute def _quantities_almost_equal(x, y): """ Returns True if two quantities are almost equal, i.e., if `x - y` is "very close to 0" (not larger than machine precision for floats). Parameters ---------- x : pq.Quantity First Quantity to compare. y : pq.Quantity Second Quantity to compare. Must have same unit type as `x`, but not necessarily the same shape. Any shapes of `x` and `y` for which `x - y` can be calculated are permitted. Returns ------- np.ndarray Array of `bool`, which is True at any position where `x - y` is almost zero. Notes ----- Not the same as `numpy.testing.assert_allclose` (which does not work with Quantities) and `numpy.testing.assert_almost_equal` (which works only with decimals) """ eps = np.finfo(float).eps relative_diff = (x - y).magnitude return np.all([-eps <= relative_diff, relative_diff <= eps], axis=0) def _transactions(spiketrains, bin_size, t_start, t_stop, ids=None): """ Transform parallel spike trains into a list of sublists, called transactions, each corresponding to a time bin and containing the list of spikes in `spiketrains` falling into that bin. To compute each transaction, the spike trains are binned (with adjacent exclusive binning) and clipped (i.e., spikes from the same train falling in the same bin are counted as one event). The list of spike IDs within each bin form the corresponding transaction. 
Parameters ---------- spiketrains : list of neo.SpikeTrain or list of tuple A list of `neo.SpikeTrain` objects, or list of pairs (Train_ID, `neo.SpikeTrain`), where `Train_ID` can be any hashable object. bin_size : pq.Quantity Width of each time bin. Time is binned to determine synchrony. t_start : pq.Quantity The starting time. Only spikes occurring at times `t >= t_start` are considered. The first transaction contains spikes falling into the time segment `[t_start, t_start+bin_size]`. If None, takes the value of `spiketrain.t_start`, common for all input `spiketrains` (raises ValueError if it's not the case). Default: None t_stop : pq.Quantity The ending time. Only spikes occurring at times `t < t_stop` are considered. If None, takes the value of `spiketrain.t_stop`, common for all input `spiketrains` (raises ValueError if it's not the case). Default: None ids : list of int, optional List of spike train IDs. If None, the IDs `0` to `N-1` are used, where `N` is the number of input spike trains. Default: None Returns ------- list of list A list of transactions, where each transaction corresponds to a time bin and represents the list of spike train IDs having a spike in that time bin. Raises ------ TypeError If `spiketrains` is not a list of `neo.SpikeTrain` or a list of tuples (id, `neo.SpikeTrain`). """ if all(isinstance(st, neo.SpikeTrain) for st in spiketrains): trains = spiketrains if ids is None: ids = range(len(spiketrains)) else: # (id, SpikeTrain) pairs try: ids, trains = zip(*spiketrains) except TypeError: raise TypeError('spiketrains must be either a list of ' + 'SpikeTrains or a list of (id, SpikeTrain) pairs') # Bin the spike trains and take for each of them the ids of filled bins binned = conv.BinnedSpikeTrain( trains, bin_size=bin_size, t_start=t_start, t_stop=t_stop) filled_bins = binned.spike_indices # Compute and return the transaction list return [[train_id for train_id, b in zip(ids, filled_bins) if bin_id in b] for bin_id in range(binned.n_bins)] def _analog_signal_step_interp(signal, times): """ Compute the step-wise interpolation of a signal at desired times. Given a signal (e.g. a `neo.AnalogSignal`) `s` taking values `s[t0]` and `s[t1]` at two consecutive time points `t0` and `t1` (`t0 < t1`), the value of the step-wise interpolation at time `t: t0 <= t < t1` is given by `s[t] = s[t0]`. Parameters ---------- signal : neo.AnalogSignal The analog signal, containing the discretization of the function to interpolate. times : pq.Quantity A vector of time points at which the step interpolation is computed. Returns ------- pq.Quantity Object with same shape of `times` and containing the values of the interpolated signal at the time points in `times`. """ dt = signal.sampling_period # Compute the ids of the signal times to the left of each time in times time_ids = np.floor( ((times - signal.t_start) / dt).rescale( pq.dimensionless).magnitude).astype('i') return (signal.magnitude[time_ids] * signal.units).rescale(signal.units) # ============================================================================= # HERE ASSET STARTS # ============================================================================= def _stretched_metric_2d(x, y, stretch, ref_angle, working_memory=None): r""" Given a list of points on the real plane, identified by their abscissa `x` and ordinate `y`, compute a stretched transformation of the Euclidean distance among each of them. 
The classical euclidean distance `d` between points `(x1, y1)` and `(x2, y2)`, i.e., :math:`\sqrt((x1-x2)^2 + (y1-y2)^2)`, is multiplied by a factor .. math:: 1 + (stretch - 1.) * \abs(\sin(ref_angle - \theta)), where :math:`\theta` is the angle between the points and the 45 degree direction (i.e., the line `y = x`). The stretching factor thus steadily varies between 1 (if the line connecting `(x1, y1)` and `(x2, y2)` has inclination `ref_angle`) and `stretch` (if that line has inclination `90 + ref_angle`). Parameters ---------- x : (n,) np.ndarray Array of abscissas of all points among which to compute the distance. y : (n,) np.ndarray Array of ordinates of all points among which to compute the distance (same shape as `x`). stretch : float Maximum stretching factor, applied if the line connecting the points has inclination `90 + ref_angle`. ref_angle : float Reference angle in degrees (i.e., the inclination along which the stretching factor is 1). Returns ------- D : (n,n) np.ndarray Square matrix of distances between all pairs of points. """ alpha = np.deg2rad(ref_angle) # reference angle in radians # Create the array of points (one per row) for which to compute the # stretched distance points = np.column_stack([x, y]) x_array = np.expand_dims(x, axis=0) y_array = np.expand_dims(y, axis=0) def calculate_stretch_mat(theta_mat, D_mat): # Transform [-pi, pi] back to [-pi/2, pi/2] theta_mat[theta_mat < -np.pi / 2] += np.pi theta_mat[theta_mat > np.pi / 2] -= np.pi # Compute the matrix of stretching factors for each pair of points. # Equivalent to: # stretch_mat = 1 + (stretch - 1.) * np.abs(np.sin(alpha - theta)) _stretch_mat = np.subtract(alpha, theta_mat, out=theta_mat) _stretch_mat = np.sin(_stretch_mat, out=_stretch_mat) _stretch_mat = np.abs(_stretch_mat, out=_stretch_mat) _stretch_mat = np.multiply(stretch - 1, _stretch_mat, out=_stretch_mat) _stretch_mat = np.add(1, _stretch_mat, out=_stretch_mat) _stretch_mat = np.multiply(D_mat, _stretch_mat, out=_stretch_mat) return _stretch_mat if working_memory is None: # Compute the matrix D[i, j] of euclidean distances among points # i and j D = pairwise_distances(points) # Compute the angular coefficients of the line between each pair of # points # dX[i,j]: x difference between points i and j # dY[i,j]: y difference between points i and j dX = x_array.T - x_array dY = y_array.T - y_array # Compute the matrix Theta of angles between each pair of points theta = np.arctan2(dY, dX, dtype=np.float32) stretch_mat = calculate_stretch_mat(theta, D) else: start = 0 # x and y sizes are the same stretch_mat = np.empty((len(x), len(y)), dtype=np.float32) for D_chunk in pairwise_distances_chunked( points, working_memory=working_memory): chunk_size = D_chunk.shape[0] dX = x_array[:, start: start + chunk_size].T - x_array dY = y_array[:, start: start + chunk_size].T - y_array theta_chunk = np.arctan2( dY, dX, out=stretch_mat[start: start + chunk_size, :]) # stretch_mat (theta_chunk) is updated in-place here calculate_stretch_mat(theta_chunk, D_chunk) start += chunk_size # Return the stretched distance matrix return stretch_mat def _interpolate_signals(signals, sampling_times, verbose=False): """ Interpolate signals at given sampling times. """ # Reshape all signals to one-dimensional array object (e.g. 
AnalogSignal) for i, signal in enumerate(signals): if signal.ndim == 2: signals[i] = signal.flatten() elif signal.ndim > 2: raise ValueError('elements in fir_rates must have 2 dimensions') if verbose: print('create time slices of the rates...') # Interpolate in the time bins interpolated_signal = np.vstack([_analog_signal_step_interp( signal, sampling_times).rescale('Hz').magnitude for signal in signals]) * pq.Hz return interpolated_signal class _GPUBackend: """ Parameters ---------- max_chunk_size: int or None, optional Defines the maximum chunk size used in the `_split_axis` function. The users typically don't need to set this parameter manually - it's used to simulate scenarios when the input matrix is so large that it cannot fit into GPU memory. Setting this parameter manually can resolve GPU memory errors in case automatic parameters adjustment fails. Notes ----- 1. PyOpenCL backend takes some time to compile the kernel for the first time - the caching will affect your benchmarks unless you run each program twice. 2. Pinned Host Memory. Host (CPU) data allocations are pageable by default. The GPU cannot access data directly from pageable host memory, so when a data transfer from pageable host memory to device memory is invoked, the CUDA driver must first allocate a temporary page-locked, or "pinned", host array, copy the host data to the pinned array, and then transfer the data from the pinned array to device memory, as illustrated at https://developer.nvidia.com/blog/how-optimize-data-transfers-cuda-cc/ Same for OpenCL. Therefore, Python memory analyzers show increments in the used RAM each time an OpenCL/CUDA buffer is created. As with any Python objects, PyOpenCL and PyCUDA clean up and free allocated memory automatically when garbage collection is executed. """ def __init__(self, max_chunk_size=None): self.max_chunk_size = max_chunk_size def _choose_backend(self): # If CUDA is detected, always use CUDA. # If OpenCL is detected, don't use it by default to avoid the system # becoming unresponsive until the program terminates. use_cuda = int(os.getenv("ELEPHANT_USE_CUDA", '1')) use_opencl = int(os.getenv("ELEPHANT_USE_OPENCL", '1')) cuda_detected = get_cuda_capability_major() != 0 if use_cuda and cuda_detected: return self.pycuda if use_opencl: return self.pyopencl return self.cpu def _split_axis(self, chunk_size, axis_size, min_chunk_size=None): chunk_size = min(chunk_size, axis_size) if self.max_chunk_size is not None: chunk_size = min(chunk_size, self.max_chunk_size) if min_chunk_size is not None and chunk_size < min_chunk_size: raise ValueError(f"[GPU not enough memory] Impossible to split " f"the array into chunks of size at least " f"{min_chunk_size} to fit into GPU memory") n_chunks = math.ceil(axis_size / chunk_size) chunk_size = math.ceil(axis_size / n_chunks) # align in size if min_chunk_size is not None: chunk_size = max(chunk_size, min_chunk_size) split_idx = list(range(0, axis_size, chunk_size)) last_id = split_idx[-1] last_size = axis_size - last_id # last is the smallest split_idx = list(zip(split_idx[:-1], split_idx[1:])) if min_chunk_size is not None and last_size < min_chunk_size: # Overlap the last chunk with the previous. # The overlapped part (intersection) will be computed twice. 
last_id = axis_size - min_chunk_size split_idx.append((last_id, axis_size)) return chunk_size, split_idx class _JSFUniformOrderStat3D(_GPUBackend): def __init__(self, n, d, precision='float', verbose=False, cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5, max_chunk_size=None): super().__init__(max_chunk_size=max_chunk_size) if d > n: raise ValueError(f"d ({d}) must be less or equal n ({n})") self.n = n self.d = d self.precision = precision self.verbose = verbose and rank == 0 self.cuda_threads = cuda_threads self.cuda_cwr_loops = cuda_cwr_loops self.map_iterations = self._create_iteration_table() bits = 32 if precision == "float" else 64 self.dtype = np.dtype(f"float{bits}") self.tolerance = tolerance @property def num_iterations(self): # map_iterations table is populated with element indices, not counts; # therefore, we add 1 return self.map_iterations[:, -1].sum() + 1 def _create_iteration_table(self): # do not use numpy arrays - they are limited to uint64 map_iterations = [list(range(self.n))] for row_id in range(1, self.d): prev_row = map_iterations[row_id - 1] curr_row = [0] * (row_id + 1) for col_id in range(row_id + 1, self.n): cumsum = prev_row[col_id] + curr_row[-1] curr_row.append(cumsum) map_iterations.append(curr_row) # here we can wrap the resulting array in numpy: # if at least one item is greater than 2<<63 - 1, # the data type will be set to 'object' map_iterations = np.vstack(map_iterations) return map_iterations def _combinations_with_replacement(self): # Generate sequences of {a_i} such that # a_0 >= a_1 >= ... >= a_(d-1) and # d-i <= a_i <= n, for each i in [0, d-1]. # # Almost equivalent to # list(itertools.combinations_with_replacement(range(n, 0, -1), r=d)) # [::-1] # # Example: # _combinations_with_replacement(n=13, d=3) --> # (3, 2, 1), (3, 2, 2), (3, 3, 1), ... , (13, 13, 12), (13, 13, 13). # # The implementation follows the insertion sort algorithm: # insert a new element a_i from right to left to keep the reverse # sorted order. Now substitute increment operation for insert. if self.d > self.n: return if self.d == 1: for matrix_entry in range(1, self.n + 1): yield (matrix_entry,) return sequence_sorted = list(range(self.d, 0, -1)) input_order = tuple(sequence_sorted) # fixed while sequence_sorted[0] != self.n + 1: for last_element in range(1, sequence_sorted[-2] + 1): sequence_sorted[-1] = last_element yield tuple(sequence_sorted) increment_id = self.d - 2 while increment_id > 0 and sequence_sorted[increment_id - 1] == \ sequence_sorted[increment_id]: increment_id -= 1 sequence_sorted[increment_id + 1:] = input_order[increment_id + 1:] sequence_sorted[increment_id] += 1 def cpu(self, log_du): log_1 = np.log(1.) # Compute the log of the integral's coefficient logK = np.sum(np.log(np.arange(1, self.n + 1))) # Add to the 3D matrix u a bottom layer equal to 0 and a # top layer equal to 1. Then compute the difference du along # the first dimension. # prepare arrays for usage inside the loop di_scratch = np.empty_like(log_du, dtype=np.int32) log_du_scratch = np.empty_like(log_du) # precompute log(factorial)s # pad with a zero to get 0! 
= 1 log_factorial = np.hstack((0, np.cumsum(np.log(range(1, self.n + 1))))) # compute the probabilities for each unique row of du # only loop over the indices and do all du entries at once # using matrix algebra # initialise probabilities to 0 P_total = np.zeros( log_du.shape[0], dtype=np.float32 if self.precision == 'float' else np.float64 ) for iter_id, matrix_entries in enumerate( tqdm(self._combinations_with_replacement(), total=self.num_iterations, desc="Joint survival function", disable=not self.verbose)): # if we are running with MPI if mpi_accelerated and iter_id % size != rank: continue # we only need the differences of the indices: di = -np.diff((self.n,) + matrix_entries + (0,)) # reshape the matrix to be compatible with du di_scratch[:, range(len(di))] = di # use precomputed factorials sum_log_di_factorial = log_factorial[di].sum() # Compute for each i,j the contribution to the probability # given by this step, and add it to the total probability # Use precomputed log np.copyto(log_du_scratch, log_du) # for each a=0,1,...,A-1 and b=0,1,...,B-1, replace du with 1 # whenever di_scratch = 0, so that du ** di_scratch = 1 (this # avoids nans when both du and di_scratch are 0, and is # mathematically correct) log_du_scratch[di_scratch == 0] = log_1 di_log_du = di_scratch * log_du_scratch sum_di_log_du = di_log_du.sum(axis=1) logP = sum_di_log_du - sum_log_di_factorial P_total += np.exp(logP + logK) if mpi_accelerated: totals = np.zeros_like(P_total) # exchange all the results mpi_float_type = MPI.FLOAT \ if self.precision == 'float' else MPI.DOUBLE comm.Allreduce( [P_total, mpi_float_type], [totals, mpi_float_type], op=MPI.SUM) # We need to return the collected totals instead of the local # P_total P_total = totals return P_total def _compile_template(self, template_name, **kwargs): from jinja2 import Template cu_template_path = Path(__file__).parent / template_name cu_template = Template(cu_template_path.read_text()) asset_cu = cu_template.render( precision=self.precision, CWR_LOOPS=self.cuda_cwr_loops, N=self.n, D=self.d, **kwargs) return asset_cu def pyopencl(self, log_du, device_id=0): import pyopencl as cl import pyopencl.array as cl_array self._check_input(log_du) it_todo = self.num_iterations u_length = log_du.shape[0] context = cl.create_some_context(interactive=False) if self.verbose: print("Available OpenCL devices:\n", context.devices) device = context.devices[device_id] # A queue bounded to the device queue = cl.CommandQueue(context) max_l_block = device.local_mem_size // ( self.dtype.itemsize * (self.d + 2)) n_threads = min(self.cuda_threads, max_l_block, device.max_work_group_size) if n_threads > 32: # It's more efficient to make the number of threads # a multiple of the warp size (32). 
n_threads -= n_threads % 32 iteration_table_str = ", ".join(f"{val}LU" for val in self.map_iterations.flatten()) iteration_table_str = "{%s}" % iteration_table_str log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))] logK = log_factorial[-1] log_factorial_str = ", ".join(f"{val:.10f}" for val in log_factorial) log_factorial_str = "{%s}" % log_factorial_str atomic_int = 'int' if self.precision == 'float' else 'long' # GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default mem_avail = min(device.max_mem_alloc_size, device.global_mem_size, 1 << 31) # 4 * (D + 1) * size + 8 * size == mem_avail chunk_size = mem_avail // (4 * log_du.shape[1] + self.dtype.itemsize) chunk_size, split_idx = self._split_axis(chunk_size=chunk_size, axis_size=u_length) P_total = np.empty(u_length, dtype=self.dtype) P_total_gpu = cl_array.Array(queue, shape=chunk_size, dtype=self.dtype) for i_start, i_end in split_idx: log_du_gpu = cl_array.to_device(queue, log_du[i_start: i_end], async_=True) P_total_gpu.fill(0, queue=queue) chunk_size = i_end - i_start l_block = min(n_threads, chunk_size) l_num_blocks = math.ceil(chunk_size / l_block) grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops)) if grid_size > l_num_blocks: # make grid_size divisible by l_num_blocks grid_size -= grid_size % l_num_blocks else: # grid_size must be at least l_num_blocks grid_size = l_num_blocks if self.verbose: print(f"[Joint prob. matrix] it_todo={it_todo}, " f"grid_size={grid_size}, L_BLOCK={l_block}, " f"N_THREADS={n_threads}") # OpenCL defines unsigned long as uint64, therefore we're adding # the LU suffix, not LLU, which would indicate unsupported uint128 # data type format. asset_cl = self._compile_template( template_name="joint_pmat.cl", L=f"{chunk_size}LU", L_BLOCK=l_block, L_NUM_BLOCKS=l_num_blocks, ITERATIONS_TODO=f"{it_todo}LU", logK=f"{logK:.10f}f", iteration_table=iteration_table_str, log_factorial=log_factorial_str, ATOMIC_UINT=f"unsigned {atomic_int}", ASSET_ENABLE_DOUBLE_SUPPORT=int(self.precision == "double") ) program = cl.Program(context, asset_cl).build() # synchronize cl.enqueue_barrier(queue) kernel = program.jsf_uniform_orderstat_3d_kernel kernel(queue, (grid_size,), (n_threads,), P_total_gpu.data, log_du_gpu.data, g_times_l=True) P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end]) return P_total def pycuda(self, log_du): try: # PyCuda should not be in requirements-extra because CPU limited # users won't be able to install Elephant. import pycuda.autoinit import pycuda.gpuarray as gpuarray import pycuda.driver as drv from pycuda.compiler import SourceModule except ImportError as err: raise ImportError( "Install pycuda with 'pip install pycuda'") from err self._check_input(log_du) it_todo = self.num_iterations u_length = log_du.shape[0] device = pycuda.autoinit.device max_l_block = device.MAX_SHARED_MEMORY_PER_BLOCK // ( self.dtype.itemsize * (self.d + 2)) n_threads = min(self.cuda_threads, max_l_block, device.MAX_THREADS_PER_BLOCK) if n_threads > device.WARP_SIZE: # It's more efficient to make the number of threads # a multiple of the warp size (32). 
n_threads -= n_threads % device.WARP_SIZE log_factorial = np.r_[0, np.cumsum(np.log(range(1, self.n + 1)))] log_factorial = log_factorial.astype(self.dtype) logK = log_factorial[-1] free, total = drv.mem_get_info() # 4 * (D + 1) * size + 8 * size == mem_avail chunk_size = free // (4 * log_du.shape[1] + self.dtype.itemsize) chunk_size, split_idx = self._split_axis(chunk_size=chunk_size, axis_size=u_length) P_total = np.empty(u_length, dtype=self.dtype) P_total_gpu = gpuarray.GPUArray(chunk_size, dtype=self.dtype) log_du_gpu = drv.mem_alloc(4 * chunk_size * log_du.shape[1]) for i_start, i_end in split_idx: drv.memcpy_htod_async(dest=log_du_gpu, src=log_du[i_start: i_end]) P_total_gpu.fill(0) chunk_size = i_end - i_start l_block = min(n_threads, chunk_size) l_num_blocks = math.ceil(chunk_size / l_block) grid_size = math.ceil(it_todo / (n_threads * self.cuda_cwr_loops)) grid_size = min(grid_size, device.MAX_GRID_DIM_X) if grid_size > l_num_blocks: # make grid_size divisible by l_num_blocks grid_size -= grid_size % l_num_blocks else: # grid_size must be at least l_num_blocks grid_size = l_num_blocks if self.verbose: print(f"[Joint prob. matrix] it_todo={it_todo}, " f"grid_size={grid_size}, L_BLOCK={l_block}, " f"N_THREADS={n_threads}") asset_cu = self._compile_template( template_name="joint_pmat.cu", L=f"{chunk_size}LLU", L_BLOCK=l_block, L_NUM_BLOCKS=l_num_blocks, ITERATIONS_TODO=f"{it_todo}LLU", logK=f"{logK:.10f}f", ) module = SourceModule(asset_cu) iteration_table_gpu, _ = module.get_global("iteration_table") iteration_table = self.map_iterations.astype(np.uint64) drv.memcpy_htod(iteration_table_gpu, iteration_table) log_factorial_gpu, _ = module.get_global("log_factorial") drv.memcpy_htod(log_factorial_gpu, log_factorial) drv.Context.synchronize() kernel = module.get_function("jsf_uniform_orderstat_3d_kernel") kernel(P_total_gpu.gpudata, log_du_gpu, grid=(grid_size, 1), block=(n_threads, 1, 1)) P_total_gpu[:chunk_size].get(ary=P_total[i_start: i_end]) return P_total def _cuda(self, log_du): # Compile a self-contained joint_pmat_old.cu file and run it # in a terminal. Having this function is useful to debug ASSET CUDA # application because it's self-contained and the logic is documented. # Don't use this backend when the 'log_du' arrays are huge because # of the disk I/O operations. # A note to developers: remove this backend in half a year once the # pycuda backend proves to be stable. self._check_input(log_du) asset_cu = self._compile_template( template_name="joint_pmat_old.cu", L=f"{log_du.shape[0]}LLU", N_THREADS=self.cuda_threads, ITERATIONS_TODO=f"{self.num_iterations}LLU", ASSET_DEBUG=int(self.verbose) ) with tempfile.TemporaryDirectory() as asset_tmp_folder: asset_cu_path = os.path.join(asset_tmp_folder, 'asset.cu') asset_bin_path = os.path.join(asset_tmp_folder, 'asset.o') with open(asset_cu_path, 'w') as f: f.write(asset_cu) # -O3 optimization flag is for the host code only; # by default, GPU device code is optimized with -O3. # -w to ignore warnings. 
compile_cmd = ['nvcc', '-w', '-O3', '-o', asset_bin_path, asset_cu_path] if self.precision == 'double' and get_cuda_capability_major() >= 6: # atomicAdd(double) requires compute capability 6.x compile_cmd.extend(['-arch', 'sm_60']) compile_status = subprocess.run( compile_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if self.verbose: print(compile_status.stdout.decode()) print(compile_status.stderr.decode(), file=sys.stderr) compile_status.check_returncode() log_du_path = os.path.join(asset_tmp_folder, "log_du.dat") P_total_path = os.path.join(asset_tmp_folder, "P_total.dat") with open(log_du_path, 'wb') as f: log_du.tofile(f) run_status = subprocess.run( [asset_bin_path, log_du_path, P_total_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) if self.verbose: print(run_status.stdout.decode()) print(run_status.stderr.decode(), file=sys.stderr) run_status.check_returncode() with open(P_total_path, 'rb') as f: P_total = np.fromfile(f, dtype=self.dtype) return P_total def _check_input(self, log_du): it_todo = self.num_iterations if it_todo > np.iinfo(np.uint64).max: raise ValueError(f"it_todo ({it_todo}) is larger than MAX_UINT64." " Only Python backend is supported.") # Don't convert log_du to float32 transparently for the user to avoid # situations when the user accidentally passes an array with float64. # Doing so wastes memory for nothing. if log_du.dtype != np.float32: raise ValueError("'log_du' must be a float32 array") if log_du.shape[1] != self.d + 1: raise ValueError(f"log_du.shape[1] ({log_du.shape[1]}) must be " f"equal to D+1 ({self.d + 1})") def compute(self, u): if u.shape[1] != self.d: raise ValueError("Invalid input data shape axis 1: expected {}, " "got {}".format(self.d, u.shape[1])) # A faster and memory efficient implementation of # du = np.diff(u, prepend=0, append=1, axis=1).astype(np.float32) du = np.empty((u.shape[0], u.shape[1] + 1), dtype=np.float32) du[:, 0] = u[:, 0] np.subtract(u[:, 1:], u[:, :-1], out=du[:, 1:-1]) np.subtract(1, u[:, -1], out=du[:, -1]) # precompute logarithms # ignore warnings about infinities, see inside the loop: # we replace 0 * ln(0) by 1 to get exp(0 * ln(0)) = 0 ** 0 = 1 # the remaining infinities correctly evaluate to # exp(ln(0)) = exp(-inf) = 0 with warnings.catch_warnings(): warnings.simplefilter('ignore', RuntimeWarning) log_du = np.log(du, out=du) jsf_backend = self._choose_backend() P_total = jsf_backend(log_du) # Captures non-finite values like NaN, inf inside = (P_total > -self.tolerance) & (P_total < 1 + self.tolerance) outside_vals = P_total[~inside] if len(outside_vals) > 0: # A watchdog for unexpected results. warnings.warn(f"{len(outside_vals)}/{P_total.shape[0]} values of " "the computed joint prob. matrix lie outside of the " f"valid [0, 1] interval:\n{outside_vals}\nIf you're " "using PyOpenCL backend, make sure you've disabled " "GPU Hangcheck as described here https://" "software.intel.com/content/www/us/en/develop/" "documentation/get-started-with-intel-oneapi-" "base-linux/top/before-you-begin.html\n" "Clipping the output array to 0 and 1.") P_total = np.clip(P_total, a_min=0., a_max=1., out=P_total) return P_total class _PMatNeighbors(_GPUBackend): """ Parameters ---------- filter_shape : tuple of int A pair of integers representing the kernel shape `(l, w)`. n_largest : int The number of largest neighbors to collect for each entry in `mat`. 
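    Notes
    -----
    The neighborhood kernel built from `filter_shape` is a boolean band
    around the main diagonal. For instance, `filter_shape=(3, 1)` corresponds
    to the following kernel (a minimal illustration of the construction
    performed in `__init__`):

    >>> import numpy as np
    >>> filt = np.tril(np.triu(np.ones((3, 3), dtype=bool), -1), 1)
    >>> filt
    array([[ True,  True, False],
           [ True,  True,  True],
           [False,  True,  True]])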
""" def __init__(self, filter_shape, n_largest, max_chunk_size=None): super().__init__(max_chunk_size=max_chunk_size) self.n_largest = n_largest self.max_chunk_size = max_chunk_size filter_size, filter_width = filter_shape if filter_width >= filter_size: raise ValueError('filter_shape width must be lower than length') if not ((filter_width % 2) and (filter_size % 2)): warnings.warn( 'The kernel is not centered on the datapoint in whose' 'calculation it is used. Consider using odd values' 'for both entries of filter_shape.') # Construct the kernel filt = np.ones((filter_size, filter_size), dtype=bool) filt = np.triu(filt, -filter_width) filt = np.tril(filt, filter_width) if n_largest > len(filt.nonzero()[0]): raise ValueError(f"Too small filter shape {filter_shape} to " f"select {n_largest} largest elements.") self.filter_kernel = filt def _check_input(self, mat): symmetric = np.all(np.diagonal(mat) == 0.5) # Check consistent arguments filter_size = self.filter_kernel.shape[0] if (symmetric and mat.shape[0] < 2 * filter_size - 1) \ or (not symmetric and min(mat.shape) < filter_size): raise ValueError(f"'filter_shape' {self.filter_kernel.shape} is " f"too large for the input matrix of shape " f"{mat.shape}") if mat.dtype != np.float32: raise ValueError("The input matrix dtype must be float32.") def pyopencl(self, mat): import pyopencl as cl import pyopencl.array as cl_array from jinja2 import Template context = cl.create_some_context(interactive=False) device = context.devices[0] queue = cl.CommandQueue(context) # if the matrix is symmetric the diagonal was set to 0.5 # when computing the probability matrix symmetric = np.all(np.diagonal(mat) == 0.5) self._check_input(mat) filt_size = self.filter_kernel.shape[0] # filt is a square matrix filt_rows, filt_cols = self.filter_kernel.nonzero() filt_rows = "{%s}" % ", ".join(f"{row}U" for row in filt_rows) filt_cols = "{%s}" % ", ".join(f"{col}U" for col in filt_cols) lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest), dtype=np.float32) if symmetric: mat = mat[filt_size:] lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1] else: lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1] # GPU_MAX_HEAP_SIZE OpenCL flag is set to 2 Gb (1 << 31) by default mem_avail = min(device.max_mem_alloc_size, device.global_mem_size, 1 << 31) # 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols chunk_size = (mem_avail // 4 - filt_size * lmat.shape[1]) // ( lmat.shape[1] * (self.n_largest + 1)) chunk_size, split_idx = self._split_axis(chunk_size=chunk_size, axis_size=lmat.shape[0], min_chunk_size=filt_size) pmat_cl_path = Path(__file__).parent / "pmat_neighbors.cl" pmat_cl_template = Template(pmat_cl_path.read_text()) lmat_gpu = cl_array.Array( queue, shape=(chunk_size, lmat.shape[1], self.n_largest), dtype=np.float32 ) for i_start, i_end in split_idx: mat_gpu = cl_array.to_device(queue, mat[i_start: i_end + filt_size], async_=True) lmat_gpu.fill(0, queue=queue) chunk_size = i_end - i_start it_todo = chunk_size * (lmat.shape[1] - filt_size + 1) pmat_neighbors_cl = pmat_cl_template.render( FILT_SIZE=filt_size, N_LARGEST=self.n_largest, PMAT_COLS=f"{lmat.shape[1]}LU", Y_OFFSET=f"{i_start}LU", NONZERO_SIZE=self.filter_kernel.sum(), SYMMETRIC=int(symmetric), filt_rows=filt_rows, filt_cols=filt_cols ) program = cl.Program(context, pmat_neighbors_cl).build() # synchronize cl.enqueue_barrier(queue) kernel = program.pmat_neighbors # When the grid size is set to the total number of work items to # execute and the local size is 
set to None, PyOpenCL chooses the # number of threads automatically such that the total number of # work items exactly matches the desired number of iterations. kernel(queue, (it_todo,), None, lmat_gpu.data, mat_gpu.data) lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end]) return lmat_padded def pycuda(self, mat): from jinja2 import Template try: # PyCuda should not be in requirements-extra because CPU limited # users won't be able to install Elephant. import pycuda.autoinit import pycuda.gpuarray as gpuarray import pycuda.driver as drv from pycuda.compiler import SourceModule except ImportError as err: raise ImportError( "Install pycuda with 'pip install pycuda'") from err # if the matrix is symmetric the diagonal was set to 0.5 # when computing the probability matrix symmetric = np.all(np.diagonal(mat) == 0.5) self._check_input(mat) device = pycuda.autoinit.device n_threads = device.MAX_THREADS_PER_BLOCK filt_size = self.filter_kernel.shape[0] filt_rows, filt_cols = self.filter_kernel.nonzero() lmat_padded = np.zeros((mat.shape[0], mat.shape[1], self.n_largest), dtype=np.float32) if symmetric: mat = mat[filt_size:] lmat = lmat_padded[filt_size + filt_size // 2: -filt_size // 2 + 1] else: lmat = lmat_padded[filt_size // 2: -filt_size // 2 + 1] free, total = drv.mem_get_info() # 4 * size * n_cols * n_largest + 4 * (size + filt_size) * n_cols chunk_size = (free // 4 - filt_size * lmat.shape[1]) // ( lmat.shape[1] * (self.n_largest + 1)) chunk_size, split_idx = self._split_axis(chunk_size=chunk_size, axis_size=lmat.shape[0], min_chunk_size=filt_size) pmat_cu_path = Path(__file__).parent / "pmat_neighbors.cu" pmat_cu_template = Template(pmat_cu_path.read_text()) lmat_gpu = gpuarray.GPUArray( (chunk_size, lmat.shape[1], self.n_largest), dtype=np.float32) mat_gpu = drv.mem_alloc(4 * (chunk_size + filt_size) * mat.shape[1]) for i_start, i_end in split_idx: drv.memcpy_htod_async(dest=mat_gpu, src=mat[i_start: i_end + filt_size]) lmat_gpu.fill(0) chunk_size = i_end - i_start it_todo = chunk_size * (lmat.shape[1] - filt_size + 1) pmat_neighbors_cu = pmat_cu_template.render( FILT_SIZE=filt_size, N_LARGEST=self.n_largest, PMAT_COLS=f"{lmat.shape[1]}LLU", Y_OFFSET=f"{i_start}LLU", NONZERO_SIZE=self.filter_kernel.sum(), SYMMETRIC=int(symmetric), IT_TODO=it_todo, ) module = SourceModule(pmat_neighbors_cu) filt_rows_gpu, _ = module.get_global("filt_rows") drv.memcpy_htod(filt_rows_gpu, filt_rows.astype(np.uint32)) filt_cols_gpu, _ = module.get_global("filt_cols") drv.memcpy_htod(filt_cols_gpu, filt_cols.astype(np.uint32)) drv.Context.synchronize() grid_size = math.ceil(it_todo / n_threads) if grid_size > device.MAX_GRID_DIM_X: raise ValueError("Cannot launch a CUDA kernel with " f"{grid_size} num. of blocks. Adjust the " "'max_chunk_size' parameter.") kernel = module.get_function("pmat_neighbors") kernel(lmat_gpu.gpudata, mat_gpu, grid=(grid_size, 1), block=(n_threads, 1, 1)) lmat_gpu[:chunk_size].get(ary=lmat[i_start: i_end]) return lmat_padded def compute(self, mat): """ Build the 3D matrix `L` of largest neighbors of elements in a 2D matrix `mat`. For each entry `mat[i, j]`, collects the `n_largest` elements with largest values around `mat[i, j]`, say `z_i, i=1,2,...,n_largest`, and assigns them to `L[i, j, :]`. The zone around `mat[i, j]` where largest neighbors are collected from is a rectangular area (kernel) of shape `(l, w) = filter_shape` centered around `mat[i, j]` and aligned along the diagonal. If `mat` is symmetric, only the triangle below the diagonal is considered. 
Parameters ---------- mat : np.ndarray A square matrix of real-valued elements. Returns ------- lmat : np.ndarray A matrix of shape `(l, w, n_largest)` containing along the last dimension `lmat[i, j, :]` the largest neighbors of `mat[i, j]`. """ backend = self._choose_backend() lmat = backend(mat) return lmat def cpu(self, mat): # if the matrix is symmetric the diagonal was set to 0.5 # when computing the probability matrix symmetric = np.all(np.diagonal(mat) == 0.5) self._check_input(mat) filter_size = self.filter_kernel.shape[0] # Initialize the matrix of d-largest values as a matrix of zeroes lmat = np.zeros((mat.shape[0], mat.shape[1], self.n_largest), dtype=np.float32) N_bin_y = mat.shape[0] N_bin_x = mat.shape[1] # if the matrix is symmetric do not use kernel positions intersected # by the diagonal if symmetric: bin_range_y = range(filter_size, N_bin_y - filter_size + 1) else: bin_range_y = range(N_bin_y - filter_size + 1) bin_range_x = range(N_bin_x - filter_size + 1) # compute matrix of largest values for y in bin_range_y: if symmetric: # x range depends on y position bin_range_x = range(y - filter_size + 1) for x in bin_range_x: patch = mat[y: y + filter_size, x: x + filter_size] mskd = patch[self.filter_kernel] largest_vals = np.sort(mskd)[-self.n_largest:] lmat[y + (filter_size // 2), x + (filter_size // 2), :] = \ largest_vals return lmat def synchronous_events_intersection(sse1, sse2, intersection='linkwise'): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of positions `(iK, jK)` of matrix entries and associated synchronous events `SK`, finds the intersection among them. The intersection can be performed 'pixelwise' or 'linkwise'. * if 'pixelwise', it yields a new SSE which retains only events in `sse1` whose pixel position matches a pixel position in `sse2`. This operation is not symmetric: `intersection(sse1, sse2) != intersection(sse2, sse1)`. * if 'linkwise', an additional step is performed where each retained synchronous event `SK` in `sse1` is intersected with the corresponding event in `sse2`. This yields a symmetric operation: `intersection(sse1, sse2) = intersection(sse2, sse1)`. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Each is a dictionary of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values (see above). intersection : {'pixelwise', 'linkwise'}, optional The type of intersection to perform among the two SSEs (see above). Default: 'linkwise' Returns ------- sse_new : dict A new SSE (same structure as `sse1` and `sse2`) which retains only the events of `sse1` associated to keys present both in `sse1` and `sse2`. If `intersection = 'linkwise'`, such events are additionally intersected with the associated events in `sse2`. 
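    Examples
    --------
    A minimal sketch with two toy SSEs (the pixel positions and neuron IDs
    are illustrative):

    >>> sse1 = {(1, 2): {1, 2, 3}, (3, 4): {5, 6}}
    >>> sse2 = {(1, 2): {1, 2}, (5, 6): {0}}
    >>> synchronous_events_intersection(sse1, sse2, intersection='linkwise')
    {(1, 2): {1, 2}}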
See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ sse_new = sse1.copy() for pixel1 in sse1.keys(): if pixel1 not in sse2.keys(): del sse_new[pixel1] if intersection == 'linkwise': for pixel1, link1 in sse_new.items(): sse_new[pixel1] = link1.intersection(sse2[pixel1]) if len(sse_new[pixel1]) == 0: del sse_new[pixel1] elif intersection == 'pixelwise': pass else: raise ValueError( "intersection (=%s) can only be" % intersection + " 'pixelwise' or 'linkwise'") return sse_new def synchronous_events_difference(sse1, sse2, difference='linkwise'): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), computes the difference between `sse1` and `sse2`. The difference can be performed 'pixelwise' or 'linkwise': * if 'pixelwise', it yields a new SSE which contains all (and only) the events in `sse1` whose pixel position doesn't match any pixel in `sse2`. * if 'linkwise', for each pixel `(i, j)` in `sse1` and corresponding synchronous event `S1`, if `(i, j)` is a pixel in `sse2` corresponding to the event `S2`, it retains the set difference `S1 - S2`. If `(i, j)` is not a pixel in `sse2`, it retains the full set `S1`. Note that in either case the difference is a non-symmetric operation: `intersection(sse1, sse2) != intersection(sse2, sse1)`. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values (see above). difference : {'pixelwise', 'linkwise'}, optional The type of difference to perform between `sse1` and `sse2` (see above). Default: 'linkwise' Returns ------- sse_new : dict A new SSE (same structure as `sse1` and `sse2`) which retains the difference between `sse1` and `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ sse_new = sse1.copy() for pixel1 in sse1.keys(): if pixel1 in sse2.keys(): if difference == 'pixelwise': del sse_new[pixel1] elif difference == 'linkwise': sse_new[pixel1] = sse_new[pixel1].difference(sse2[pixel1]) if len(sse_new[pixel1]) == 0: del sse_new[pixel1] else: raise ValueError( "difference (=%s) can only be" % difference + " 'pixelwise' or 'linkwise'") return sse_new def _remove_empty_events(sse): """ Given a sequence of synchronous events (SSE) `sse` consisting of a pool of pixel positions and associated synchronous events (see below), returns a copy of `sse` where all empty events have been removed. `sse` must be provided as a dictionary of type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse : dict A dictionary of pixel positions `(i, j)` as keys, and sets `S` of synchronous events as values (see above). Returns ------- sse_new : dict A copy of `sse` where all empty events have been removed. """ sse_new = sse.copy() for pixel, link in sse.items(): if link == set([]): del sse_new[pixel] return sse_new def synchronous_events_identical(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` is strictly contained in `sse2`. 
`sse1` is strictly contained in `sse2` if all its pixels are pixels of `sse2`, if its associated events are subsets of the corresponding events in `sse2`, and if `sse2` contains events, or neuron IDs in some event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not identical). Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` is identical to `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ # Remove empty links from sse11 and sse22, if any sse11 = _remove_empty_events(sse1) sse22 = _remove_empty_events(sse2) # Return whether sse11 == sse22 return sse11 == sse22 def synchronous_events_no_overlap(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` and `sse2` are disjoint. Two SSEs are disjoint if they don't share pixels, or if the events associated to common pixels are disjoint. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` is disjoint from `sse2`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ # Remove empty links from sse11 and sse22, if any sse11 = _remove_empty_events(sse1) sse22 = _remove_empty_events(sse2) # If both SSEs are empty, return False (we consider them equal) if sse11 == {} and sse22 == {}: return False common_pixels = set(sse11.keys()).intersection(set(sse22.keys())) if len(common_pixels) == 0: return True if all(sse11[p].isdisjoint(sse22[p]) for p in common_pixels): return True return False def synchronous_events_contained_in(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` is strictly contained in `sse2`. `sse1` is strictly contained in `sse2` if all its pixels are pixels of `sse2`, if its associated events are subsets of the corresponding events in `sse2`, and if `sse2` contains non-empty events, or neuron IDs in some event, which do not belong to `sse1` (i.e., `sse1` and `sse2` are not identical). Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` is a subset of `sse2`. 
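    Examples
    --------
    A minimal sketch with toy SSEs (values are illustrative):

    >>> sse1 = {(1, 2): {1, 2}}
    >>> sse2 = {(1, 2): {1, 2, 3}, (5, 6): {0}}
    >>> synchronous_events_contained_in(sse1, sse2)
    True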
See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ # Remove empty links from sse11 and sse22, if any sse11 = _remove_empty_events(sse1) sse22 = _remove_empty_events(sse2) # Return False if sse11 and sse22 are disjoint if synchronous_events_identical(sse11, sse22): return False # Return False if any pixel in sse1 is not contained in sse2, or if any # link of sse1 is not a subset of the corresponding link in sse2. # Otherwise (if sse1 is a subset of sse2) continue for pixel1, link1 in sse11.items(): if pixel1 not in sse22.keys(): return False if not link1.issubset(sse22[pixel1]): return False # Check that sse1 is a STRICT subset of sse2, i.e. that sse2 contains at # least one pixel or neuron id not present in sse1. return not synchronous_events_identical(sse11, sse22) def synchronous_events_contains_all(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether `sse1` strictly contains `sse2`. `sse1` strictly contains `sse2` if it contains all pixels of `sse2`, if all associated events in `sse1` contain those in `sse2`, and if `sse1` additionally contains other pixels / events not contained in `sse2`. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` strictly contains `sse2`. Notes ----- `synchronous_events_contains_all(sse1, sse2)` is identical to `synchronous_events_is_subsequence(sse2, sse1)`. See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ return synchronous_events_contained_in(sse2, sse1) def synchronous_events_overlap(sse1, sse2): """ Given two sequences of synchronous events (SSEs) `sse1` and `sse2`, each consisting of a pool of pixel positions and associated synchronous events (see below), determines whether the two SSEs overlap. The SSEs overlap if they are not equal and none of them is a superset of the other one but they are also not disjoint. Both `sse1` and `sse2` must be provided as dictionaries of the type .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK}, where each `i`, `j` is an integer and each `S` is a set of neuron IDs. Parameters ---------- sse1, sse2 : dict Dictionaries of pixel positions `(i, j)` as keys and sets `S` of synchronous events as values. Returns ------- bool True if `sse1` and `sse2` overlap. 
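    Examples
    --------
    A minimal sketch with toy SSEs that share a pixel and a neuron ID but are
    neither identical nor nested (values are illustrative):

    >>> sse1 = {(1, 2): {1, 2}, (3, 4): {3}}
    >>> sse2 = {(1, 2): {1, 5}, (7, 8): {6}}
    >>> synchronous_events_overlap(sse1, sse2)
    True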
See Also -------- ASSET.extract_synchronous_events : extract SSEs from given spike trains """ contained_in = synchronous_events_contained_in(sse1, sse2) contains_all = synchronous_events_contains_all(sse1, sse2) identical = synchronous_events_identical(sse1, sse2) is_disjoint = synchronous_events_no_overlap(sse1, sse2) return not (contained_in or contains_all or identical or is_disjoint) def _signals_t_start_stop(signals, t_start=None, t_stop=None): if t_start is None: t_start = _signals_same_attribute(signals, 't_start') if t_stop is None: t_stop = _signals_same_attribute(signals, 't_stop') return t_start, t_stop def _intersection_matrix(spiketrains, spiketrains_y, bin_size, t_start_x, t_start_y, t_stop_x, t_stop_y, normalization=None): if spiketrains_y is None: spiketrains_y = spiketrains # Compute the binned spike train matrices, along both time axes spiketrains_binned = conv.BinnedSpikeTrain( spiketrains, bin_size=bin_size, t_start=t_start_x, t_stop=t_stop_x) spiketrains_binned_y = conv.BinnedSpikeTrain( spiketrains_y, bin_size=bin_size, t_start=t_start_y, t_stop=t_stop_y) # Compute imat by matrix multiplication bsts_x = spiketrains_binned.sparse_matrix bsts_y = spiketrains_binned_y.sparse_matrix # Compute the number of spikes in each bin, for both time axes # 'A1' property returns self as a flattened ndarray. spikes_per_bin_x = bsts_x.sum(axis=0).A1 spikes_per_bin_y = bsts_y.sum(axis=0).A1 # Compute the intersection matrix imat imat = bsts_x.T.dot(bsts_y).toarray().astype(np.float32) for ii in range(bsts_x.shape[1]): # Normalize the row col_sum = bsts_x[:, ii].sum() if normalization is None or col_sum == 0: norm_coef = 1. elif normalization == 'intersection': norm_coef = np.minimum( spikes_per_bin_x[ii], spikes_per_bin_y) elif normalization == 'mean': # geometric mean norm_coef = np.sqrt( spikes_per_bin_x[ii] * spikes_per_bin_y) elif normalization == 'union': norm_coef = np.array([(bsts_x[:, ii] + bsts_y[:, jj]).count_nonzero() for jj in range(bsts_y.shape[1])]) else: raise ValueError( "Invalid parameter 'norm': {}".format(normalization)) # If normalization required, for each j such that bsts_y[j] is # identically 0 the code above sets imat[:, j] to identically nan. # Substitute 0s instead. imat[ii, :] = np.divide(imat[ii, :], norm_coef, out=np.zeros(imat.shape[1], dtype=np.float32), where=norm_coef != 0) # Return the intersection matrix and the edges of the bins used for the # x and y axes, respectively. return imat class ASSET(object): """ Analysis of Sequences of Synchronous EvenTs class. Parameters ---------- spiketrains_i, spiketrains_j : list of neo.SpikeTrain Input spike trains for the first and second time dimensions, respectively, to compute the p-values from. If `spiketrains_y` is None, it's set to `spiketrains`. bin_size : pq.Quantity, optional The width of the time bins used to compute the probability matrix. t_start_i, t_start_j : pq.Quantity, optional The start time of the binning for the first and second axes, respectively. If None, the attribute `t_start` of the spike trains is used (if the same for all spike trains). Default: None t_stop_i, t_stop_j : pq.Quantity, optional The stop time of the binning for the first and second axes, respectively. If None, the attribute `t_stop` of the spike trains is used (if the same for all spike trains). Default: None verbose : bool, optional If True, print messages and show progress bar. Default: True Raises ------ ValueError If the `t_start` & `t_stop` times are not (one of): perfectly aligned; fully disjoint. 
""" def __init__(self, spiketrains_i, spiketrains_j=None, bin_size=3 * pq.ms, t_start_i=None, t_start_j=None, t_stop_i=None, t_stop_j=None, verbose=True): self.spiketrains_i = spiketrains_i if spiketrains_j is None: spiketrains_j = spiketrains_i self.spiketrains_j = spiketrains_j self.bin_size = bin_size self.t_start_i, self.t_stop_i = _signals_t_start_stop( spiketrains_i, t_start=t_start_i, t_stop=t_stop_i) self.t_start_j, self.t_stop_j = _signals_t_start_stop( spiketrains_j, t_start=t_start_j, t_stop=t_stop_j) self.verbose = verbose and rank == 0 msg = 'The time intervals for x and y need to be either identical ' \ 'or fully disjoint, but they are:\n' \ 'x: ({}, {}) and y: ({}, {}).'.format(self.t_start_i, self.t_stop_i, self.t_start_j, self.t_stop_j) # the starts have to be perfectly aligned for the binning to work # the stops can differ without impacting the binning if self.t_start_i == self.t_start_j: if not _quantities_almost_equal(self.t_stop_i, self.t_stop_j): raise ValueError(msg) elif (self.t_start_i < self.t_start_j < self.t_stop_i) \ or (self.t_start_i < self.t_stop_j < self.t_stop_i): raise ValueError(msg) # Compute the binned spike train matrices, along both time axes self.spiketrains_binned_i = conv.BinnedSpikeTrain( self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i, t_stop=self.t_stop_i) self.spiketrains_binned_j = conv.BinnedSpikeTrain( self.spiketrains_j, bin_size=self.bin_size, t_start=self.t_start_j, t_stop=self.t_stop_j) @property def x_edges(self): """ A Quantity array of `n+1` edges of the bins used for the horizontal axis of the intersection matrix, where `n` is the number of bins that time was discretized in. """ return self.spiketrains_binned_i.bin_edges.rescale(self.bin_size.units) @property def y_edges(self): """ A Quantity array of `n+1` edges of the bins used for the vertical axis of the intersection matrix, where `n` is the number of bins that time was discretized in. """ return self.spiketrains_binned_j.bin_edges.rescale(self.bin_size.units) def is_symmetric(self): """ Returns ------- bool Whether the intersection matrix is symmetric or not. See Also -------- ASSET.intersection_matrix """ return _quantities_almost_equal(self.x_edges[0], self.y_edges[0]) def intersection_matrix(self, normalization=None): """ Generates the intersection matrix from a list of spike trains. Given a list of `neo.SpikeTrain`, consider two binned versions of them differing for the starting and ending times of the binning: `t_start_x`, `t_stop_x`, `t_start_y` and `t_stop_y` respectively (the time intervals can be either identical or completely disjoint). Then calculate the intersection matrix `M` of the two binned data, where `M[i,j]` is the overlap of bin `i` in the first binned data and bin `j` in the second binned data (i.e., the number of spike trains spiking at both bin `i` and bin `j`). The matrix entries can be normalized to values between `0` and `1` via different normalizations (see "Parameters" section). Parameters ---------- normalization : {'intersection', 'mean', 'union'} or None, optional The normalization type to be applied to each entry `M[i,j]` of the intersection matrix `M`. 
Given the sets `s_i` and `s_j` of neuron IDs in the bins `i` and `j` respectively, the normalization coefficient can be: * None: no normalisation (row counts) * 'intersection': `len(intersection(s_i, s_j))` * 'mean': `sqrt(len(s_1) * len(s_2))` * 'union': `len(union(s_i, s_j))` Default: None Returns ------- imat : (n,n) np.ndarray The floating point intersection matrix of a list of spike trains. It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. """ imat = _intersection_matrix(self.spiketrains_i, self.spiketrains_j, self.bin_size, self.t_start_i, self.t_start_j, self.t_stop_i, self.t_stop_j, normalization=normalization) return imat def probability_matrix_montecarlo(self, n_surrogates, imat=None, surrogate_method='dither_spikes', surrogate_dt=None): """ Given a list of parallel spike trains, estimate the cumulative probability of each entry in their intersection matrix by a Monte Carlo approach using surrogate data. Contrarily to the analytical version (see :func:`ASSET.probability_matrix_analytical`) the Monte Carlo one does not incorporate the assumptions of Poissonianity in the null hypothesis. The method produces surrogate spike trains (using one of several methods at disposal, see "Parameters" section) and calculates their intersection matrix `M`. For each entry `(i, j)`, the intersection CDF `P[i, j]` is then given by: .. centered:: P[i, j] = #(spike_train_surrogates such that M[i, j] < I[i, j]) / #(spike_train_surrogates) If `P[i, j]` is large (close to 1), `I[i, j]` is statistically significant: the probability to observe an overlap equal to or larger than `I[i, j]` under the null hypothesis is `1 - P[i, j]`, very small. Parameters ---------- n_surrogates : int The number of spike train surrogates to generate for the bootstrap procedure. imat : (n,n) np.ndarray or None, optional The floating point intersection matrix of a list of spike trains. It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. If None, the output of :func:`ASSET.intersection_matrix` is used. Default: None surrogate_method : {'dither_spike_train', 'dither_spikes', 'jitter_spikes', 'randomise_spikes', 'shuffle_isis', 'joint_isi_dithering'}, optional The method to generate surrogate spike trains. Refer to the :func:`spike_train_surrogates.surrogates` documentation for more information about each surrogate method. Note that some of these methods need `surrogate_dt` parameter, others ignore it. Default: 'dither_spike_train' surrogate_dt : pq.Quantity, optional For surrogate methods shifting spike times randomly around their original time ('dither_spike_train', 'dither_spikes') or replacing them randomly within a certain window ('jitter_spikes'), `surrogate_dt` represents the size of that shift (window). For other methods, `surrogate_dt` is ignored. If None, it's set to `self.bin_size * 5`. Default: None Returns ------- pmat : np.ndarray The cumulative probability matrix. `pmat[i, j]` represents the estimated probability of having an overlap between bins `i` and `j` STRICTLY LOWER than the observed overlap, under the null hypothesis of independence of the input spike trains. Notes ----- We recommend playing with `surrogate_dt` parameter to see how it influences the result matrix. For this, refer to the ASSET tutorial. 
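        A typical call, reusing the `asset_obj` created in the module-level
        example (a sketch; the number of surrogates and the dither width are
        illustrative, hence the doctest is skipped):

        >>> pmat = asset_obj.probability_matrix_montecarlo(  # doctest: +SKIP
        ...     n_surrogates=100, surrogate_method='dither_spikes',
        ...     surrogate_dt=15 * pq.ms)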
See Also -------- ASSET.probability_matrix_analytical : analytical derivation of the matrix """ if imat is None: # Compute the intersection matrix of the original data imat = self.intersection_matrix() if surrogate_dt is None: surrogate_dt = self.bin_size * 5 symmetric = self.is_symmetric() # Generate surrogate spike trains as a list surrs # Compute the p-value matrix pmat; pmat[i, j] counts the fraction of # surrogate data whose intersection value at (i, j) is lower than or # equal to that of the original data pmat = np.zeros(imat.shape, dtype=np.int32) for surr_id in trange(n_surrogates, desc="pmat_bootstrap", disable=not self.verbose): if mpi_accelerated and surr_id % size != rank: continue surrogates = [spike_train_surrogates.surrogates( st, n_surrogates=1, method=surrogate_method, dt=surrogate_dt, decimals=None, edges=True)[0] for st in self.spiketrains_i] if symmetric: surrogates_y = surrogates else: surrogates_y = [spike_train_surrogates.surrogates( st, n_surrogates=1, method=surrogate_method, dt=surrogate_dt, decimals=None, edges=True)[0] for st in self.spiketrains_j] imat_surr = _intersection_matrix(surrogates, surrogates_y, self.bin_size, self.t_start_i, self.t_start_j, self.t_stop_i, self.t_stop_j) pmat += (imat_surr <= (imat - 1)) del imat_surr if mpi_accelerated: pmat = comm.allreduce(pmat, op=MPI.SUM) pmat = pmat * 1. / n_surrogates if symmetric: np.fill_diagonal(pmat, 0.5) return pmat def probability_matrix_analytical(self, imat=None, firing_rates_x='estimate', firing_rates_y='estimate', kernel_width=100 * pq.ms): r""" Given a list of spike trains, approximates the cumulative probability of each entry in their intersection matrix. The approximation is analytical and works under the assumptions that the input spike trains are independent and Poisson. It works as follows: * Bin each spike train at the specified `bin_size`: this yields a binary array of 1s (spike in bin) and 0s (no spike in bin; clipping used); * If required, estimate the rate profile of each spike train by convolving the binned array with a boxcar kernel of user-defined length; * For each neuron `k` and each pair of bins `i` and `j`, compute the probability :math:`p_ijk` that neuron `k` fired in both bins `i` and `j`. * Approximate the probability distribution of the intersection value at `(i, j)` by a Poisson distribution with mean parameter :math:`l = \sum_k (p_ijk)`, justified by Le Cam's approximation of a sum of independent Bernouilli random variables with a Poisson distribution. Parameters ---------- imat : (n,n) np.ndarray or None, optional The intersection matrix of a list of spike trains. It has the shape `(n, n)`, where `n` is the number of bins that time was discretized in. If None, the output of :func:`ASSET.intersection_matrix` is used. Default: None firing_rates_x, firing_rates_y : list of neo.AnalogSignal or 'estimate' If a list, `firing_rates[i]` is the firing rate of the spike train `spiketrains[i]`. If 'estimate', firing rates are estimated by simple boxcar kernel convolution, with the specified `kernel_width`. Default: 'estimate' kernel_width : pq.Quantity, optional The total width of the kernel used to estimate the rate profiles when `firing_rates` is 'estimate'. Default: 100 * pq.ms Returns ------- pmat : np.ndarray The cumulative probability matrix. `pmat[i, j]` represents the estimated probability of having an overlap between bins `i` and `j` STRICTLY LOWER than the observed overlap, under the null hypothesis of independence of the input spike trains. 
""" if imat is None: # Compute the intersection matrix of the original data imat = self.intersection_matrix() symmetric = self.is_symmetric() bsts_x_matrix = self.spiketrains_binned_i.to_bool_array() if symmetric: bsts_y_matrix = bsts_x_matrix else: bsts_y_matrix = self.spiketrains_binned_j.to_bool_array() # Check that the nr. neurons is identical between the two axes if bsts_x_matrix.shape[0] != bsts_y_matrix.shape[0]: raise ValueError( 'Different number of neurons along the x and y axis!') # Define the firing rate profiles if firing_rates_x == 'estimate': # If rates are to be estimated, create the rate profiles as # Quantity objects obtained by boxcar-kernel convolution fir_rate_x = self._rate_of_binned_spiketrain(bsts_x_matrix, kernel_width) elif isinstance(firing_rates_x, list): # If rates provided as lists of AnalogSignals, create time slices # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_x = _interpolate_signals( firing_rates_x, self.spiketrains_binned_i.bin_edges[:-1], self.verbose) else: raise ValueError( 'fir_rates_x must be a list or the string "estimate"') if symmetric: fir_rate_y = fir_rate_x elif firing_rates_y == 'estimate': fir_rate_y = self._rate_of_binned_spiketrain(bsts_y_matrix, kernel_width) elif isinstance(firing_rates_y, list): # If rates provided as lists of AnalogSignals, create time slices # for both axes, interpolate in the time bins of interest and # convert to Quantity fir_rate_y = _interpolate_signals( firing_rates_y, self.spiketrains_binned_j.bin_edges[:-1], self.verbose) else: raise ValueError( 'fir_rates_y must be a list or the string "estimate"') # For each neuron, compute the prob. that that neuron spikes in any bin if self.verbose: print('compute the prob. that each neuron fires in each pair of ' 'bins...') rate_bins_x = (fir_rate_x * self.bin_size).simplified.magnitude spike_probs_x = 1. - np.exp(-rate_bins_x) if symmetric: spike_probs_y = spike_probs_x else: rate_bins_y = (fir_rate_y * self.bin_size).simplified.magnitude spike_probs_y = 1. - np.exp(-rate_bins_y) # Compute the matrix Mu[i, j] of parameters for the Poisson # distributions which describe, at each (i, j), the approximated # overlap probability. This matrix is just the sum of the probability # matrices p_ijk computed for each neuron k: # p_ijk is the probability that neuron k spikes in both bins i and j. # The sum of outer products is equivalent to a dot product. if self.verbose: print( "compute the probability matrix by Le Cam's approximation...") Mu = spike_probs_x.T.dot(spike_probs_y) # A straightforward implementation is: # pmat_shape = spike_probs_x.shape[1], spike_probs_y.shape[1] # Mu = np.zeros(pmat_shape, dtype=np.float64) # for probx, proby in zip(spike_probs_x, spike_probs_y): # Mu += np.outer(probx, proby) # Compute the probability matrix obtained from imat using the Poisson # pdfs pmat = scipy.stats.poisson.cdf(imat - 1, Mu) if symmetric: # Substitute 0.5 to the elements along the main diagonal if self.verbose: print("substitute 0.5 to elements along the main diagonal...") np.fill_diagonal(pmat, 0.5) return pmat def joint_probability_matrix(self, pmat, filter_shape, n_largest, min_p_value=1e-5, precision='float', cuda_threads=64, cuda_cwr_loops=32, tolerance=1e-5): """ Map a probability matrix `pmat` to a joint probability matrix `jmat`, where `jmat[i, j]` is the joint p-value of the largest neighbors of `pmat[i, j]`. The values of `pmat` are assumed to be uniformly distributed in the range [0, 1]. 
Centers a rectangular kernel of shape `filter_shape=(l, w)` around each entry `pmat[i, j]`, aligned along the diagonal on which `pmat[i, j]` lies, extracts the `n_largest` values falling within the kernel and computes their joint p-value `jmat[i, j]`. Parameters ---------- pmat : np.ndarray A square matrix, the output of :func:`ASSET.probability_matrix_montecarlo` or :func:`ASSET.probability_matrix_analytical`, of cumulative probability values between 0 and 1. The values are assumed to be uniformly distributed in the said range. filter_shape : tuple of int A pair of integers representing the kernel shape `(l, w)`. n_largest : int The number of the largest neighbors to collect for each entry in `jmat`. min_p_value : float, optional The minimum p-value in range `[0, 1)` for individual entries in `pmat`. Each `pmat[i, j]` is set to `min(pmat[i, j], 1-p_value_min)` to avoid that a single highly significant value in `pmat` (extreme case: `pmat[i, j] = 1`) yields joint significance of itself and its neighbors. Default: 1e-5 precision : {'float', 'double'}, optional Single or double floating-point precision for the resulting `jmat` matrix. * `'float'`: 32 bits; the tolerance error is ``≲1e-3``. * `'double'`: 64 bits; the tolerance error is ``<1e-5``. Double floating-point precision is typically x4 times slower than the single floating-point equivalent. Default: 'float' cuda_threads : int, optional [CUDA/OpenCL performance parameter that does not influence the result.] The number of CUDA/OpenCL threads per block (in X axis) between 1 and 1024 and is used only if CUDA or OpenCL backend is enabled. For performance reasons, it should be a multiple of 32. Old GPUs (Tesla K80) perform faster with `cuda_threads` larger than 64 while new series (Tesla T4) with capabilities 6.x and more work best with 32 threads. Default: 64 cuda_cwr_loops : int, optional [CUDA/OpenCL performance parameter that does not influence the result.] A positive integer that defines the number of fast 'combinations_with_replacement' loops to run to reduce branch divergence. This parameter influences the performance when the number of iterations is huge (`>1e8`); in such cases, increase the value. Default: 32 tolerance : float, optional Tolerance is used to catch unexpected behavior of billions of floating point additions, when the number of iterations is huge or the data arrays are large. A warning is thrown when the resulting joint prob. matrix values are outside of the acceptable range ``[-tolerance, 1.0 + tolerance]``. Default: 1e-5 Returns ------- jmat : np.ndarray The joint probability matrix associated to `pmat`. Notes ----- 1. By default, if CUDA is detected, CUDA acceleration is used. CUDA backend is **~X1000** faster than the Python implementation. To turn off CUDA features, set the environment flag ``ELEPHANT_USE_CUDA`` to ``0``. Otherwise 2. If PyOpenCL is installed and detected, PyOpenCL backend is used. PyOpenCL backend is **~X100** faster than the Python implementation. To turn off OpenCL features, set the environment flag ``ELEPHANT_USE_OPENCL`` to ``0``. When using PyOpenCL backend, make sure you've disabled GPU Hangcheck as described in the `Intel GPU developers documentation <https://software.intel.com/content/www/us/en/develop/ documentation/get-started-with-intel-oneapi-base-linux/top/ before-you-begin.html>`_.
Do it with caution - using your built-in Intel graphics card to perform computations may make the system unresponsive until the compute program terminates. """ l, w = filter_shape # Find for each P_ij in the probability matrix its neighbors and # maximize them by the maximum value 1-p_value_min pmat = np.asarray(pmat, dtype=np.float32) pmat_neighb_obj = _PMatNeighbors(filter_shape=filter_shape, n_largest=n_largest) pmat_neighb = pmat_neighb_obj.compute(pmat) pmat_neighb = np.minimum(pmat_neighb, 1. - min_p_value, out=pmat_neighb) # in order to avoid doing the same calculation multiple times: # find all unique sets of values in pmat_neighb # and store the corresponding indices # flatten the second and third dimension in order to use np.unique pmat_neighb = pmat_neighb.reshape(pmat.size, n_largest) pmat_neighb, pmat_neighb_indices = np.unique(pmat_neighb, axis=0, return_inverse=True) # Compute the joint p-value matrix jpvmat n = l * (1 + 2 * w) - w * ( w + 1) # number of entries covered by kernel jsf = _JSFUniformOrderStat3D(n=n, d=pmat_neighb.shape[1], precision=precision, verbose=self.verbose, cuda_threads=cuda_threads, cuda_cwr_loops=cuda_cwr_loops, tolerance=tolerance) jpvmat = jsf.compute(u=pmat_neighb) # restore the original shape using the stored indices jpvmat = jpvmat[pmat_neighb_indices].reshape(pmat.shape) return 1. - jpvmat @staticmethod def mask_matrices(matrices, thresholds): """ Given a list of `matrices` and a list of `thresholds`, return a boolean matrix `B` ("mask") such that `B[i,j]` is True if each input matrix in the list strictly exceeds the corresponding threshold at that position. If multiple matrices are passed along with only one threshold the same threshold is applied to all matrices. Parameters ---------- matrices : list of np.ndarray The matrices which are compared to the respective thresholds to build the mask. All matrices must have the same shape. Typically, it is a list `[pmat, jmat]`, i.e., the (cumulative) probability and joint probability matrices. thresholds : float or list of float The significance thresholds for each matrix in `matrices`. Returns ------- mask : np.ndarray Boolean mask matrix with the shape of the input matrices. Raises ------ ValueError If `matrices` or `thresholds` is an empty list. If `matrices` and `thresholds` have different lengths. See Also -------- ASSET.probability_matrix_montecarlo : for `pmat` generation ASSET.probability_matrix_analytical : for `pmat` generation ASSET.joint_probability_matrix : for `jmat` generation """ if len(matrices) == 0: raise ValueError("Empty list of matrices") if isinstance(thresholds, float): thresholds = np.full(shape=len(matrices), fill_value=thresholds) if len(matrices) != len(thresholds): raise ValueError( '`matrices` and `thresholds` must have same length') mask = np.ones_like(matrices[0], dtype=bool) for (mat, thresh) in zip(matrices, thresholds): mask &= mat > thresh # Replace nans, coming from False * np.inf, with zeros mask[np.isnan(mask)] = False return mask @staticmethod def cluster_matrix_entries(mask_matrix, max_distance, min_neighbors, stretch, working_memory=None): r""" Given a matrix `mask_matrix`, replaces its positive elements with integers representing different cluster IDs. Each cluster comprises close-by elements. In ASSET analysis, `mask_matrix` is a thresholded ("masked") version of the intersection matrix `imat`, whose values are those of `imat` only if considered statistically significant, and zero otherwise. 
A cluster is built by pooling elements according to their distance, via the DBSCAN algorithm (see `sklearn.cluster.DBSCAN` class). Elements form a neighbourhood if at least one of them has a distance not larger than `max_distance` from the others, and if they are at least `min_neighbors`. Overlapping neighborhoods form a cluster: * Clusters are assigned integers from `1` to the total number `k` of clusters; * Unclustered ("isolated") positive elements of `mask_matrix` are assigned value `-1`; * Non-positive elements are assigned the value `0`. The distance between the positions of two positive elements in `mask_matrix` is given by a Euclidean metric which is stretched if the two positions are not aligned along the 45 degree direction (the main diagonal direction), as more, with maximal stretching along the anti-diagonal. Specifically, the Euclidean distance between positions `(i1, j1)` and `(i2, j2)` is stretched by a factor .. math:: 1 + (\mathtt{stretch} - 1.) * \left|\sin((\pi / 4) - \theta)\right|, where :math:`\theta` is the angle between the pixels and the 45 degree direction. The stretching factor thus varies between 1 and `stretch`. Parameters ---------- mask_matrix : np.ndarray The boolean matrix, whose elements with positive values are to be clustered. The output of :func:`ASSET.mask_matrices`. max_distance : float The maximum distance between two elements in `mask_matrix` to be a part of the same neighbourhood in the DBSCAN algorithm. min_neighbors : int The minimum number of elements to form a neighbourhood. stretch : float The stretching factor of the euclidean metric for elements aligned along the 135 degree direction (anti-diagonal). The actual stretching increases from 1 to `stretch` as the direction of the two elements moves from the 45 to the 135 degree direction. `stretch` must be greater than 1. working_memory : int or None, optional The sought maximum memory in MiB for temporary distance matrix chunks. When None (default), no chunking is performed. This parameter is passed directly to ``sklearn.metrics.pairwise_distances_chunked`` function and it has no influence on the outcome matrix. Instead, it control the memory VS speed trade-off. Default: None Returns ------- cluster_mat : np.ndarray A matrix with the same shape of `mask_matrix`, each of whose elements is either: * a positive integer (cluster ID) if the element is part of a cluster; * `0` if the corresponding element in `mask_matrix` is non-positive; * `-1` if the element does not belong to any cluster. 
See Also -------- sklearn.cluster.DBSCAN """ # Don't do anything if mat is identically zero if np.all(mask_matrix == 0): return mask_matrix # List the significant pixels of mat in a 2-columns array xpos_sgnf, ypos_sgnf = np.where(mask_matrix > 0) # Compute the matrix D[i, j] of euclidean distances between pixels i # and j try: D = _stretched_metric_2d( xpos_sgnf, ypos_sgnf, stretch=stretch, ref_angle=45, working_memory=working_memory ) except MemoryError as err: raise MemoryError("Set 'working_memory=100' or another value to " "chunk the data") from err # Cluster positions of significant pixels via dbscan core_samples, config = dbscan( D, eps=max_distance, min_samples=min_neighbors, metric='precomputed') # Construct the clustered matrix, where each element has value # * i = 1 to k if it belongs to a cluster i, # * 0 if it is not significant, # * -1 if it is significant but does not belong to any cluster cluster_mat = np.zeros_like(mask_matrix, dtype=np.int32) cluster_mat[xpos_sgnf, ypos_sgnf] = \ config * (config == -1) + (config + 1) * (config >= 0) return cluster_mat def extract_synchronous_events(self, cmat, ids=None): """ Given a list of spike trains, a bin size, and a clustered intersection matrix obtained from those spike trains via ASSET analysis, extracts the sequences of synchronous events (SSEs) corresponding to clustered elements in the cluster matrix. Parameters ---------- cmat : (n,n) np.ndarray The cluster matrix, the output of :func:`ASSET.cluster_matrix_entries`. ids : list, optional A list of spike train IDs. If provided, `ids[i]` is the identity of `spiketrains[i]`. If None, the IDs `0,1,...,n-1` are used. Default: None Returns ------- sse_dict : dict A dictionary `D` of SSEs, where each SSE is a sub-dictionary `Dk`, `k=1,...,K`, where `K` is the max positive integer in `cmat` (i.e., the total number of clusters in `cmat`): .. centered:: D = {1: D1, 2: D2, ..., K: DK} Each sub-dictionary `Dk` represents the k-th diagonal structure (i.e., the k-th cluster) in `cmat`, and is of the form .. centered:: Dk = {(i1, j1): S1, (i2, j2): S2, ..., (iL, jL): SL}. The keys `(i, j)` represent the positions (time bin IDs) of all elements in `cmat` that compose the SSE (i.e., that take value `l` and therefore belong to the same cluster), and the values `Sk` are sets of neuron IDs representing a repeated synchronous event (i.e., spiking at time bins `i` and `j`). 
""" nr_worms = cmat.max() # number of different clusters ("worms") in cmat if nr_worms <= 0: return {} # Compute the transactions associated to the two binnings tracts_x = _transactions( self.spiketrains_i, bin_size=self.bin_size, t_start=self.t_start_i, t_stop=self.t_stop_i, ids=ids) if self.spiketrains_j is self.spiketrains_i: diag_id = 0 tracts_y = tracts_x else: if self.is_symmetric(): diag_id = 0 tracts_y = tracts_x else: diag_id = None tracts_y = _transactions( self.spiketrains_j, bin_size=self.bin_size, t_start=self.t_start_j, t_stop=self.t_stop_j, ids=ids) # Reconstruct each worm, link by link sse_dict = {} for k in range(1, nr_worms + 1): # for each worm # worm k is a list of links (each link will be 1 sublist) worm_k = {} pos_worm_k = np.array( np.where(cmat == k)).T # position of all links # if no link lies on the reference diagonal if all([y - x != diag_id for (x, y) in pos_worm_k]): for bin_x, bin_y in pos_worm_k: # for each link # reconstruct the link link_l = set(tracts_x[bin_x]).intersection( tracts_y[bin_y]) # and assign it to its pixel worm_k[(bin_x, bin_y)] = link_l sse_dict[k] = worm_k return sse_dict def _rate_of_binned_spiketrain(self, binned_spiketrains, kernel_width): """ Calculate the rate of binned spiketrains using convolution with a boxcar kernel. """ if self.verbose: print('compute rates by boxcar-kernel convolution...') # Create the boxcar kernel and convolve it with the binned spike trains k = int((kernel_width / self.bin_size).simplified.item()) kernel = np.full(k, fill_value=1. / k) rate = np.vstack([np.convolve(bst, kernel, mode='same') for bst in binned_spiketrains]) # The convolution results in an array decreasing at the borders due # to absence of spikes beyond the borders. Replace the first and last # (k//2) elements with the (k//2)-th / (n-k//2)-th ones, respectively k2 = k // 2 for i in range(rate.shape[0]): rate[i, :k2] = rate[i, k2] rate[i, -k2:] = rate[i, -k2 - 1] # Multiply the firing rates by the proper unit rate = rate * (1. / self.bin_size).rescale('Hz') return rate
bsd-3-clause
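The ASSET class above is easiest to read as a pipeline: bin the spike trains, build the intersection matrix, convert it into a (joint) probability matrix, threshold the matrices into a boolean mask, cluster the mask, and extract the synchronous events. The following sketch chains the methods in that order, using only the signatures defined above. It is illustrative only: the import path, the toy spike trains, and every parameter value are assumptions made for demonstration, not recommendations.

import numpy as np
import neo
import quantities as pq
# Depending on the release, the class lives in `elephant.asset` or
# `elephant.asset.asset`; adjust this import as needed (assumption).
from elephant.asset import asset

np.random.seed(0)
# Hypothetical input: 20 spike trains, each with ~30 random spikes in 1 s.
spiketrains = [
    neo.SpikeTrain(np.sort(np.random.uniform(0, 1000, size=30)) * pq.ms,
                   t_stop=1000 * pq.ms)
    for _ in range(20)]

asset_obj = asset.ASSET(spiketrains, bin_size=3 * pq.ms)
imat = asset_obj.intersection_matrix()                        # raw overlaps
pmat = asset_obj.probability_matrix_analytical(
    imat, kernel_width=50 * pq.ms)                            # analytical CDF
jmat = asset_obj.joint_probability_matrix(
    pmat, filter_shape=(5, 1), n_largest=3)                   # joint p-values
mask = asset_obj.mask_matrices([pmat, jmat], thresholds=0.99)
cmat = asset_obj.cluster_matrix_entries(
    mask, max_distance=11, min_neighbors=3, stretch=5)        # DBSCAN clusters
sses = asset_obj.extract_synchronous_events(cmat)             # dict of SSEs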
xubenben/scikit-learn
sklearn/linear_model/ridge.py
25
39394
""" Ridge regression """ # Author: Mathieu Blondel <mathieu@mblondel.org> # Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com> # Fabian Pedregosa <fabian@fseoane.net> # Michael Eickenberg <michael.eickenberg@nsup.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy import linalg from scipy import sparse from scipy.sparse import linalg as sp_linalg from .base import LinearClassifierMixin, LinearModel, _rescale_data from ..base import RegressorMixin from ..utils.extmath import safe_sparse_dot from ..utils import check_X_y from ..utils import compute_sample_weight from ..utils import column_or_1d from ..preprocessing import LabelBinarizer from ..grid_search import GridSearchCV from ..externals import six from ..metrics.scorer import check_scoring def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0): n_samples, n_features = X.shape X1 = sp_linalg.aslinearoperator(X) coefs = np.empty((y.shape[1], n_features)) if n_features > n_samples: def create_mv(curr_alpha): def _mv(x): return X1.matvec(X1.rmatvec(x)) + curr_alpha * x return _mv else: def create_mv(curr_alpha): def _mv(x): return X1.rmatvec(X1.matvec(x)) + curr_alpha * x return _mv for i in range(y.shape[1]): y_column = y[:, i] mv = create_mv(alpha[i]) if n_features > n_samples: # kernel ridge # w = X.T * inv(X X^t + alpha*Id) y C = sp_linalg.LinearOperator( (n_samples, n_samples), matvec=mv, dtype=X.dtype) coef, info = sp_linalg.cg(C, y_column, tol=tol) coefs[i] = X1.rmatvec(coef) else: # linear ridge # w = inv(X^t X + alpha*Id) * X.T y y_column = X1.rmatvec(y_column) C = sp_linalg.LinearOperator( (n_features, n_features), matvec=mv, dtype=X.dtype) coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol) if info < 0: raise ValueError("Failed with error code %d" % info) if max_iter is None and info > 0 and verbose: warnings.warn("sparse_cg did not converge after %d iterations." % info) return coefs def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3): n_samples, n_features = X.shape coefs = np.empty((y.shape[1], n_features)) # According to the lsqr documentation, alpha = damp^2. sqrt_alpha = np.sqrt(alpha) for i in range(y.shape[1]): y_column = y[:, i] coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter)[0] return coefs def _solve_cholesky(X, y, alpha): # w = inv(X^t X + alpha*Id) * X.T y n_samples, n_features = X.shape n_targets = y.shape[1] A = safe_sparse_dot(X.T, X, dense_output=True) Xy = safe_sparse_dot(X.T, y, dense_output=True) one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) if one_alpha: A.flat[::n_features + 1] += alpha[0] return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T else: coefs = np.empty([n_targets, n_features]) for coef, target, current_alpha in zip(coefs, Xy.T, alpha): A.flat[::n_features + 1] += current_alpha coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel() A.flat[::n_features + 1] -= current_alpha return coefs def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): # dual_coef = inv(X X^t + alpha*Id) y n_samples = K.shape[0] n_targets = y.shape[1] if copy: K = K.copy() alpha = np.atleast_1d(alpha) one_alpha = (alpha == alpha[0]).all() has_sw = isinstance(sample_weight, np.ndarray) \ or sample_weight not in [1.0, None] if has_sw: # Unlike other solvers, we need to support sample_weight directly # because K might be a pre-computed kernel. 
sw = np.sqrt(np.atleast_1d(sample_weight)) y = y * sw[:, np.newaxis] K *= np.outer(sw, sw) if one_alpha: # Only one penalty, we can solve multi-target problems in one time. K.flat[::n_samples + 1] += alpha[0] try: # Note: we must use overwrite_a=False in order to be able to # use the fall-back solution below in case a LinAlgError # is raised dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False) except np.linalg.LinAlgError: warnings.warn("Singular matrix in solving dual problem. Using " "least-squares solution instead.") dual_coef = linalg.lstsq(K, y)[0] # K is expensive to compute and store in memory so change it back in # case it was user-given. K.flat[::n_samples + 1] -= alpha[0] if has_sw: dual_coef *= sw[:, np.newaxis] return dual_coef else: # One penalty per target. We need to solve each target separately. dual_coefs = np.empty([n_targets, n_samples]) for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): K.flat[::n_samples + 1] += current_alpha dual_coef[:] = linalg.solve(K, target, sym_pos=True, overwrite_a=False).ravel() K.flat[::n_samples + 1] -= current_alpha if has_sw: dual_coefs *= sw[np.newaxis, :] return dual_coefs.T def _solve_svd(X, y, alpha): U, s, Vt = linalg.svd(X, full_matrices=False) idx = s > 1e-15 # same default value as scipy.linalg.pinv s_nnz = s[idx][:, np.newaxis] UTy = np.dot(U.T, y) d = np.zeros((s.size, alpha.size)) d[idx] = s_nnz / (s_nnz ** 2 + alpha) d_UT_y = d * UTy return np.dot(Vt.T, d_UT_y).T def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=1e-3, verbose=0): """Solve the ridge equation by the method of normal equations. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- X : {array-like, sparse matrix, LinearOperator}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values alpha : {float, array-like}, shape = [n_targets] if array-like The l_2 penalty to be used. If an array is passed, penalties are assumed to be specific to targets max_iter : int, optional Maximum number of iterations for conjugate gradient solver. The default value is determined by scipy.sparse.linalg. sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample. If sample_weight is set, then the solver will automatically be set to 'cholesky' solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution via a Cholesky decomposition of dot(X.T, X) - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. All three solvers support both dense and sparse data. tol : float Precision of the solution. verbose : int Verbosity level. Setting verbose > 0 will display additional information depending on the solver used. Returns ------- coef : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). 
Notes ----- This function won't compute the intercept. """ n_samples, n_features = X.shape if y.ndim > 2: raise ValueError("Target y has the wrong shape %s" % str(y.shape)) ravel = False if y.ndim == 1: y = y.reshape(-1, 1) ravel = True n_samples_, n_targets = y.shape if n_samples != n_samples_: raise ValueError("Number of samples in X and y does not correspond:" " %d != %d" % (n_samples, n_samples_)) has_sw = sample_weight is not None if solver == 'auto': # cholesky if it's a dense array and cg in # any other case if not sparse.issparse(X) or has_sw: solver = 'cholesky' else: solver = 'sparse_cg' elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'): warnings.warn("""lsqr not available on this machine, falling back to sparse_cg.""") solver = 'sparse_cg' if has_sw: if np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") # Sample weight can be implemented via a simple rescaling. X, y = _rescale_data(X, y, sample_weight) # There should be either 1 or n_targets penalties alpha = np.asarray(alpha).ravel() if alpha.size not in [1, n_targets]: raise ValueError("Number of targets and number of penalties " "do not correspond: %d != %d" % (alpha.size, n_targets)) if alpha.size == 1 and n_targets > 1: alpha = np.repeat(alpha, n_targets) if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'): raise ValueError('Solver %s not understood' % solver) if solver == 'sparse_cg': coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose) elif solver == "lsqr": coef = _solve_lsqr(X, y, alpha, max_iter, tol) elif solver == 'cholesky': if n_features > n_samples: K = safe_sparse_dot(X, X.T, dense_output=True) try: dual_coef = _solve_cholesky_kernel(K, y, alpha) coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' else: try: coef = _solve_cholesky(X, y, alpha) except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' if solver == 'svd': if sparse.issparse(X): raise TypeError('SVD solver does not support sparse' ' inputs currently') coef = _solve_svd(X, y, alpha) if ravel: # When y was passed as a 1d-array, we flatten the coefficients. coef = coef.ravel() return coef class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)): @abstractmethod def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto"): self.alpha = alpha self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.max_iter = max_iter self.tol = tol self.solver = solver def fit(self, X, y, sample_weight=None): X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True, y_numeric=True) if ((sample_weight is not None) and np.atleast_1d(sample_weight).ndim > 1): raise ValueError("Sample weights must be 1D array or scalar") X, y, X_mean, y_mean, X_std = self._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) self.coef_ = ridge_regression(X, y, alpha=self.alpha, sample_weight=sample_weight, max_iter=self.max_iter, tol=self.tol, solver=self.solver) self._set_intercept(X_mean, y_mean, X_std) return self class Ridge(_BaseRidge, RegressorMixin): """Linear least squares with l2 regularization. This model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. 
This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : {float, array-like} shape = [n_targets] Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. The default value is determined by scipy.sparse.linalg. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'} Solver to use in the computational routines: - 'auto' chooses the solver automatically based on the type of data. - 'svd' uses a Singular Value Decomposition of X to compute the Ridge coefficients. More stable for singular matrices than 'cholesky'. - 'cholesky' uses the standard scipy.linalg.solve function to obtain a closed-form solution. - 'sparse_cg' uses the conjugate gradient solver as found in scipy.sparse.linalg.cg. As an iterative algorithm, this solver is more appropriate than 'cholesky' for large-scale data (possibility to set `tol` and `max_iter`). - 'lsqr' uses the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr. It is the fatest but may not be available in old scipy versions. It also uses an iterative procedure. All three solvers support both dense and sparse data. tol : float Precision of the solution. Attributes ---------- coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. See also -------- RidgeClassifier, RidgeCV, KernelRidge Examples -------- >>> from sklearn.linear_model import Ridge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = Ridge(alpha=1.0) >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, solver='auto', tol=0.001) """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, solver="auto"): super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver) def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample Returns ------- self : returns an instance of self. """ return super(Ridge, self).fit(X, y, sample_weight=sample_weight) class RidgeClassifier(LinearClassifierMixin, _BaseRidge): """Classifier using Ridge regression. 
Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alpha : float Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). max_iter : int, optional Maximum number of iterations for conjugate gradient solver. The default value is determined by scipy.sparse.linalg. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'} Solver to use in the computational routines. 'svd' will use a Singular value decomposition to obtain the solution, 'cholesky' will use the standard scipy.linalg.solve function, 'sparse_cg' will use the conjugate gradient solver as found in scipy.sparse.linalg.cg while 'auto' will chose the most appropriate depending on the matrix X. 'lsqr' uses a direct regularized least-squares routine provided by scipy. tol : float Precision of the solution. Attributes ---------- coef_ : array, shape = [n_features] or [n_classes, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. See also -------- Ridge, RidgeClassifierCV Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=1e-3, class_weight=None, solver="auto"): super(RidgeClassifier, self).__init__( alpha=alpha, fit_intercept=fit_intercept, normalize=normalize, copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit Ridge regression model. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples,n_features] Training data y : array-like, shape = [n_samples] Target values sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : returns an instance of self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. 
# modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_ class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation It allows efficient Leave-One-Out cross-validation. This class is not intended to be used directly. Use RidgeCV instead. Notes ----- We want to solve (K + alpha*Id)c = y, where K = X X^T is the kernel matrix. Let G = (K + alpha*Id)^-1. Dual solution: c = Gy Primal solution: w = X^T c Compute eigendecomposition K = Q V Q^T. Then G = Q (V + alpha*Id)^-1 Q^T, where (V + alpha*Id) is diagonal. It is thus inexpensive to inverse for many alphas. Let loov be the vector of prediction values for each example when the model was fitted with all examples but this example. loov = (KGY - diag(KG)Y) / diag(I-KG) Let looe be the vector of prediction errors for each example when the model was fitted with all examples but this example. looe = y - loov = c / diag(G) References ---------- http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf """ def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, copy_X=True, gcv_mode=None, store_cv_values=False): self.alphas = np.asarray(alphas) self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.copy_X = copy_X self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def _pre_compute(self, X, y): # even if X is very sparse, K is usually very dense K = safe_sparse_dot(X, X.T, dense_output=True) v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) return v, Q, QT_y def _decomp_diag(self, v_prime, Q): # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) return (v_prime * Q ** 2).sum(axis=-1) def _diag_dot(self, D, B): # compute dot(diag(D), B) if len(B.shape) > 1: # handle case where B is > 1-d D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B def _errors(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values(self, alpha, y, v, Q, QT_y): # don't construct matrix G, instead compute action on y & diagonal w = 1.0 / (v + alpha) c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) # handle case where y is 2-d if len(y.shape) != 1: G_diag = G_diag[:, np.newaxis] return y - (c / G_diag), c def _pre_compute_svd(self, X, y): if sparse.issparse(X): raise TypeError("SVD not supported for sparse matrices") U, s, _ = linalg.svd(X, full_matrices=0) v = s ** 2 UT_y = np.dot(U.T, y) return v, U, UT_y def _errors_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case where y is 2-d G_diag = G_diag[:, np.newaxis] return (c / G_diag) ** 2, c def _values_svd(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: # handle case when y is 2-d G_diag = G_diag[:, 
np.newaxis] return y - (c / G_diag), c def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. """ X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True, y_numeric=True) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) gcv_mode = self.gcv_mode with_sw = len(np.shape(sample_weight)) if gcv_mode is None or gcv_mode == 'auto': if sparse.issparse(X) or n_features > n_samples or with_sw: gcv_mode = 'eigen' else: gcv_mode = 'svd' elif gcv_mode == "svd" and with_sw: # FIXME non-uniform sample weights not yet supported warnings.warn("non-uniform sample weights unsupported for svd, " "forcing usage of eigen") gcv_mode = 'eigen' if gcv_mode == 'eigen': _pre_compute = self._pre_compute _errors = self._errors _values = self._values elif gcv_mode == 'svd': # assert n_samples >= n_features _pre_compute = self._pre_compute_svd _errors = self._errors_svd _values = self._values_svd else: raise ValueError('bad gcv_mode "%s"' % gcv_mode) v, Q, QT_y = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None for i, alpha in enumerate(self.alphas): weighted_alpha = (sample_weight * alpha if sample_weight is not None else alpha) if error: out, c = _errors(weighted_alpha, y, v, Q, QT_y) else: out, c = _values(weighted_alpha, y, v, Q, QT_y) cv_values[:, i] = out.ravel() C.append(c) if error: best = cv_values.mean(axis=0).argmin() else: # The scorer want an object that will make the predictions but # they are already computed efficiently by _RidgeGCV. This # identity_estimator will just return them def identity_estimator(): pass identity_estimator.decision_function = lambda y_predict: y_predict identity_estimator.predict = lambda y_predict: y_predict out = [scorer(identity_estimator, y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))] best = np.argmax(out) self.alpha_ = self.alphas[best] self.dual_coef_ = C[best] self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) self._set_intercept(X_mean, y_mean, X_std) if self.store_cv_values: if len(y.shape) == 1: cv_values_shape = n_samples, len(self.alphas) else: cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) return self class _BaseRidgeCV(LinearModel): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False): self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.cv = cv self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" if self.cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values) estimator.fit(X, y, sample_weight=sample_weight) self.alpha_ = estimator.alpha_ if self.store_cv_values: self.cv_values_ = estimator.cv_values_ else: if self.store_cv_values: raise ValueError("cv!=None and store_cv_values=True " " are incompatible") parameters = {'alpha': self.alphas} fit_params = {'sample_weight': sample_weight} gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept), parameters, fit_params=fit_params, cv=self.cv) gs.fit(X, y) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha self.coef_ = estimator.coef_ self.intercept_ = estimator.intercept_ return self class RidgeCV(_BaseRidgeCV, RegressorMixin): """Ridge regression with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : integer or cross-validation generator, optional If None, Generalized Cross-Validation (efficient Leave-One-Out) will be used. If an integer is passed, it is the number of folds for KFold cross validation. Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects gcv_mode : {None, 'auto', 'svd', eigen'}, optional Flag indicating which strategy to use when performing Generalized Cross-Validation. Options are:: 'auto' : use svd if n_samples > n_features or when X is a sparse matrix, otherwise use eigen 'svd' : force computation via singular value decomposition of X (does not work for sparse matrices) 'eigen' : force computation via eigendecomposition of X^T X The 'auto' mode is the default and is intended to pick the cheaper option of the two depending upon the shape and format of the training data. store_cv_values : boolean, default=False Flag indicating if the cross-validation values corresponding to each alpha should be stored in the `cv_values_` attribute (see below). This flag is only compatible with `cv=None` (i.e. using Generalized Cross-Validation). Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_targets, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and \ `cv=None`). After `fit()` has been called, this attribute will \ contain the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). 
intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter. See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeClassifierCV: Ridge classifier with built-in cross validation """ pass class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV): """Ridge classifier with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Currently, only the n_features > n_samples case is handled efficiently. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator, optional If None, Generalized Cross-Validation (efficient Leave-One-Out) will be used. class_weight : dict or 'balanced', optional Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_responses, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and `cv=None`). After `fit()` has been called, this attribute will contain \ the mean squared errors (by default) or the values of the \ `{loss,score}_func` function (if provided in the constructor). coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s). intercept_ : float | array, shape = (n_targets,) Independent term in decision function. Set to 0.0 if ``fit_intercept = False``. alpha_ : float Estimated regularization parameter See also -------- Ridge: Ridge regression RidgeClassifier: Ridge classifier RidgeCV: Ridge regression with built-in cross validation Notes ----- For multi-class classification, n_class classifiers are trained in a one-versus-all approach. Concretely, this is implemented by taking advantage of the multi-variate response support in Ridge. """ def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, class_weight=None): super(RidgeClassifierCV, self).__init__( alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, scoring=scoring, cv=cv) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit the ridge classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. 
sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : object Returns self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_
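Every solver in the module above estimates the same coefficients; the comment in `_solve_cholesky` states the closed form ``w = inv(X^t X + alpha*Id) * X.T y``. The short sketch below reproduces that formula with plain NumPy and compares it against the `Ridge` estimator. It is a sanity-check sketch only: the toy data are arbitrary, and `fit_intercept=False` is chosen so that no centering is applied and the two solutions are directly comparable.

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = rng.randn(20)
alpha = 1.0

# Closed-form normal equations: w = inv(X^T X + alpha*Id) X^T y
w_closed_form = np.linalg.solve(np.dot(X.T, X) + alpha * np.eye(X.shape[1]),
                                np.dot(X.T, y))

# The estimator defined above, with the Cholesky solver and no intercept.
ridge = Ridge(alpha=alpha, fit_intercept=False, solver='cholesky').fit(X, y)
print(np.allclose(ridge.coef_, w_closed_form))  # expected: True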
bsd-3-clause
manterd/myPhyloDB
functions/analysis/spls_graphs.py
1
32640
import datetime from django.http import HttpResponse import logging import pandas as pd from pyper import * from scipy import stats import json from database.models import Kingdom, Phyla, Class, Order, Family, Genus, Species, OTU_99, \ ko_lvl1, ko_lvl2, ko_lvl3, \ nz_lvl1, nz_lvl2, nz_lvl3, nz_lvl4 import functions reload(sys) sys.setdefaultencoding('utf8') LOG_FILENAME = 'error_log.txt' pd.set_option('display.max_colwidth', -1) def getSPLS(request, stops, RID, PID): try: while True: if request.is_ajax(): allJson = request.body.split('&')[0] all = json.loads(allJson) functions.setBase(RID, 'Step 1 of 6: Reading normalized data file...') functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...') selectAll = int(all["selectAll"]) keggAll = int(all["keggAll"]) nzAll = int(all["nzAll"]) # Select samples and meta-variables from savedDF metaValsCat = [] metaIDsCat = [] metaValsQuant = all['metaValsQuant'] metaIDsQuant = all['metaIDsQuant'] treeType = int(all['treeType']) DepVar = int(all["DepVar"]) # Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar) allFields = catFields + quantFields if not finalSampleIDs: error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)." myDict = {'error': error} res = json.dumps(myDict) return HttpResponse(res, content_type='application/json') result = '' if treeType == 1: if selectAll == 1: result += 'Taxa level: Kingdom' + '\n' elif selectAll == 2: result += 'Taxa level: Phyla' + '\n' elif selectAll == 3: result += 'Taxa level: Class' + '\n' elif selectAll == 4: result += 'Taxa level: Order' + '\n' elif selectAll == 5: result += 'Taxa level: Family' + '\n' elif selectAll == 6: result += 'Taxa level: Genus' + '\n' elif selectAll == 7: result += 'Taxa level: Species' + '\n' elif selectAll == 9: result += 'Taxa level: OTU_99' + '\n' elif treeType == 2: if keggAll == 1: result += 'KEGG Pathway level: 1' + '\n' elif keggAll == 2: result += 'KEGG Pathway level: 2' + '\n' elif keggAll == 3: result += 'KEGG Pathway level: 3' + '\n' elif treeType == 3: if nzAll == 1: result += 'KEGG Enzyme level: 1' + '\n' elif nzAll == 2: result += 'KEGG Enzyme level: 2' + '\n' elif nzAll == 3: result += 'KEGG Enzyme level: 3' + '\n' elif nzAll == 4: result += 'KEGG Enzyme level: 4' + '\n' elif nzAll == 5: result += 'KEGG Enzyme level: GIBBs' + '\n' elif nzAll == 6: result += 'KEGG Enzyme level: Nitrogen cycle' + '\n' result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n' result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n' result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n' result += '===============================================\n\n' x_scale = all['x_scale'] if x_scale == 'yes': result += 'Predictor (X) variables have been scaled by dividing by their standard deviation.\n' else: result += 'Predictor (X) variables have not been scaled.\n' y_scale = all['y_scale'] if y_scale == 'yes': result += 'All response (Y) variables (i.e., observed & predicted) have been scaled by dividing by their standard deviation.\n' else: result += 'All response (Y) variables (i.e., observed & predicted) have not been
scaled.\n' result += '===============================================\n\n' functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...done') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...') # filter otus based on user settings remUnclass = all['remUnclass'] remZeroes = all['remZeroes'] perZeroes = int(all['perZeroes']) filterData = all['filterData'] filterPer = int(all['filterPer']) filterMeth = int(all['filterMeth']) remMito = all['remMito'] remChloro = all['remChloro'] mapTaxa = 'no' finalDF = pd.DataFrame() if treeType == 1: if selectAll != 8: filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remMito, remChloro, remZeroes, perZeroes, filterData, filterPer, filterMeth) else: filteredDF = savedDF.copy() finalDF, missingList = functions.getTaxaDF(selectAll, '', filteredDF, metaDF, allFields, DepVar, RID, stops, PID) if selectAll == 8: result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n' result += '===============================================\n' if treeType == 2: finalDF, allDF = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID) if treeType == 3: finalDF, allDF = functions.getNZDF(nzAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID) if finalDF.empty: error = "Selected taxa were not found in your selected samples." myDict = {'error': error} res = json.dumps(myDict) return HttpResponse(res, content_type='application/json') # make sure column types are correct finalDF[quantFields] = finalDF[quantFields].astype(float) # transform Y, if requested transform = int(all["transform"]) finalDF = functions.transformDF(transform, DepVar, finalDF) # save location info to session myDir = 'myPhyloDB/media/temp/spls/' if not os.path.exists(myDir): os.makedirs(myDir) path = str(myDir) + str(RID) + '.biom' functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF) functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...done') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...') if DepVar == 0: result += 'Dependent Variable: Abundance' + '\n' elif DepVar == 1: result += 'Dependent Variable: Relative Abundance' + '\n' elif DepVar == 2: result += 'Dependent Variable: OTU Richness' + '\n' elif DepVar == 3: result += 'Dependent Variable: OTU Diversity' + '\n' elif DepVar == 4: result += 'Dependent Variable: Total Abundance' + '\n' result += '\n===============================================\n' count_rDF = pd.DataFrame() if DepVar == 0: count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund') elif DepVar == 1: count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rel_abund') elif DepVar == 2: count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rich') elif DepVar == 3: count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='diversity') elif DepVar == 4: count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund_16S') count_rDF.fillna(0, inplace=True) if 
os.name == 'nt': r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True) else: r = R(RCMD="R/R-Linux/bin/R", use_pandas=True) functions.setBase(RID, 'Verifying R packages...missing packages are being installed') # R packages from cran r("list.of.packages <- c('mixOmics', 'spls', 'pheatmap', 'RColorBrewer')") r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]") print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)") functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...') print r("library(mixOmics)") print r("library(spls)") print r("library(pheatmap)") print r("library(RColorBrewer)") count_rDF.sort_index(axis=0, inplace=True) metaDF.sort_values('sampleid', inplace=True) r.assign("X", count_rDF) r.assign("Y", metaDF[quantFields]) r.assign("names", count_rDF.columns.values) r("colnames(X) <- names") freqCut = all["freqCut"] num = int(freqCut.split('/')[0]) den = int(freqCut.split('/')[1]) r.assign("num", num) r.assign("den", den) uniqueCut = int(all["uniqueCut"]) r.assign("uniqueCut", uniqueCut) r("nzv_cols <- nearZeroVar(X, freqCut=num/den, uniqueCut=uniqueCut)") r("if(length(nzv_cols$Position > 0)) X <- X[,-nzv_cols$Position]") columns = r.get("ncol(X)") if columns == 0: myDict = {'error': "All predictor variables have zero variance.\nsPLS-Regr was aborted!"} res = json.dumps(myDict) return HttpResponse(res, content_type='application/json') if x_scale == 'yes': r("X_scaled <- scale(X, center=FALSE, scale=TRUE)") else: r("X_scaled <- scale(X, center=FALSE, scale=FALSE)") if y_scale == 'yes': r("Y_scaled <- scale(Y, center=FALSE, scale=TRUE)") else: r("Y_scaled <- scale(Y, center=FALSE, scale=FALSE)") r("detach('package:mixOmics', unload=TRUE)") r("set.seed(1)") r("maxK <- length(Y)") spls_string = "cv <- cv.spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=seq(0.1, 0.9, 0.1), K=c(1:maxK), plot.it=FALSE)" r.assign("cmd", spls_string) r("eval(parse(text=cmd))") r("f <- spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=cv$eta.opt, K=cv$K.opt)") r("out <- capture.output(print(f))") fout = r.get("out") if fout is not None: for i in fout: result += str(i) + '\n' else: myDict = {'error': "Analysis did not converge.\nsPLS-Regr was aborted!"} res = json.dumps(myDict) return HttpResponse(res, content_type='application/json') r("set.seed(1)") r("ci.f <- ci.spls(f, plot.it=FALSE, plot.fix='y')") r("cis <- ci.f$cibeta") r("cf <- correct.spls(ci.f, plot.it=FALSE)") r("out <- capture.output(cis)") fout = r.get("out") if fout is not None: result += '\n\nBootstrapped confidence intervals of coefficients:\n' for i in fout: result += str(i) + '\n' result += '\n===============================================\n' r("coef.f <- coef(f)") r("sum <- sum(coef.f != 0)") total = r.get("sum") functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...done!') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...') finalDict = {} if total is not None: r("pred.f <- predict(f, type='fit')") r("pred.f.rows <- row.names(pred.f)") pred = r.get("pred.f") rows = r.get("pred.f.rows") predList = ['pred_' + s for s in quantFields] predDF = pd.DataFrame(pred, columns=predList, index=rows) meta_scaled = r.get("Y_scaled") metaDF_scaled = 
pd.DataFrame(data=meta_scaled, columns=quantFields, index=rows) resultDF = pd.merge(metaDF_scaled, predDF, left_index=True, right_index=True) result += 'sPLS Model Fit (y = mx + b):\n' result += 'y = predicted\n' result += 'x = observed\n\n' for i in xrange(len(quantFields)): r.assign("myCol", quantFields[i]) x = r.get("Y_scaled[,myCol]") x = x.tolist() y = resultDF[predList[i]].astype(float).values.tolist() slp, inter, r_value, p, se = stats.linregress(x, y) r_sq = r_value * r_value result += 'Variable: ' + str(quantFields[i]) + '\n' result += 'Slope (m): ' + str(slp) + '\n' result += 'Intercept (b): ' + str(inter) + '\n' result += 'R2: ' + str(r_sq) + '\n' result += 'Std Error: ' + str(se) + '\n\n\n' # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # r("coef.f.rows <- row.names(coef.f)") cf = r.get("coef.f") rows = r.get("coef.f.rows") coeffsDF = pd.DataFrame(cf, columns=quantFields, index=rows) coeffsDF = coeffsDF.loc[(coeffsDF != 0).any(axis=1)] coeffsDF.sort_index(inplace=True) taxIDList = coeffsDF.index.values.tolist() namesDF = pd.DataFrame() if treeType == 1: if selectAll == 1: taxNameList = Kingdom.objects.filter(kingdomid__in=taxIDList).values('kingdomid', 'kingdomName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'kingdomName': 'rank_name', 'kingdomid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 2: taxNameList = Phyla.objects.filter(phylaid__in=taxIDList).values('phylaid', 'phylaName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'phylaName': 'rank_name', 'phylaid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 3: taxNameList = Class.objects.filter(classid__in=taxIDList).values('classid', 'className') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'className': 'rank_name', 'classid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 4: taxNameList = Order.objects.filter(orderid__in=taxIDList).values('orderid', 'orderName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'orderName': 'rank_name', 'orderid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 5: taxNameList = Family.objects.filter(familyid__in=taxIDList).values('familyid', 'familyName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'familyName': 'rank_name', 'familyid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 6: taxNameList = Genus.objects.filter(genusid__in=taxIDList).values('genusid', 'genusName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'genusName': 'rank_name', 'genusid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 7: taxNameList = Species.objects.filter(speciesid__in=taxIDList).values('speciesid', 'speciesName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'speciesName': 'rank_name', 'speciesid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif selectAll == 9: taxNameList = OTU_99.objects.filter(otuid__in=taxIDList).values('otuid', 'otuName') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'otuName': 'rank_name', 'otuid': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif 
treeType == 2: if keggAll == 1: taxNameList = ko_lvl1.objects.using('picrust').filter(ko_lvl1_id__in=taxIDList).values('ko_lvl1_id', 'ko_lvl1_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'ko_lvl1_name': 'rank_name', 'ko_lvl1_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif keggAll == 2: taxNameList = ko_lvl2.objects.using('picrust').filter(ko_lvl2_id__in=taxIDList).values('ko_lvl2_id', 'ko_lvl2_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'ko_lvl2_name': 'rank_name', 'ko_lvl2_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif keggAll == 3: taxNameList = ko_lvl3.objects.using('picrust').filter(ko_lvl3_id__in=taxIDList).values('ko_lvl3_id', 'ko_lvl3_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'ko_lvl3_name': 'rank_name', 'ko_lvl3_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif treeType == 3: if nzAll == 1: taxNameList = nz_lvl1.objects.using('picrust').filter(nz_lvl1_id__in=taxIDList).values('nz_lvl1_id', 'nz_lvl1_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl1_name': 'rank_name', 'nz_lvl1_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif nzAll == 2: taxNameList = nz_lvl2.objects.using('picrust').filter(nz_lvl2_id__in=taxIDList).values('nz_lvl2_id', 'nz_lvl2_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl2_name': 'rank_name', 'nz_lvl2_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif nzAll == 3: taxNameList = nz_lvl3.objects.using('picrust').filter(nz_lvl3_id__in=taxIDList).values('nz_lvl3_id', 'nz_lvl3_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl3_name': 'rank_name', 'nz_lvl3_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif nzAll == 4: taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif nzAll == 5: taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) elif nzAll == 6: taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name') namesDF = pd.DataFrame(list(taxNameList)) namesDF.rename(columns={'nz_lvl1_name': 'rank_name', 'nz_lvl1_id': 'rank_id'}, inplace=True) namesDF.set_index('rank_id', inplace=True) namesDF.sort_index(inplace=True) taxNameList = namesDF['rank_name'].values.tolist() if treeType == 2: if keggAll > 1: taxNameList[:] = (item[:20] + '...' 
if len(item) > 20 else item for item in taxNameList) elif treeType == 3: if nzAll > 1: taxNameList[:] = (item.split()[0] for item in taxNameList) # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # coeffsDF = pd.merge(namesDF, coeffsDF, left_index=True, right_index=True, how='inner') coeffsDF.reset_index(inplace=True) res_table = coeffsDF.to_html(classes="table display") res_table = res_table.replace('border="1"', 'border="0"') finalDict['res_table'] = str(res_table) # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # resultDF.reset_index(inplace=True) resultDF.rename(columns={'index': 'sampleid'}, inplace=True) pred_table = resultDF.to_html(classes="table display") pred_table = pred_table.replace('border="1"', 'border="0"') finalDict['pred_table'] = str(pred_table) functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...done') functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...') xAxisDict = {} xAxisDict['categories'] = taxNameList labelsDict = {} labelsDict['rotation'] = 270 labelsDict['enabled'] = True labelsDict['style'] = {'fontSize': '14px'} xAxisDict['labels'] = labelsDict xAxisDict['title'] = {'text': None} xAxisDict['tickLength'] = 0 yAxisDict = {} yAxisDict['categories'] = quantFields yAxisDict['labels'] = {'style': {'fontSize': '14px'}} yAxisDict['title'] = {'text': None} seriesList = [] seriesDict = {} seriesDict['borderWidth'] = '1' row, col = coeffsDF.shape dataList = [] for i in xrange(row): for j in xrange(len(quantFields)): val = round(coeffsDF[quantFields[j]].iloc[i], 5) tup = (i, j, val) obsList = list(tup) dataList.append(obsList) # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # seriesDict['data'] = dataList # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # labelDict = {} labelDict['enabled'] = True labelDict['color'] = 'black', labelDict['syle'] = {'textShadow': 'none'} seriesList.append(seriesDict) finalDict['xAxis'] = xAxisDict finalDict['yAxis'] = yAxisDict finalDict['series'] = seriesList # R clustered heatmap clustDF = coeffsDF.drop('rank_id', axis=1) row, col = clustDF.shape method = all['methodVal'] metric = all['metricVal'] path = "myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf" if os.path.exists(path): os.remove(path) if not os.path.exists('myPhyloDB/media/temp/spls/Rplots'): os.makedirs('myPhyloDB/media/temp/spls/Rplots') height = 2.5 + 0.2*row width = 5 + 0.2*(col-1) file = "pdf('myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf', height=" + str(height) + ", width=" + str(width) + ", onefile=FALSE)" r.assign("cmd", file) r("eval(parse(text=cmd))") r.assign("df", clustDF[quantFields]) r("df <- as.matrix(df)") r.assign("rows", taxNameList) r("rownames(df) <- rows") r("col.pal <- brewer.pal(9,'RdBu')") if row > 2 and col > 3: hmap_str = "pheatmap(df, fontsize=12, 
color=col.pal, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "', clustering_distance_cols='" + str(metric) + "')" r.assign("cmd", hmap_str) r("eval(parse(text=cmd))") if row > 2 and col <= 3: hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "')" r.assign("cmd", hmap_str) r("eval(parse(text=cmd))") if row <= 2 and col > 3: hmap_str = "pheatmap(df, color=col.pal, cluster_row=FALSE, clustering_method='" + str(method) + "', clustering_distance_cols='" + str(metric) + "')" r.assign("cmd", hmap_str) r("eval(parse(text=cmd))") if row <= 2 and col <= 3: hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, cluster_row=FALSE)" r.assign("cmd", hmap_str) r("eval(parse(text=cmd))") r("dev.off()") finalDict['text'] = result functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...done!') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # if stops[PID] == RID: res = '' return HttpResponse(res, content_type='application/json') # /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ # finalDict['error'] = 'none' res = json.dumps(finalDict) return HttpResponse(res, content_type='application/json') except Exception as e: if not stops[PID] == RID: logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,) myDate = "\nDate: " + str(datetime.datetime.now()) + "\n" logging.exception(myDate) myDict = {} myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now()) res = json.dumps(myDict) return HttpResponse(res, content_type='application/json')
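# ---------------------------------------------------------------------------
# Minimal, self-contained sketch (not part of the original view): how the
# "sPLS Model Fit" summary above is derived. For every quantitative field the
# view regresses the predicted values (y) against the observed, scaled values
# (x) with scipy.stats.linregress and reports slope, intercept, R2 and the
# standard error of the fit. The arrays and the field name below are made-up
# example data, not values taken from the application.
import numpy as np
from scipy import stats


def spls_fit_summary(observed, predicted, field_name):
    """Return a text block shaped like the one appended to `result` above."""
    slp, inter, r_value, p_value, se = stats.linregress(observed, predicted)
    r_sq = r_value * r_value
    out = 'Variable: ' + str(field_name) + '\n'
    out += 'Slope (m): ' + str(slp) + '\n'
    out += 'Intercept (b): ' + str(inter) + '\n'
    out += 'R2: ' + str(r_sq) + '\n'
    out += 'Std Error: ' + str(se) + '\n'
    return out


if __name__ == '__main__':
    observed = np.array([0.2, 0.4, 0.6, 0.8, 1.0])        # scaled Y (observed)
    predicted = np.array([0.25, 0.38, 0.63, 0.77, 1.02])  # sPLS prediction
    print(spls_fit_summary(observed, predicted, 'pH'))    # hypothetical field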
gpl-3.0
wateraccounting/SEBAL
PreSEBAL/preSEBAL.py
1
110216
# -*- coding: utf-8 -*- """ Created on Thu Sep 08 15:09:49 2016 #test Github @author: tih """ import numpy as np import os import scipy.interpolate import gdal from openpyxl import load_workbook import osr from datetime import datetime, timedelta import pandas as pd import shutil import glob from netCDF4 import Dataset import warnings import SEBAL.pySEBAL.pySEBAL_code as SEBAL def main(): #################################################################################################################### ############################################# CREATE INPUT FOR SEBAL RUN ########################################### #################################################################################################################### #################################################################################################################### ##################################################### PreHANTS #################################################### #################################################################################################################### # PreHANTS # Part 1: Define input by user # Part 2: Set parameters and output folder # Part 3: RUN SEBAL # Part 4: HANTS # Part 5: post HANTS # Part 6: Write output #################################################################################################################### ################################################# PreHANTS part 1 ################################################## #################################################################################################################### VegetationExcel =r"E:\Project_2\UAE\Excel\Excel_PreSEBAL_v1_0.xlsx" # This excel defines the p and c factor and vegetation height. #################################################################################################################### ################################################# PreHANTS part 2 ################################################## #################################################################################################################### # Open Excel workbook used for Vegetation c and p factor conversions wb_veg = load_workbook(VegetationExcel, data_only=True) ws_veg = wb_veg['General_Input'] # Input for preSEBAL.py start_date = "%s" %str(ws_veg['B2'].value) end_date = "%s" %str(ws_veg['B3'].value) inputExcel= r"%s" %str(ws_veg['B4'].value) # The excel with all the SEBAL input data LU_data_FileName = r"%s" %str(ws_veg['B5'].value) # Path to Land Use map output_folder = r"%s" %str(ws_veg['B7'].value) # optional paramater DSSF_Folder= r"%s" %str(ws_veg['B6'].value) ######################## Load Excels ########################################## # Open Excel workbook for SEBAL inputs wb = load_workbook(inputExcel, data_only=True) # Get length of EXCEL sheet ws = wb['General_Input'] ws2 = wb['VIIRS_PROBAV_Input'] endExcel=int(ws.max_row) # Create Dict SEBAL_RUNS = dict() for number in range(2,endExcel+1): input_folder_SEBAL = str(ws['B%d' % number].value) output_folder_SEBAL = str(ws['C%d' % number].value) Image_Type = int(ws['D%d' % number].value) PROBA_V_name = str(ws2['D%d' % number].value) VIIRS_name = str(ws2['B%d' % number].value) SEBAL_RUNS[number] = {'input_folder': input_folder_SEBAL, 'output_folder': output_folder_SEBAL, 'image_type': Image_Type,'PROBA_V_name': PROBA_V_name,'VIIRS_name': VIIRS_name} Kind_Of_Runs_Dict = {} for k, v in SEBAL_RUNS.iteritems(): Kind_Of_Runs_Dict.setdefault(v['image_type'], []).append(k) ######################## Create output folders 
########################################## output_folder_PreSEBAL_SEBAL = os.path.join(output_folder,'PreSEBAL_SEBAL_out') input_folder_HANTS = os.path.join(output_folder,'HANTS_in') output_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_out') temp_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_temp') temp_folder_PreSEBAL_LST = os.path.join(temp_folder_PreSEBAL,'LST') NDVI_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'NDVI') Albedo_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Albedo') WaterMask_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Water_Mask') LAI_outfolder = os.path.join(output_folder_PreSEBAL,'LAI') ALBEDO_outfolder_end = os.path.join(output_folder_PreSEBAL,'ALBEDO') NDVI_outfolder_end = os.path.join(output_folder_PreSEBAL,'NDVI') WaterMask_outfolder_end = os.path.join(output_folder_PreSEBAL,'Water_Mask') TRANS_outfolder = os.path.join(output_folder_PreSEBAL,'Transmissivity') Surface_Temperature_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Surface_Temperature') output_folder_HANTS_end_sharp = os.path.join(output_folder_PreSEBAL, 'LST_Sharpened') output_folder_HANTS_end_Veg = os.path.join(output_folder_PreSEBAL, 'Vegetation_Height') output_folder_p_factor = os.path.join(output_folder_PreSEBAL, 'p_factor') output_folder_LUE = os.path.join(output_folder_PreSEBAL, 'LUE') if not os.path.exists(output_folder_PreSEBAL_SEBAL): os.makedirs(output_folder_PreSEBAL_SEBAL) if not os.path.exists(output_folder_PreSEBAL): os.mkdir(output_folder_PreSEBAL) if not os.path.exists(temp_folder_PreSEBAL): os.mkdir(temp_folder_PreSEBAL) if not os.path.exists(NDVI_outfolder): os.makedirs(NDVI_outfolder) if not os.path.exists(Albedo_outfolder): os.makedirs(Albedo_outfolder) if not os.path.exists(WaterMask_outfolder): os.makedirs(WaterMask_outfolder) if not os.path.exists(LAI_outfolder): os.makedirs(LAI_outfolder) if not os.path.exists(ALBEDO_outfolder_end): os.makedirs(ALBEDO_outfolder_end) if not os.path.exists(NDVI_outfolder_end): os.makedirs(NDVI_outfolder_end) if not os.path.exists(WaterMask_outfolder_end): os.makedirs(WaterMask_outfolder_end) if not os.path.exists(temp_folder_PreSEBAL_LST): os.makedirs(temp_folder_PreSEBAL_LST) if not os.path.exists(Surface_Temperature_outfolder): os.makedirs(Surface_Temperature_outfolder) if not os.path.exists(TRANS_outfolder): os.makedirs(TRANS_outfolder) if not os.path.exists(output_folder_HANTS_end_sharp): os.mkdir(output_folder_HANTS_end_sharp) if not os.path.exists(output_folder_HANTS_end_Veg): os.mkdir(output_folder_HANTS_end_Veg) if not os.path.exists(output_folder_p_factor): os.mkdir(output_folder_p_factor) if not os.path.exists(output_folder_LUE): os.mkdir(output_folder_LUE) # Do not show warnings warnings.filterwarnings('ignore') #################################################################################################################### ################################################### RUN SEBAL part 3 ############################################### #################################################################################################################### ############################## Define General info ############################ for number in Kind_Of_Runs_Dict[2]: # Number defines the column of the inputExcel print(number) if not (SEBAL_RUNS[number]['PROBA_V_name'] == 'None' and SEBAL_RUNS[number]['VIIRS_name'] == 'None'): Rp = 0.91 # Path radiance in the 10.4-12.5 µm band (W/m2/sr/µm) tau_sky = 0.866 # Narrow band transmissivity of air, range: [10.4-12.5 µm] surf_temp_offset = 3 
# Surface temperature offset for water ######################## Open General info from SEBAL Excel ################### # Open the General_Input sheet ws = wb['General_Input'] # Extract the input and output folder, and Image type from the excel file input_folder = str(ws['B%d' % number].value) Image_Type = int(2) # Type of Image (1=Landsat & 2 = VIIRS & GLOBA-V) # Extract the Path to the DEM map from the excel file DEM_fileName = '%s' %str(ws['E%d' % number].value) #'DEM_HydroShed_m' # Open DEM and create Latitude and longitude files lat,lon,lat_fileName,lon_fileName=SEBAL.DEM_lat_lon(DEM_fileName, temp_folder_PreSEBAL) ######################## Extract general data for Landsat ########################################## if Image_Type == 1: # Open the Landsat_Input sheet ws = wb['Landsat_Input'] # Extract Landsat name, number and amount of thermal bands from excel file Name_Landsat_Image = str(ws['B%d' % number].value) # From glovis.usgs.gov Landsat_nr = int(ws['C%d' % number].value) # Type of Landsat (LS) image used (LS5, LS7, or LS8) Bands_thermal = int(ws['D%d' %number].value) # Number of LS bands to use to retrieve land surface # Pixel size of the model pixel_spacing=int(30) # the path to the MTL file of landsat Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image) # read out the general info out of the MTL file in Greenwich Time year, DOY, hour, minutes, UTM_Zone, Sun_elevation = SEBAL.info_general_metadata(Landsat_meta_fileName) # call definition info_general_metadata date=datetime.strptime('%s %s'%(year,DOY), '%Y %j') month = date.month day = date.day # define the kind of sensor and resolution of the sensor sensor1 = 'L%d' % Landsat_nr sensor2 = 'L%d' % Landsat_nr sensor3 = 'L%d' % Landsat_nr res1 = '30m' res2 = '%sm' %int(pixel_spacing) res3 = '30m' # Set the start parameter for determining transmissivity at 0 Determine_transmissivity = 0 ######################## Extract general data for VIIRS-PROBAV ########################################## if Image_Type == 2: # Open the VIIRS_PROBAV_Input sheet ws = wb['VIIRS_PROBAV_Input'] # Extract the name of the thermal and quality VIIRS image from the excel file Name_VIIRS_Image_TB = '%s' %str(ws['B%d' % number].value) # Extract the name to the PROBA-V image from the excel file Name_PROBAV_Image = '%s' %str(ws['D%d' % number].value) # Must be a tiff file # Pixel size of the model pixel_spacing=int(100) # UTM Zone of the end results UTM_Zone = float(ws['G%d' % number].value) if not Name_VIIRS_Image_TB == 'None': #Get time from the VIIRS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE VIIRS NAME CORRECT example: VIIRS_SVI05_npp_d20161021_t0956294_e1002080_b25822_c20161021160209495952_noaa_ops.tif) Total_Day_VIIRS = Name_VIIRS_Image_TB.split('_')[3] Total_Time_VIIRS = Name_VIIRS_Image_TB.split('_')[4] # Get the information out of the VIIRS name in GMT (Greenwich time) year = int(Total_Day_VIIRS[1:5]) month = int(Total_Day_VIIRS[5:7]) day = int(Total_Day_VIIRS[7:9]) Startdate = '%d-%02d-%02d' % (year,month,day) DOY=datetime.strptime(Startdate,'%Y-%m-%d').timetuple().tm_yday hour = int(Total_Time_VIIRS[1:3]) minutes = int(Total_Time_VIIRS[3:5]) # If this is runned correctly, we can determine transmissivity ws = wb['Meteo_Input'] Field_Radiation_24 = '%s' %str(ws['J%d' % number].value) Field_Trans_24 = '%s' %str(ws['K%d' % number].value) Determine_transmissivity = 1 # else use PROBA-V day but than no transmissivity can be determined for now else: # Get the day and time from the PROBA-V Band_PROBAVhdf_fileName = 
os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image)) g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly) Meta_data = g.GetMetadata() Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE']) year = int(Date_PROBAV.split("-")[0]) month = int(Date_PROBAV.split("-")[1]) day = int(Date_PROBAV.split("-")[2]) Var_name = '%d%02d%02d' %(year, month, day) DOY=datetime.strptime(Var_name,'%Y%m%d').timetuple().tm_yday # We cannot determine transmissivity Determine_transmissivity = 0 # Determine the transmissivity if possible (Determine_transmissivity = 1) if Determine_transmissivity == 1: # Rounded difference of the local time from Greenwich (GMT) (hours): delta_GTM = round(np.sign(lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)]) * lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)] * 24 / 360) if np.isnan(delta_GTM) == True: delta_GTM = round(np.nanmean(lon) * np.nanmean(lon) * 24 / 360) # Calculate local time hour += delta_GTM if hour < 0.0: day -= 1 hour += 24 if hour >= 24: day += 1 hour -= 24 # define the kind of sensor and resolution of the sensor sensor1 = 'PROBAV' sensor2 = 'VIIRS' res1 = '375m' res2 = '%sm' %int(pixel_spacing) res3 = '30m' ######################## Extract general data from DEM file and create Slope map ########################################## # Variable date name Var_name = '%d%02d%02d' %(year, month, day) # Reproject from Geog Coord Syst to UTM - # 1) DEM - Original DEM coordinates is Geographic: lat, lon dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset( DEM_fileName, pixel_spacing, UTM_Zone=UTM_Zone) band = dest.GetRasterBand(1) # Get the reprojected dem band ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] # Read out the DEM band and print the DEM properties data_DEM = band.ReadAsArray(0, 0, ncol, nrow) # 2) Latitude file - reprojection # reproject latitude to the landsat projection and save as tiff file lat_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset( lat_fileName, pixel_spacing, UTM_Zone=UTM_Zone) # Get the reprojected latitude data lat_proy = lat_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow) # 3) Longitude file - reprojection # reproject longitude to the landsat projection and save as tiff file lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone=UTM_Zone) # Get the reprojected longitude data lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow) lon_fileName = os.path.join(temp_folder_PreSEBAL,'lon_resh.tif') SEBAL.save_GeoTiff_proy(dest, lon_proy, lon_fileName, shape, nband=1) # Calculate slope and aspect from the reprojected DEM deg2rad,rad2deg,slope,aspect=SEBAL.Calc_Gradient(data_DEM, pixel_spacing) if Determine_transmissivity == 1: # calculate the coz zenith angle Ra_mountain_24, Ra_inst, cos_zn_resh, dr, phi, delta = SEBAL.Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect) cos_zn_fileName = os.path.join(temp_folder_PreSEBAL,'cos_zn.tif') SEBAL.save_GeoTiff_proy(dest, cos_zn_resh, cos_zn_fileName, shape, nband=1) # Save the Ra Ra_inst_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_inst.tif') SEBAL.save_GeoTiff_proy(dest, Ra_inst, Ra_inst_fileName, shape, nband=1) Ra_mountain_24_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_mountain_24.tif') SEBAL.save_GeoTiff_proy(dest, Ra_mountain_24, Ra_mountain_24_fileName, shape, nband=1) #################### Calculate Transmissivity 
########################################## # Open the General_Input sheet ws = wb['Meteo_Input'] # Extract the method radiation value Value_Method_Radiation_inst = '%s' %str(ws['L%d' % number].value) # Values to check if data is created Check_Trans_inst = 0 Check_Trans_24 = 0 ''' This is now turned of, so you need to fill in the instantanious transmissivity or Radiation # Extract the data to the method of radiation if int(Value_Method_Radiation_inst) == 2: Field_Radiation_inst = '%s' %str(ws['N%d' % number].value) if Field_Radiation_inst == 'None': # Instantanious Transmissivity files must be created Check_Trans_inst = 1 # Calculate Transmissivity quarters_hours = np.ceil(minutes/30.) * 30 hours_GMT = hour - delta_GTM if quarters_hours >= 60: hours_GMT += 1 quarters_hours = 0 # Define the instantanious LANDSAF file name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year, month,day, hours_GMT, quarters_hours) file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst) # Reproject the Ra_inst data to match the LANDSAF data Ra_inst_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_inst_fileName, file_Landsaf_inst, method = 1) Ra_inst_3Km = Ra_inst_3Km_dest.GetRasterBand(1).ReadAsArray() Ra_inst_3Km[Ra_inst_3Km==0] = np.nan # Open the Rs LANDSAF data dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst) Rs_inst_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray() Rs_inst_3Km = np.float_(Rs_inst_3Km)/10 Rs_inst_3Km[Rs_inst_3Km<0]=np.nan # Get shape LANDSAF data shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ] # Calculate Transmissivity 3Km Transmissivity_3Km = Rs_inst_3Km/Ra_inst_3Km Transmissivity_3Km_fileName = os.path.join(output_folder_temp,'Transmissivity_3Km.tif') SEBAL.save_GeoTiff_proy(Ra_inst_3Km_dest, Transmissivity_3Km, Transmissivity_3Km_fileName, shape_trans, nband=1) # Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method) Transmissivity_inst_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_3Km_fileName, cos_zn_fileName, method = 3) Transmissivity_inst = Transmissivity_inst_dest.GetRasterBand(1).ReadAsArray() Transmissivity_inst[Transmissivity_inst>0.98] = 0.98 Transmissivity_inst_fileName = os.path.join(TRANS_outfolder,'Transmissivity_inst_%s.tif' %Var_name) SEBAL.save_GeoTiff_proy(Transmissivity_inst_dest, Transmissivity_inst, Transmissivity_inst_fileName, shape, nband=1) ''' # Extract the method radiation value Value_Method_Radiation_24 = '%s' %str(ws['I%d' % number].value) # Extract the data to the method of radiation if int(Value_Method_Radiation_24) == 2: Field_Radiation_24 = '%s' %str(ws['K%d' % number].value) if Field_Radiation_24 == 'None': # Daily Transmissivity files must be created Check_Trans_24 = 1 # Create times that are needed to calculate daily Rs (LANDSAF) Starttime_GMT = datetime.strptime(Startdate,'%Y-%m-%d') + timedelta(hours=-delta_GTM) Endtime_GMT = Starttime_GMT + timedelta(days=1) Times = pd.date_range(Starttime_GMT, Endtime_GMT,freq = '30min') for Time in Times[:-1]: year_LANDSAF = Time.year month_LANDSAF = Time.month day_LANDSAF = Time.day hour_LANDSAF = Time.hour min_LANDSAF = Time.minute # Define the instantanious LANDSAF file #re = glob.glob('') name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year_LANDSAF, month_LANDSAF,day_LANDSAF, hour_LANDSAF, min_LANDSAF) file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst) # Open the Rs LANDSAF data dest_Rs_inst_3Km = 
gdal.Open(file_Landsaf_inst) Rs_one_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray() Rs_one_3Km = np.float_(Rs_one_3Km)/10 Rs_one_3Km[Rs_one_3Km < 0]=np.nan if Time == Times[0]: Rs_24_3Km_tot = Rs_one_3Km else: Rs_24_3Km_tot += Rs_one_3Km Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1]) # Reproject the Ra_inst data to match the LANDSAF data Ra_24_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_mountain_24_fileName, file_Landsaf_inst, method = 3) Ra_24_3Km = Ra_24_3Km_dest.GetRasterBand(1).ReadAsArray() Ra_24_3Km[Ra_24_3Km==0] = np.nan # Do gapfilling Ra_24_3Km = gap_filling(Ra_24_3Km,np.nan) # Get shape LANDSAF data shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ] # Calculate Transmissivity 3Km Transmissivity_24_3Km = Rs_24_3Km/Ra_24_3Km Transmissivity_24_3Km_fileName = os.path.join(temp_folder_PreSEBAL,'Transmissivity_24_3Km.tif') SEBAL.save_GeoTiff_proy(Ra_24_3Km_dest, Transmissivity_24_3Km, Transmissivity_24_3Km_fileName, shape_trans, nband=1) # Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method) Transmissivity_24_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_24_3Km_fileName, lon_fileName, method = 3) Transmissivity_24 = Transmissivity_24_dest.GetRasterBand(1).ReadAsArray() Transmissivity_24[Transmissivity_24>0.98] = 0.98 Transmissivity_24_fileName = os.path.join(TRANS_outfolder,'Transmissivity_24_%s.tif' %Var_name) SEBAL.save_GeoTiff_proy(Transmissivity_24_dest, Transmissivity_24, Transmissivity_24_fileName, shape, nband=1) #################### Calculate NDVI for LANDSAT ########################################## if Image_Type == 1: # Define bands used for each Landsat number if Landsat_nr == 5 or Landsat_nr == 7: Bands = np.array([1, 2, 3, 4, 5, 7, 6]) elif Landsat_nr == 8: Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11]) else: print('Landsat image not supported, use Landsat 7 or 8') # Open MTL landsat and get the correction parameters Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image) Lmin, Lmax, k1_c, k2_c = SEBAL.info_band_metadata(Landsat_meta_fileName, Bands) # Mean solar exo-atmospheric irradiance for each band (W/m2/microm) # for the different Landsat images (L5, L7, or L8) ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44]) ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9]) ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106]) # Open one band - To get the metadata of the landsat images only once (to get the extend) src_FileName = os.path.join(input_folder, '%s_B2.TIF' % Name_Landsat_Image) # before 10! ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls = SEBAL.Get_Extend_Landsat(src_FileName) # Crop the Landsat images to the DEM extent - dst_FileName = os.path.join(temp_folder_PreSEBAL,'cropped_LS_b2.tif') # Before 10 !! 
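# NOTE (explanatory comment added for clarity, not original code): the daily
# transmissivity written to TRANS_outfolder in the LANDSAF block above is the
# ratio of the averaged half-hourly LANDSAF shortwave radiation to the
# modelled clear-sky radiation, capped at 0.98, i.e. in outline:
#     Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1])       # mean of 30-min slots
#     Transmissivity_24_3Km = Rs_24_3Km / Ra_24_3Km     # observed / clear-sky
#     Transmissivity_24[Transmissivity_24 > 0.98] = 0.98
# The 3 km result is then resampled onto the DEM grid (nearest neighbour, as
# noted above) before being saved as Transmissivity_24_<date>.tif.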
# Clip the landsat image to match the DEM map lsc, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(src_FileName, lon_fileName) data_LS = lsc.GetRasterBand(1).ReadAsArray() SEBAL.save_GeoTiff_proy(dest, data_LS, dst_FileName, shape, nband=1) # Get the extend of the remaining landsat file after clipping based on the DEM file lsc,band_data,ulx,uly,lrx,lry,x_size_lsc,y_size_lsc = SEBAL.Get_Extend_Landsat(dst_FileName) # Create the corrected signals of Landsat in 1 array Reflect = SEBAL.Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn_resh,dr,Landsat_nr, cos_zn_fileName) # Calculate temporal water mask water_mask_temp=SEBAL.Water_Mask(shape,Reflect) # Calculate NDVI NDVI = SEBAL.Calc_NDVI(Reflect) # Calculate albedo albedo = SEBAL.Calc_albedo(Reflect) # Save NDVI NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_LS_%s.tif'%Var_name) SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1) # Save albedo albedo_FileName = os.path.join(Albedo_outfolder,'Albedo_LS_%s.tif'%Var_name) SEBAL.save_GeoTiff_proy(dest, albedo, albedo_FileName, shape, nband=1) ################### Extract Meteo data for Landsat days from SEBAL Excel ################## # Open the Meteo_Input sheet ws = wb['Meteo_Input'] # ---------------------------- Instantaneous Air Temperature ------------ # Open meteo data, first try to open as value, otherwise as string (path) try: Temp_inst = float(ws['B%d' %number].value) # Instantaneous Air Temperature (°C) # if the data is not a value, than open as a string except: Temp_inst_name = '%s' %str(ws['B%d' %number].value) Temp_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'Temp_inst_input.tif') Temp_inst = SEBAL.Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, lon_fileName) try: RH_inst = float(ws['D%d' %number].value) # Instantaneous Relative humidity (%) # if the data is not a value, than open as a string except: RH_inst_name = '%s' %str(ws['D%d' %number].value) RH_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'RH_inst_input.tif') RH_inst = SEBAL.Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, lon_fileName) esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3)) eact_inst = RH_inst * esat_inst / 100 #################### Calculate NDVI for VIIRS-PROBAV ########################################## if Image_Type == 2: if Name_PROBAV_Image == 'None': offset_all = [-1, 1, -2, 2, -3, 3,-4, 4,-5 ,5 ,-6 , 6, -7, 7, -8, 8] found_Name_PROBAV_Image = 0 for offset in offset_all: if found_Name_PROBAV_Image == 1: continue else: try: Name_PROBAV_Image = SEBAL_RUNS[number + offset]['PROBA_V_name'] if not Name_PROBAV_Image == 'None': found_Name_PROBAV_Image = 1 except: pass # Get the day and time from the PROBA-V Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image)) g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly) Meta_data = g.GetMetadata() Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE']) year = int(Date_PROBAV.split("-")[0]) month = int(Date_PROBAV.split("-")[1]) day = int(Date_PROBAV.split("-")[2]) Var_name_2 = '%d%02d%02d' %(year, month, day) # Define the output name NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name_2) Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name_2) water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name_2) else: NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' 
%Var_name) Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name) water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name) # vegetation maps that will be generated if not os.path.exists(NDVI_FileName): # Define the bands that will be used bands=['SM', 'B1', 'B2', 'B3', 'B4'] #'SM', 'BLUE', 'RED', 'NIR', 'SWIR' # Set the index number at 0 index=0 # create a zero array with the shape of the reprojected DEM file data_PROBAV=np.zeros((shape[1], shape[0])) spectral_reflectance_PROBAV=np.zeros([shape[1], shape[0], 5]) # constants n188_float=248 # Now it is 248, but we do not exactly know what this really means and if this is for constant for all images. # write the data one by one to the spectral_reflectance_PROBAV for bandnmr in bands: # Translate the PROBA-V names to the Landsat band names Band_number = {'SM':7,'B1':8,'B2':10,'B3':9,'B4':11} # Open the dataset Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image)) g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly) # define data if it is not there yet if not 'Var_name' in locals(): Meta_data = g.GetMetadata() Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE']) year = int(Date_PROBAV.split("-")[0]) month = int(Date_PROBAV.split("-")[0]) day = int(Date_PROBAV.split("-")[0]) Var_name = '%d%02d%02d' %(year, month, day) # Open the .hdf file name_out = os.path.join(input_folder, '%s_test.tif' % (Name_PROBAV_Image)) name_in = g.GetSubDatasets()[Band_number[bandnmr]][0] # Get environmental variable SEBAL_env_paths = os.environ["SEBAL"].split(';') GDAL_env_path = SEBAL_env_paths[0] GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe') # run gdal translate command FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, name_out) SEBAL.Run_command_window(FullCmd) # Open data dest_PV = gdal.Open(name_out) Data = dest_PV.GetRasterBand(1).ReadAsArray() dest_PV = None # Remove temporary file os.remove(name_out) # Define the x and y spacing Meta_data = g.GetMetadata() Lat_Bottom = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LATITUDE']) Lat_Top = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LATITUDE']) Lon_Left = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LONGITUDE']) Lon_Right = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LONGITUDE']) Pixel_size = float((Meta_data['LEVEL3_GEOMETRY_VNIR_VAA_MAPPING']).split(' ')[-3]) # Define the georeference of the PROBA-V data geo_PROBAV=[Lon_Left-0.5*Pixel_size, Pixel_size, 0, Lat_Top+0.5*Pixel_size, 0, -Pixel_size] #0.000992063492063 # Define the name of the output file PROBAV_data_name=os.path.join(input_folder, '%s_%s.tif' % (Name_PROBAV_Image,bandnmr)) dst_fileName=os.path.join(input_folder, PROBAV_data_name) # create gtiff output with the PROBA-V band fmt = 'GTiff' driver = gdal.GetDriverByName(fmt) dst_dataset = driver.Create(dst_fileName, int(Data.shape[1]), int(Data.shape[0]), 1,gdal.GDT_Float32) dst_dataset.SetGeoTransform(geo_PROBAV) # set the reference info srs = osr.SpatialReference() srs.SetWellKnownGeogCS("WGS84") dst_dataset.SetProjection(srs.ExportToWkt()) # write the array in the geotiff band dst_dataset.GetRasterBand(1).WriteArray(Data) dst_dataset = None # Open the PROBA-V band in SEBAL g=gdal.Open(PROBAV_data_name.replace("\\","/")) # If the data cannot be opened, change the extension if g is None: PROBAV_data_name=os.path.join(input_folder, '%s_%s.tiff' % (Name_PROBAV_Image,bandnmr)) # Reproject the PROBA-V band to match DEM's resolution PROBAV, ulx, lry, lrx, 
uly, epsg_to = SEBAL.reproject_dataset_example( PROBAV_data_name, lon_fileName) # Open the reprojected PROBA-V band data data_PROBAV_DN = PROBAV.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow) # Define the filename to store the cropped Landsat image dst_FileName = os.path.join(output_folder, 'Output_PROBAV','proy_PROBAV_%s.tif' % bandnmr) # close the PROBA-V g=None # If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV if bandnmr is not 'SM': data_PROBAV[:, :]=data_PROBAV_DN/2000 spectral_reflectance_PROBAV[:, :, index]=data_PROBAV[:, :] # If the band data is the SM band than write the data into the spectral_reflectance_PROBAV and create cloud mask else: data_PROBAV[:, :]=data_PROBAV_DN Cloud_Mask_PROBAV=np.zeros((shape[1], shape[0])) Cloud_Mask_PROBAV[data_PROBAV[:,:]!=n188_float]=1 spectral_reflectance_PROBAV[:, :, index]=Cloud_Mask_PROBAV # Change the spectral reflectance to meet certain limits spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]<=0,np.nan,spectral_reflectance_PROBAV[:, :, index]) spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]>=150,np.nan,spectral_reflectance_PROBAV[:, :, index]) # Go to the next index index=index+1 # Bands in PROBAV spectral reflectance # 0 = MS # 1 = BLUE # 2 = NIR # 3 = RED # 4 = SWIR # Calculate surface albedo based on PROBA-V Surface_Albedo_PROBAV = 0.219 * spectral_reflectance_PROBAV[:, :, 1] + 0.361 * spectral_reflectance_PROBAV[:, :, 2] + 0.379 * spectral_reflectance_PROBAV[:, :, 3] + 0.041 * spectral_reflectance_PROBAV[:, :, 4] # Calculate the NDVI based on PROBA-V n218_memory = spectral_reflectance_PROBAV[:, :, 2] + spectral_reflectance_PROBAV[:, :, 3] NDVI = np.zeros((shape[1], shape[0])) NDVI[n218_memory != 0] = ( spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] - spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] )/ ( spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] + spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] ) # Create Water mask based on PROBA-V water_mask_temp = np.zeros((shape[1], shape[0])) water_mask_temp[np.logical_and(np.logical_and(NDVI<0.1,data_DEM>0),Surface_Albedo_PROBAV<0.2)]=1 # Save Albedo for PROBA-V SEBAL.save_GeoTiff_proy(dest, Surface_Albedo_PROBAV, Albedo_FileName, shape, nband=1) # Save NDVI for PROBA-V SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1) # Save Water Mask for PROBA-V SEBAL.save_GeoTiff_proy(dest, water_mask_temp, water_mask_temp_FileName, shape, nband=1) else: dest_NDVI = gdal.Open(NDVI_FileName) dest_water_mask_temp = gdal.Open(water_mask_temp_FileName) NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray() water_mask_temp = dest_water_mask_temp.GetRasterBand(1).ReadAsArray() ############################ Calculate LAI ########################################## # Calculate the LAI FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask_temp,shape) # Create LAI name if Image_Type == 1: LAI_FileName = os.path.join(LAI_outfolder,'LAI_LS_%s.tif' %Var_name) SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1) #################### Calculate thermal for Landsat ########################################## if Image_Type == 1: # Calculate thermal therm_data = SEBAL.Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder,ulx_dem,lry_dem,lrx_dem,uly_dem,shape) # Calculate surface temperature 
Surface_temp=SEBAL.Calc_surface_water_temp(Temp_inst,Landsat_nr,Lmax,Lmin,therm_data,b10_emissivity,k1_c,k2_c,eact_inst,shape,water_mask_temp,Bands_thermal,Rp,tau_sky,surf_temp_offset,Image_Type) # Save surface temperature therm_data_FileName = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_LS_%s.tif' %Var_name) SEBAL.save_GeoTiff_proy(dest, Surface_temp, therm_data_FileName, shape, nband=1) ################################## Calculate VIIRS surface temperature ######################## if Image_Type == 2: # If there is VIIRS data if not Name_VIIRS_Image_TB == 'None': # Define the VIIRS thermal data name VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_TB)) # Reproject VIIRS thermal data VIIRS, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(VIIRS_data_name, lon_fileName) # Open VIIRS thermal data data_VIIRS = VIIRS.GetRasterBand(1).ReadAsArray() # Set the conditions for the brightness temperature (100m) brightness_temp=np.where(data_VIIRS>=250, data_VIIRS, np.nan) # Constants k1=606.399172 k2=1258.78 L_lambda_b10_100=((2*6.63e-34*(3.0e8)**2)/((11.45e-6)**5*(np.exp((6.63e-34*3e8)/(1.38e-23*(11.45e-6)*brightness_temp))-1)))*1e-6 # Get Temperature for 100 and 375m resolution Temp_TOA_100 = SEBAL.Get_Thermal(L_lambda_b10_100,Rp,Temp_inst,tau_sky,tir_emis,k1,k2) # Conditions for surface temperature (100m) n120_surface_temp=Temp_TOA_100.clip(250, 450) # Save the surface temperature of the VIIRS in 100m resolution temp_surface_100_fileName_beforeTS = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_VIIRS_%s.tif' %Var_name) SEBAL.save_GeoTiff_proy(dest, n120_surface_temp, temp_surface_100_fileName_beforeTS, shape, nband=1) ################################################################################################################### ################################################### HANTS part 4 ################################################## ################################################################################################################### # Select files for PROBA-V that needs to be used (sometimes a composite product is used) PROBA_V_Dict = {} for k, v in SEBAL_RUNS.iteritems(): if str(v['PROBA_V_name']) != 'None': PROBA_V_Dict.setdefault(v['PROBA_V_name'], []).append(k) Amount_Unique_PROBA_V_images = len(PROBA_V_Dict.keys()) Back_names = [] # Define HANTS PROBA-V variables VARS = ["NDVI", "Albedo"] for VAR in VARS: output_folder_preprocessing_VAR = os.path.join(output_folder_PreSEBAL_SEBAL, VAR) os.chdir(output_folder_preprocessing_VAR) for PROBA_V_image in PROBA_V_Dict.keys(): Band_PROBAVhdf_fileName = os.path.join(input_folder_SEBAL, '%s.HDF5' % (PROBA_V_image)) g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly) Meta_data = g.GetMetadata() Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE']) year = int(Date_PROBAV.split("-")[0]) month = int(Date_PROBAV.split("-")[1]) day = int(Date_PROBAV.split("-")[2]) Back_name = '%s_PROBAV_%d%02d%02d.tif' %(VAR, year, month, day) # Create HANTS input NDVI input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR) if not os.path.exists(input_folder_HANTS_VAR): os.mkdir(input_folder_HANTS_VAR) shutil.copy(os.path.join(output_folder_preprocessing_VAR,Back_name),os.path.join(input_folder_HANTS_VAR,Back_name)) # VIIRS parameter copy VIIRS_Dict = {} for k, v in SEBAL_RUNS.iteritems(): if str(v['VIIRS_name']) != 'None': VIIRS_Dict.setdefault(v['VIIRS_name'], []).append(k) THERM = 'Surface_Temperature' output_folder_preprocessing_THERM = 
os.path.join(output_folder_PreSEBAL_SEBAL, THERM) for VIIRS_image in VIIRS_Dict.keys(): try: Date_VIIRS = (VIIRS_image.split("d")[1]) year = int(Date_VIIRS.split("-")[0][0:4]) month = int(Date_VIIRS.split("-")[0][4:6]) day = int(Date_VIIRS.split("-")[0][6:8]) except: Date_VIIRS = (VIIRS_image.split("_")[3]) year = int(Date_VIIRS.split("-")[0][0:4]) month = int(Date_VIIRS.split("-")[0][4:6]) day = int(Date_VIIRS.split("-")[0][6:8]) Back_name_TB = '%s_VIIRS_%d%02d%02d.tif' %(THERM, year, month, day) # Create HANTS input NDVI input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM) if not os.path.exists(input_folder_HANTS_THERM): os.mkdir(input_folder_HANTS_THERM) shutil.copy(os.path.join(output_folder_preprocessing_THERM,Back_name_TB),os.path.join(input_folder_HANTS_THERM,Back_name_TB)) ############################################ Solve shift in PROBA=V ############################################## VAR = 'Albedo' os.chdir(os.path.join(temp_folder_PreSEBAL, VAR)) re = glob.glob('%s*.tif' %(VAR)) i = 0 while i < int(len(re)-1): filename1 = re[0] # maak hier misschien later van dat alleen 0 word genomen als de hoeveelheid pixels minder dan 40% van totaal is filename2 = re[i + 1] dest1 = gdal.Open(filename1) dest2 = gdal.Open(filename2) Array1 = dest1.GetRasterBand(1).ReadAsArray().flatten() Array2 = dest2.GetRasterBand(1).ReadAsArray().flatten() Array3 = dest1.GetRasterBand(1).ReadAsArray()[1:,:].flatten() Array4 = dest2.GetRasterBand(1).ReadAsArray()[:-1,:].flatten() Array1_flat = Array1[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))] Array2_flat = Array2[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))] Array3_flat = Array3[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))] Array4_flat = Array4[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))] Corr = np.corrcoef(Array1_flat,Array2_flat)[0,1] Corr2 = np.corrcoef(Array3_flat,Array4_flat)[0,1] if Corr2 > Corr: x,y = dest1.GetRasterBand(1).ReadAsArray().shape for VAR_check in VARS: os.chdir(os.path.join(temp_folder_PreSEBAL, VAR_check)) endname = filename2.split('_')[-1] re_vars = glob.glob('%s*_%s' %(VAR_check,endname)) filename3 = re_vars[0] dest3 = gdal.Open(filename3) New_Array = np.ones(dest1.GetRasterBand(1).ReadAsArray().shape) * np.nan New_Array[1:,:] = dest3.GetRasterBand(1).ReadAsArray()[:-1,:] filename_out = os.path.join(temp_folder_PreSEBAL, VAR_check, filename3) SEBAL.save_GeoTiff_proy(dest3, New_Array, filename_out, [int(y),int(x)], nband=1) i += 1 ################################################### General HANTS ############################################### # Open one image PROBA_V_IMAGE = os.path.join(input_folder_HANTS_VAR,Back_name) destPROBAV = gdal.Open(PROBA_V_IMAGE) VIIRS_IMAGE = os.path.join(input_folder_HANTS_THERM,Back_name_TB) destVIIRS = gdal.Open(VIIRS_IMAGE) # Get Geotransform Geo_PROBAV = destPROBAV.GetGeoTransform() x_size_PROBAV = destPROBAV.RasterXSize y_size_PROBAV = destPROBAV.RasterYSize Geo_VIIRS = destVIIRS.GetGeoTransform() x_size_VIIRS = destVIIRS.RasterXSize y_size_VIIRS = destVIIRS.RasterYSize # Get projection proj = Get_epsg(destPROBAV) projVIIRS = Get_epsg(destVIIRS) # Data parameters latlim = [Geo_PROBAV[3] + y_size_PROBAV * Geo_PROBAV[5],Geo_PROBAV[3]] lonlim = [Geo_PROBAV[0], Geo_PROBAV[0] + x_size_PROBAV * Geo_PROBAV[1]] cellsize = Geo_PROBAV[1] latlimVIIRS = [Geo_VIIRS [3] + y_size_VIIRS * Geo_VIIRS [5],Geo_VIIRS [3]] lonlimVIIRS = [Geo_VIIRS [0], Geo_VIIRS [0] + x_size_VIIRS * Geo_VIIRS [1]] cellsizeVIIRS = Geo_VIIRS [1] # Get the HANTS parameters ws_para = 
wb_veg['HANTS_Input'] # amount of images Dates = pd.date_range(start_date, end_date, freq = 'D') ###################################################### HANTS Thermal ############################################### # Define parameters for the NDVI THERM = 'Surface_Temperature' # Define paths for NDVI input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM) name_format = '%s_VIIRS_{0}.tif' %THERM nc_path_TB = os.path.join(input_folder_HANTS_THERM,'%s_NC.nc' %THERM) # Create Output folder rasters_path_out = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS") if not os.path.exists(rasters_path_out): os.mkdir(rasters_path_out) # HANTS parameters for NDVI nb = int(len(Dates)) Dates = pd.date_range(start_date, end_date, freq = 'D') nf = int(ws_para['D2'].value) # number of frequencies to be considered above the zero frequency low = float(ws_para['D3'].value) # valid range minimum high = float(ws_para['D4'].value) # valid range maximum HiLo = str(ws_para['D5'].value) # 2-character string indicating rejection of high or low outliers fet = float(ws_para['D6'].value) # fit error tolerance (point eviating more than fet from curve fit are rejected) delta = float(ws_para['D7'].value) # small positive number e.g. 0.1 to supress high amplitudes dod = float(ws_para['D8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure from SEBAL.hants import wa_gdal # Run wa_gdal.run_HANTS(input_folder_HANTS_THERM, name_format, start_date, end_date, latlimVIIRS, lonlimVIIRS, cellsizeVIIRS, nc_path_TB, nb, nf, HiLo, low, high, fet, dod, delta, projVIIRS, -9999.0, rasters_path_out, export_hants_only=True) ###################################################### HANTS NDVI ############################################### # Define parameters for the NDVI VAR = 'NDVI' # Define paths for NDVI input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR) name_format = '%s_PROBAV_{0}.tif' %VAR nc_path_ndvi = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR) # Create Output folder rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS") if not os.path.exists(rasters_path_out): os.mkdir(rasters_path_out) # HANTS parameters for NDVI # Dates = pd.date_range(start_date, end_date, freq = '5D') nb = int(len(Dates)) # nr of images nf = int(ws_para['C2'].value) # number of frequencies to be considered above the zero frequency low = float(ws_para['C3'].value) # valid range minimum high = float(ws_para['C4'].value) # valid range maximum HiLo = str(ws_para['C5'].value) # 2-character string indicating rejection of high or low outliers fet = float(ws_para['C6'].value) # fit error tolerance (point eviating more than fet from curve fit are rejected) delta = float(ws_para['C7'].value) # small positive number e.g. 0.1 to supress high amplitudes dod = float(ws_para['C8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). 
This is a safety measure from SEBAL.hants import wa_gdal # Run wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format, start_date, end_date, latlim, lonlim, cellsize, nc_path_ndvi, nb, nf, HiLo, low, high, fet, dod, delta, proj, -9999.0, rasters_path_out, export_hants_only=True) ###################################################### HANTS Albedo ############################################## # Define parameters for the albedo VAR = 'Albedo' # Define paths for NDVI input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR) name_format = '%s_PROBAV_{0}.tif' %VAR nc_path_albedo = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR) # Create Output folder rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS") if not os.path.exists(rasters_path_out): os.mkdir(rasters_path_out) # HANTS parameters for NDVI Dates = pd.date_range(start_date, end_date, freq = 'D') nb = int(len(Dates)) # nr of images nf = int(ws_para['B2'].value) # number of frequencies to be considered above the zero frequency low = float(ws_para['B3'].value) # valid range minimum high = float(ws_para['B4'].value) # valid range maximum HiLo = str(ws_para['B5'].value) # 2-character string indicating rejection of high or low outliers fet = float(ws_para['B6'].value) # fit error tolerance (point eviating more than fet from curve fit are rejected) delta = float(ws_para['B7'].value) # small positive number e.g. 0.1 to supress high amplitudes dod = float(ws_para['B8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure from SEBAL.hants import wa_gdal # Run wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format, start_date, end_date, latlim, lonlim, cellsize, nc_path_albedo, nb, nf, HiLo, low, high, fet, dod, delta, proj, -9999.0, rasters_path_out, export_hants_only=True) ################################################################################################################### ################################################### post HANTS part 5 ############################################# ################################################################################################################### ############################################# Create Outlier maps for PROBA-V ####################################### # Create output folder if not exists output_folder_HANTS_outliers_PROBAV = os.path.join(temp_folder_PreSEBAL, 'Outliers_PROBAV') if not os.path.exists(output_folder_HANTS_outliers_PROBAV): os.mkdir(output_folder_HANTS_outliers_PROBAV) fh = Dataset(nc_path_albedo, mode='r') Var = fh.variables.keys()[-1] lat = fh.variables[fh.variables.keys()[1]][:] lon = fh.variables[fh.variables.keys()[2]][:] time = fh.variables[fh.variables.keys()[3]][:] minimum_lon = np.min(lon) maximum_lat = np.max(lat) diff_lon = lon[1] - lon[0] diff_lat = lat[1] - lat[0] if not ('shape' in locals() or 'dest' in locals()): Example_file = os.path.join(output_folder_preprocessing_VAR, Back_name) dest = gdal.Open(Example_file) ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] for i in range(0,int(np.shape(time)[0])): time_now = time[i] data = fh.variables['outliers'][:,:,i] geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat]) name_out = os.path.join(output_folder_HANTS_outliers_PROBAV, 'Outliers_PROBAV_%s.tif' %time_now) SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1) 
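# NOTE (explanatory comment, not original code): the loop above walks through
# every time step of the HANTS 'outliers' variable and writes one GeoTIFF per
# date. The geo tuple follows the GDAL geotransform convention
#     (origin_x, pixel_width, 0, origin_y, 0, pixel_height)
# built from the NetCDF latitude/longitude vectors; pixel_height (diff_lat)
# comes out negative when the latitude vector runs north to south, giving a
# north-up raster. As written, the tuple is only computed for reference here:
# save_GeoTiff_proy takes its georeference from the example dataset `dest`
# opened above.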
############################################# Create ALBEDO and NDVI ######################################### # Create the end thermal files date by date for date in Dates: # Define date year = date.year month = date.month day = date.day # input filenames needed for creating end thermal file filename_outliers = os.path.join(output_folder_HANTS_outliers_PROBAV,"Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day)) VAR = 'Albedo' input_folder_PreSEBAL_ALBEDO = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS") filename_Albedo_original = os.path.join(Albedo_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day)) filename_Albedo_HANTS = os.path.join(input_folder_PreSEBAL_ALBEDO, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day)) VAR = 'NDVI' input_folder_PreSEBAL_NDVI = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS") filename_NDVI_original = os.path.join(NDVI_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day)) filename_NDVI_HANTS = os.path.join(input_folder_PreSEBAL_NDVI, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day)) # Open the input filenames dest_outliers = gdal.Open(filename_outliers) dest_PROBAV_ALBEDO = gdal.Open(filename_Albedo_original) dest_PROBAV_NDVI = gdal.Open(filename_NDVI_original) dest_HANTS_ALBEDO = gdal.Open(filename_Albedo_HANTS) dest_HANTS_NDVI = gdal.Open(filename_NDVI_HANTS) # If original exists, this will be the basis for the end thermal map if not dest_PROBAV_ALBEDO == None: # Open arrays of the input files Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:] Array_ALBEDO_original = dest_PROBAV_ALBEDO.GetRasterBand(1).ReadAsArray() Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()[:,:] Array_NDVI_original = dest_PROBAV_NDVI.GetRasterBand(1).ReadAsArray() Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()[:,:] # Create outlier Mask Array_outliers[Array_outliers==-9999.] 
= 0 Array_outliers_mask = np.zeros(np.shape(Array_outliers)) Array_outliers_mask[Array_outliers==1.]=0 Array_outliers_mask[Array_outliers==0.]=1 Array_outliers_mask[Array_outliers_mask==0]=2 Array_outliers_mask[Array_outliers_mask==1]=0 Array_outliers_mask[Array_outliers_mask==2]=1 # Create a buffer zone arround the bad pixels Array_outliers_mask = Create_Buffer(Array_outliers_mask) Array_outliers_mask[Array_outliers_mask==1] = 2 Array_outliers_mask[Array_outliers_mask==0] = 1 Array_outliers_mask[Array_outliers_mask==2] = 0 # If there are more than 300 Good pixels if np.nansum(Array_outliers_mask) > 300: # Use the mask to find the good original pixels and HANTS pixels Array_ALBEDO_original_mask_nan = Array_ALBEDO_original * Array_outliers_mask Array_ALBEDO_HANTS_mask_nan = Array_ALBEDO_HANTS * Array_outliers_mask Array_NDVI_original_mask_nan = Array_NDVI_original * Array_outliers_mask Array_NDVI_HANTS_mask_nan = Array_NDVI_HANTS * Array_outliers_mask # Create a 1D array of those pixels Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan.flatten() Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan.flatten() Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan.flatten() Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan.flatten() # Remove pixels with high and low values Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten<-0.2] = np.nan Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten>0.6] = np.nan Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten<-0.2] = np.nan Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten>0.6] = np.nan Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten<-0.2] = np.nan Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten>0.6] = np.nan Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten<-0.2] = np.nan Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten>0.6] = np.nan # Remove the nan values (if there is a nan in one of the arrays remove also the same value in the other array) Array_ALBEDO_original_mask_nan_flatten2 = Array_ALBEDO_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))] Array_ALBEDO_HANTS_mask_nan_flatten2 = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))] Array_NDVI_original_mask_nan_flatten2 = Array_NDVI_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_original_mask_nan_flatten),~np.isnan(Array_NDVI_HANTS_mask_nan_flatten))] Array_NDVI_HANTS_mask_nan_flatten2 = Array_NDVI_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_HANTS_mask_nan_flatten),~np.isnan(Array_NDVI_original_mask_nan_flatten))] Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan_flatten2 Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan_flatten2 Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan_flatten2 Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan_flatten2 # Remove all zero values Array_ALBEDO_original_mask_nan_flatten_without_zero =Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten != 0.0] Array_NDVI_original_mask_nan_flatten_without_zero =Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten != 0.0] # Caluculate the value of the 40 and 90 
percent percentiles of the original arrays good pixels Array_ALBEDO_original_mask_value_cold = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,40) Array_ALBEDO_original_mask_value_hot = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,90) Array_NDVI_original_mask_value_cold = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,40) Array_NDVI_original_mask_value_hot = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,90) # Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas) Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)] Array_ALBEDO_original_mask_nan_flatten_exc_coldest = Array_ALBEDO_original_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)] Array_NDVI_HANTS_mask_nan_flatten_exc_coldest = Array_NDVI_HANTS_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)] Array_NDVI_original_mask_nan_flatten_exc_coldest = Array_NDVI_original_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)] #Calculate the mean of those arrays Ave_ALBEDO_HANTS = np.nanmean(Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest) Ave_ALBEDO_original = np.nanmean(Array_ALBEDO_original_mask_nan_flatten_exc_coldest) Ave_NDVI_HANTS = np.nanmean(Array_NDVI_HANTS_mask_nan_flatten_exc_coldest) Ave_NDVI_original = np.nanmean(Array_NDVI_original_mask_nan_flatten_exc_coldest) # Calculate the correction factor for the simulated image Factor_Albedo = Ave_ALBEDO_original/Ave_ALBEDO_HANTS Factor_NDVI = Ave_NDVI_original/Ave_NDVI_HANTS # Apply this factor over the simulated HANTS image Array_ALBEDO_HANTS_Corrected = Array_ALBEDO_HANTS * Factor_Albedo Array_NDVI_HANTS_Corrected = Array_NDVI_HANTS * Factor_NDVI # Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values End_array_Albedo = np.ones(np.shape(Array_outliers_mask)) * np.nan End_array_Albedo[Array_outliers_mask==0] =Array_ALBEDO_HANTS_Corrected[Array_outliers_mask==0] End_array_Albedo[Array_outliers_mask==1] =Array_ALBEDO_original[Array_outliers_mask==1] End_array_NDVI = np.ones(np.shape(Array_outliers_mask)) * np.nan End_array_NDVI[Array_outliers_mask==0] =Array_NDVI_HANTS_Corrected[Array_outliers_mask==0] End_array_NDVI[Array_outliers_mask==1] =Array_NDVI_original[Array_outliers_mask==1] # If the original images is to bad than replace the whole image by the simulated HANTS image else: End_array_Albedo = Array_ALBEDO_HANTS End_array_NDVI = Array_NDVI_HANTS # Get the geolocation information of the image geo = dest_PROBAV_ALBEDO.GetGeoTransform() proj = dest_outliers.GetProjection() # If there is no original image, use the simulated HANTS image else: Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray() End_array_Albedo = Array_ALBEDO_HANTS Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray() End_array_NDVI = Array_NDVI_HANTS dest_test = None i = 0 while dest_test == None: # Get the 
date of the first image that exists to get the geolocation information date2 = Dates[i] year2 = date2.year month2= date2.month day2 = date2.day try: filename_ALBEDO_original2 = os.path.join(input_folder_PreSEBAL_ALBEDO, "Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2)) dest_test = gdal.Open(filename_ALBEDO_original2) geo = dest_test.GetGeoTransform() proj = dest_test.GetProjection() except: i+=1 # Save the end array output_name_end_ALBEDO = os.path.join(ALBEDO_outfolder_end, "Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)) SEBAL.save_GeoTiff_proy(dest, End_array_Albedo, output_name_end_ALBEDO, shape, nband=1) output_name_end_NDVI = os.path.join(NDVI_outfolder_end, "NDVI_PROBAV_%d%02d%02d.tif"%(year,month,day)) SEBAL.save_GeoTiff_proy(dest, End_array_NDVI, output_name_end_NDVI, shape, nband=1) ############################################# Create Outlier maps for VIIRS ######################################### # Create output folder if not exists output_folder_HANTS_outliers_VIIRS = os.path.join(temp_folder_PreSEBAL, 'Outliers_VIIRS') if not os.path.exists(output_folder_HANTS_outliers_VIIRS): os.mkdir(output_folder_HANTS_outliers_VIIRS) fh = Dataset(nc_path_TB, mode='r') Var = fh.variables.keys()[-1] lat = fh.variables[fh.variables.keys()[1]][:] lon = fh.variables[fh.variables.keys()[2]][:] time = fh.variables[fh.variables.keys()[3]][:] minimum_lon = np.min(lon) maximum_lat = np.max(lat) diff_lon = lon[1] - lon[0] diff_lat = lat[1] - lat[0] if not ('shape' in locals() or 'dest' in locals()): Example_file = os.path.join(output_folder_preprocessing_THERM,Back_name_TB) dest = gdal.Open(Example_file) ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] for i in range(0,int(np.shape(time)[0])): time_now = time[i] data = fh.variables['outliers'][:,:,i] geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat]) name_out = os.path.join(output_folder_HANTS_outliers_VIIRS, 'Outliers_VIIRS_%s.tif' %time_now) SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1) ############################################# Create end thermal ######################################### # Create the end thermal files date by date for date in Dates: # Define date year = date.year month = date.month day = date.day # input filenames needed for creating end thermal file filename_outliers = os.path.join(output_folder_HANTS_outliers_VIIRS,"Outliers_VIIRS_%d%02d%02d.tif" %(year,month,day)) filename_VIIRS_original = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day)) filename_VIIRS_HANTS = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS", "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day)) # Open the input filenames dest_outliers = gdal.Open(filename_outliers) dest_VIIRS_original = gdal.Open(filename_VIIRS_original) dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS) # If original exists, this will be the basis for the end thermal map if not dest_VIIRS_original == None: # Open arrays of the input files Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:] Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray() Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:] # Create outlier Mask Array_outliers[Array_outliers==-9999.] 
= 0 Array_outliers_mask = np.zeros(np.shape(Array_outliers)) Array_outliers_mask[Array_outliers==1.]=0 Array_outliers_mask[Array_outliers==0.]=1 Array_outliers_mask[Array_outliers_mask==0]=2 Array_outliers_mask[Array_outliers_mask==1]=0 Array_outliers_mask[Array_outliers_mask==2]=1 # Create a buffer zone arround the bad pixels Array_outliers_mask = Create_Buffer(Array_outliers_mask) Array_outliers_mask[Array_outliers_mask==1] = 2 Array_outliers_mask[Array_outliers_mask==0] = 1 Array_outliers_mask[Array_outliers_mask==2] = 0 # If there are more than 300 Good pixels if np.nansum(Array_outliers_mask) > 300: # Use the mask to find the good original pixels and HANTS pixels Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask # Create a 1D array of those pixels Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten() Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten() # Remove pixels with high and low values Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten<250] = np.nan Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten>350] = np.nan Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten<250] = np.nan Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>350] = np.nan # Remove the nan values (if there is a nan in one of the arrays remove also the same value in the other array) Array_VIIRS_original_mask_no_nan_flatten = Array_VIIRS_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))] Array_VIIRS_HANTS_mask_no_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))] # Remove all zero values Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_no_nan_flatten[Array_VIIRS_original_mask_no_nan_flatten>0] # Caluculate the value of the 40 and 90 percent percentiles of the original arrays good pixels Array_VIIRS_original_mask_value_cold = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40) Array_VIIRS_original_mask_value_hot = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90) # Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas) Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)] Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)] #Calculate the mean of those arrays Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest) Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest) # Calculate the correction factor for the simulated image Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS # Apply this factor over the simulated HANTS image Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor # Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values End_array = 
np.ones(np.shape(Array_outliers_mask)) * np.nan End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0] End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1] # If the original images is to bad than replace the whole image by the simulated HANTS image else: End_array = Array_VIIRS_HANTS # Get the geolocation information of the image geo = dest_VIIRS_original.GetGeoTransform() proj = dest_outliers.GetProjection() # If there is no original image, use the simulated HANTS image else: Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray() End_array = Array_VIIRS_HANTS dest_test = None i = 0 while dest_test == None: # Get the date of the first image that exists to get the geolocation information date2 = Dates[i] year2 = date2.year month2= date2.month day2 = date2.day try: filename_VIIRS_original2 = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year2,month2,day2)) dest_test = gdal.Open(filename_VIIRS_original2) geo = dest_test.GetGeoTransform() proj = dest_test.GetProjection() except: i+=1 # Save the end array output_name_end_LST = os.path.join(temp_folder_PreSEBAL_LST, "VIIRS_LST_%d%02d%02d.tif"%(year,month,day)) SEBAL.save_GeoTiff_proy(dest, End_array, output_name_end_LST, shape, nband=1) ################################################################################################################### ###################################################### preSEBAL continue ########################################## ################################################################################################################### ############################################### Apply thermal sharpening ########################################## print('---------------------------------------------------------') print('-------------------- Downscale VIIRS --------------------') print('---------------------------------------------------------') # Upscale VIIRS and PROBA-V to 400m pixel_spacing_upscale = 400 # Open the General_Input sheet ws = wb['General_Input'] # Extract the input and output folder, and Image type from the excel file DEM_fileName = str(ws['E2'].value) ws = wb['VIIRS_PROBAV_Input'] UTM_Zone = int(str(ws['G2'].value)) # Reproject from Geog Coord Syst to UTM - # 1) DEM - Original DEM coordinates is Geographic: lat, lon proyDEM_fileName_100 = os.path.join(temp_folder_PreSEBAL,'DEM_100.tif') dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset( DEM_fileName, pixel_spacing = 100, UTM_Zone=UTM_Zone) band = dest.GetRasterBand(1) # Get the reprojected dem band ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] DEM = band.ReadAsArray() # Save DEM file with the 100 meter resolution SEBAL.save_GeoTiff_proy(dest, DEM, proyDEM_fileName_100, shape, nband=1) # Create upscaled DEM proyDEM_fileName_400 = os.path.join(temp_folder_PreSEBAL,'DEM_400.tif') dest_400, ulx_dem_400, lry_dem_400, lrx_dem_400, uly_dem_400, epsg_to = SEBAL.reproject_dataset( DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone) # find spatial parameters array DEM_400 = dest_400.GetRasterBand(1).ReadAsArray() Y_raster_size_400 = dest_400.RasterYSize X_raster_size_400 = dest_400.RasterXSize shape_400=([X_raster_size_400, Y_raster_size_400]) # Save DEM file with the 400 meter resolution SEBAL.save_GeoTiff_proy(dest_400, DEM_400, proyDEM_fileName_400, shape_400, nband=1) for date in Dates: surf_temp_fileName = 
os.path.join(temp_folder_PreSEBAL, 'Surf_temp_After_TS_%d%02d%02d.tif' %(date.year, date.month, date.day)) temp_surface_100_fileName_beforeTS = os.path.join(temp_folder_PreSEBAL_LST,'VIIRS_LST_%d%02d%02d.tif' %(date.year, date.month, date.day)) ################################ Thermal Sharpening ##################################################### # Define filename file_NDVI_after_HANTS = os.path.join(NDVI_outfolder_end, 'NDVI_PROBAV_%d%02d%02d.tif' %(date.year, date.month, date.day)) # Open NDVI/LST destination folder dest_NDVI = gdal.Open(file_NDVI_after_HANTS) dest_LST = gdal.Open(temp_surface_100_fileName_beforeTS) # Open NDVI array NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray() # Open LST array LST = dest_LST.GetRasterBand(1).ReadAsArray() # Upscale thermal band VIIRS from 100m to 400m VIIRS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example( temp_surface_100_fileName_beforeTS, proyDEM_fileName_400) data_Temp_Surf_400 = VIIRS_Upscale.GetRasterBand(1).ReadAsArray() # Upscale PROBA-V NDVI from 100m to 400m NDVI_PROBAV_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example( file_NDVI_after_HANTS, proyDEM_fileName_400) data_NDVI_400 = NDVI_PROBAV_Upscale.GetRasterBand(1).ReadAsArray() # Define the width of the moving window box Box=9 # Apply the surface temperature sharpening temp_surface_sharpened = SEBAL.Thermal_Sharpening(data_Temp_Surf_400, data_NDVI_400, NDVI, Box, NDVI_PROBAV_Upscale, output_folder, proyDEM_fileName_100, shape, dest, surf_temp_fileName) # Create Water mask based on HANTS NDVI output water_mask = np.zeros((shape[1], shape[0])) water_mask[NDVI<0.0]=1 # Divide temporal watermask in snow and water mask by using surface temperature Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = SEBAL.CalculateSnowWaterMask(NDVI,shape,water_mask,temp_surface_sharpened) # Replace water values temp_surface_sharpened[water_mask==1] = LST[water_mask == 1] temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened), LST, temp_surface_sharpened) surf_temp_fileName = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%d%02d%02d.tif' %(date.year, date.month, date.day)) SEBAL.save_GeoTiff_proy(dest, temp_surface_sharpened, surf_temp_fileName, shape, nband=1) ################################################## Calculate LAI ################################################## # Open NDVI destination folder dest_NDVI = gdal.Open(file_NDVI_after_HANTS) # Open NDVI array NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray() LAI_FileName = os.path.join(LAI_outfolder,'LAI_%d%02d%02d.tif' %(date.year, date.month, date.day)) # Calculate LAI FPAR, tir_emis, Nitrogen, vegt_cover, LAI, b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask, shape) SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1) ################################ Calculate the Vegetation height ######################## # Open preprosessing excel the Vegetation_Height sheet ws_veg = wb_veg['Vegetation_Height'] # Define output name for the LandUse map dst_FileName = os.path.join(output_folder,'LU.tif') # Open LU data LU_dest = gdal.Open(LU_data_FileName) LU_data = LU_dest.GetRasterBand(1).ReadAsArray() # Reproject the LAI to the same projection as LU dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS LAI_proj = dest1.GetRasterBand(1).ReadAsArray() # Read out the excel file coefficient numbers Array = np.zeros([ws_veg.max_row-1,4]) for j in 
['A','C','D','E']: j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3} for i in range(2,ws_veg.max_row+1): Value = (ws_veg['%s%s' %(j,i)].value) Array[i-2, j_number[j]] = Value # Create maps with the coefficient numbers for the right land cover coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3]) for coeff_nmbr in range(0,3): for Class in range(0,len(Array)): coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1] # Get some dimensions of the projected dataset band_data = dest1.GetRasterBand(1) ncol_data = dest1.RasterXSize nrow_data = dest1.RasterYSize shape_data=[ncol_data, nrow_data] # Calculate the vegetation height in the LU projection Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2] Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600) # Save the vegetation height in the lU projection in the temporary directory Veg_Height_proj_FileName = os.path.join(temp_folder_PreSEBAL,'Veg_Height_proj.tif') SEBAL.save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1) # Reproject the Veg_height to the LAI projection dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName) # Get some dimensions of the original dataset band_data = dest.GetRasterBand(1) ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize # Open the Veg_height with the same projection as LAI Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) Veg_Height[Veg_Height == 0] = 0.4 # Save Vegetation Height in the end folder dst_FileName = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%d%02d%02d.tif' %(date.year, date.month, date.day)) SEBAL.save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1) ######################## calculate Water Mask ######################### # Open all the water mask os.chdir(WaterMask_outfolder) re_water_mask = glob.glob('Water_Mask*.tif') # Loop over all the files for water_mask_filename in re_water_mask: # Create the filepath to the water mask water_mask_filepath = os.path.join(WaterMask_outfolder,water_mask_filename) # Open Array water_mask_dest = gdal.Open(water_mask_filepath) # If the total water mask raster does not exists create this one if not 'water_mask_array' in locals(): water_mask_array = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize]) # Add all the water masks water_mask_array += water_mask_dest.GetRasterBand(1).ReadAsArray() # Calculate the end water mask if the area is more than 50 percent defined as water water_mask_array_per = water_mask_array/len(re_water_mask) water_mask_array_end = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize]) water_mask_array_end[water_mask_array_per > 0.5] = 1 # Save water mask WaterMask_outfolder_end_FileName = os.path.join(WaterMask_outfolder_end,'Water_Mask.tif') SEBAL.save_GeoTiff_proy(dest, water_mask_array_end, WaterMask_outfolder_end_FileName, shape, nband=1) ######################## calculate p-factor by using the Landuse map ######################### ws_p = wb_veg['p-factor'] Array_P = np.zeros([ws_p.max_row-1,2]) for j in ['A','C']: j_number={'A' : 0, 'C' : 1} for i in range(2,ws_p.max_row+1): Value = (ws_p['%s%s' %(j,i)].value) Array_P[i-2, j_number[j]] = Value p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) for Class in range(0,len(Array_P)): p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1] p_factor[p_factor == 0] = 0.5 dst_FileName = os.path.join(temp_folder_PreSEBAL, 'p-factor_proj.tif') 
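# The block above builds the per-pixel p-factor raster by looking up, for each
# land-use class code in LU_data, the value listed in the 'p-factor' worksheet
# (Array_P), and then replaces the remaining zeros with a default of 0.5.
# A minimal, generic sketch of that lookup pattern follows; the function name
# and the example class codes/values are only illustrative, the real ones come
# from the workbook.
def lookup_parameter_per_class(lu_raster, class_values, default=0.5):
    import numpy as np
    # Start from the default value, then overwrite per land-use class
    parameter = np.full(np.shape(lu_raster), default, dtype=float)
    for class_code, value in class_values.items():
        parameter[lu_raster == class_code] = value
    return parameter

# e.g. lookup_parameter_per_class(LU_data, {1: 0.55, 2: 0.45})  # hypothetical codes/values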
SEBAL.save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1) dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName) band_data = dest.GetRasterBand(1) # Get the reprojected dem band ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) p_factor[p_factor == 0] = 0.5 dst_pfactor_FileName = os.path.join(output_folder_p_factor,'p_factor.tif') SEBAL.save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1) ######################## calculate c-factor by using the Landuse map ######################### ws_c = wb_veg['C-factor'] Array_C = np.zeros([ws_c.max_row-1,2]) for j in ['A','C']: j_number={'A' : 0, 'C' : 1} for i in range(2,ws_c.max_row+1): Value = (ws_c['%s%s' %(j,i)].value) Array_C[i-2, j_number[j]] = Value c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) for Class in range(0,len(Array_C)): c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1] c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) LUE_max[c_factor == 3] = 2.5 LUE_max[c_factor == 4] = 4.5 LUE_max[LUE_max == 0] = 2.5 dst_FileName = os.path.join(temp_folder_PreSEBAL, 'LUE_max_proj.tif') SEBAL.save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1) dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName) band_data = dest.GetRasterBand(1) # Get the reprojected dem band ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) LUE_max[LUE_max == 0] = 2.5 dst_LUEmax_FileName = os.path.join(output_folder_LUE,'LUE_max.tif') SEBAL.save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1) #################################################################################################################### ################################################ Write output part 6 ############################################### #################################################################################################################### ############################################# Fill in the additional input sheet ######################################### # things to be filled in: # Transmissivity (optional) # NDVI (additional input) # Albedo (additional input) # LST (additional input) # Water Mask (additional input) # p-factor (soil input) # c-factor (soil input) # Vegetation height (meteo input) # VIIRS parameter copy VIIRS_Dict = {} for k, v in SEBAL_RUNS.iteritems(): VIIRS_Dict.setdefault(v['output_folder'], []).append(k) ''' LST folder = output_folder_HANTS_end NDVI folder = os.path.join(output_folder_HANTS, 'NDVI') ALBEDO folder = os.path.join(output_folder_HANTS, 'Albedo') SAVI folder = os.path.join(output_folder_HANTS, 'SAVI') ''' VARS = ["NDVI", "Albedo"] Letter_dict = {"NDVI":'B', "Albedo":'D'} xfile = load_workbook(inputExcel) sheet_additional = xfile.get_sheet_by_name('Additional_Input') sheet_meteo = xfile.get_sheet_by_name('Meteo_Input') sheet_soil = xfile.get_sheet_by_name('Soil_Input') sheet_out_name = ''.join([os.path.splitext(os.path.basename(inputExcel))[0],'_SEBAL.xlsx']) sheet_out_dir = os.path.dirname(inputExcel) sheet_out_file_name = os.path.join(sheet_out_dir, sheet_out_name) for output_name_run in VIIRS_Dict.keys(): # Get General parameters Row_number = VIIRS_Dict[output_name_run][0] Type_of_Run = SEBAL_RUNS.items() VIIRS_date = 
output_name_run.split('_')[-1] VIIRS_datetime= datetime.strptime(VIIRS_date, '%d%m%Y') date_run = '%d%02d%02d' %(VIIRS_datetime.year,VIIRS_datetime.month,VIIRS_datetime.day) # import LST file_name_LST = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%s.tif' %date_run ) sheet_additional['E%d'%(Row_number)] = str(file_name_LST) # import NDVI and Albedo and water mask for VAR_SINGLE in VARS: Letter = Letter_dict[VAR_SINGLE] file_name_VAR_single = os.path.join(output_folder_PreSEBAL, VAR_SINGLE, '%s_PROBAV_%s.tif' %(VAR_SINGLE, date_run)) sheet_additional['%s%d'%(Letter, Row_number)] = str(file_name_VAR_single) # import Water Mask sheet_additional['C%d'%(Row_number)] = str(WaterMask_outfolder_end_FileName) # import p-factor file_name_p_factor = os.path.join(output_folder_p_factor,'p_factor.tif') sheet_soil['H%d'%(Row_number)] = str(file_name_p_factor) # import p-factor file_name_c_factor = os.path.join(output_folder_LUE, 'LUE_max.tif') sheet_soil['I%d'%(Row_number)] = str(file_name_c_factor) # import vegetation height file_name_vegt_height = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%s.tif' %date_run) sheet_meteo['O%d'%(Row_number)] = str(file_name_vegt_height) xfile.save(sheet_out_file_name) ''' # If instantanious Transmissivity is calculated in PreSEBAL if Check_Trans_inst == 1: sheet['N%d'%(number)] = str(Transmissivity_inst_fileName) xfile.save(inputExcel) # If daily Transmissivity is calculated in PreSEBAL if Check_Trans_24 == 1: sheet_meteo['K%d'%(number)] = str(Transmissivity_24_fileName) xfile.save(sheet_out_file_name) ''' ''' ############################################# Create Outlier maps for PROBA-V ######################################### # Create output folder if not exists output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers') if not os.path.exists(output_folder_HANTS_outliers): os.mkdir(output_folder_HANTS_outliers) fh = Dataset(nc_path_albedo, mode='r') Var = fh.variables.keys()[-1] data = fh.variables['outliers'][:] lat = fh.variables[fh.variables.keys()[1]][:] lon = fh.variables[fh.variables.keys()[2]][:] time = fh.variables[fh.variables.keys()[3]][:] minimum_lon = np.min(lon) maximum_lat = np.max(lat) diff_lon = lon[1] - lon[0] diff_lat = lat[1] - lat[0] if not ('shape' in locals() or 'dest' in locals()): Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name) dest = gdal.Open(Example_file) ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] for i in range(0,int(np.shape(data)[2])): time_now = time[i] data_now = data[:,:,i] geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat]) name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now) SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1) ############################################ NDVI ################################################## # Create output folder if not exists output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers_NDVI') if not os.path.exists(output_folder_HANTS_outliers): os.mkdir(output_folder_HANTS_outliers) fh = Dataset(nc_path_ndvi, mode='r') Var = fh.variables.keys()[-1] data = fh.variables['outliers'][:] lat = fh.variables[fh.variables.keys()[1]][:] lon = fh.variables[fh.variables.keys()[2]][:] time = fh.variables[fh.variables.keys()[3]][:] minimum_lon = np.min(lon) maximum_lat = np.max(lat) diff_lon = lon[1] - lon[0] diff_lat = lat[1] - lat[0] if not ('shape' in 
locals() or 'dest' in locals()): Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name) dest = gdal.Open(Example_file) ncol = dest.RasterXSize # Get the reprojected dem column size nrow = dest.RasterYSize # Get the reprojected dem row size shape=[ncol, nrow] for i in range(0,int(np.shape(data)[2])): time_now = time[i] data_now = data[:,:,i] geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat]) name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now) SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1) ###################################################### postHANTS Albedo ############################################### for date in Dates: year = date.year month = date.month day = date.day filename_outliers = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Outliers\Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day) filename_VIIRS_original = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year,month,day) filename_VIIRS_HANTS = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day) dest_outliers = gdal.Open(filename_outliers) dest_VIIRS_original = gdal.Open(filename_VIIRS_original) dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS) if not dest_VIIRS_original == None: Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:] Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray() Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:] Array_outliers[Array_outliers==-9999.] = 0 Array_outliers_mask = np.zeros(np.shape(Array_outliers)) Array_outliers_mask[Array_outliers==1.]=0 Array_outliers_mask[Array_outliers==0.]=1 Array_outliers_mask[Array_outliers_mask==0]=2 Array_outliers_mask[Array_outliers_mask==1]=0 Array_outliers_mask[Array_outliers_mask==2]=1 Array_outliers_mask = Create_Buffer(Array_outliers_mask) Array_outliers_mask[Array_outliers_mask==1] = 2 Array_outliers_mask[Array_outliers_mask==0] = 1 Array_outliers_mask[Array_outliers_mask==2] = 0 if np.nansum(Array_outliers_mask) > 30: Array_outliers_mask[Array_VIIRS_HANTS == 0] = np.nan Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten() Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten() Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan_flatten[~np.isnan(Array_VIIRS_original_mask_nan_flatten)] Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten)] Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>0] Array_VIIRS_original_mask_value_cold = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40) Array_VIIRS_original_mask_value_hot = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90) Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)] Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < 
Array_VIIRS_original_mask_value_hot)] Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest[Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest==-9999] = np.nan Array_VIIRS_original_mask_nan_flatten_exc_coldest[Array_VIIRS_original_mask_nan_flatten_exc_coldest==-9999] = np.nan Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest) Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest) Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0] End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1] else: End_array = Array_VIIRS_HANTS geo = dest_VIIRS_original.GetGeoTransform() proj = dest_outliers.GetProjection() else: Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray() End_array = Array_VIIRS_HANTS dest_test = None i = 0 while dest_test == None: date2 = Dates[i] year2 = date2.year month2= date2.month day2 = date2.day try: filename_VIIRS_original2 = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2) dest_test = gdal.Open(filename_VIIRS_original2) geo = dest_test.GetGeoTransform() proj = dest_test.GetProjection() except: i+=1 import wa.General.data_conversions as DC name = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_end\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day) DC.Save_as_tiff(name, End_array, geo, proj) ################################## All input is now calculated, so preprosessing can start ######################## # Open preprosessing excel the Vegetation_Height sheet ws_veg = wb_veg['Vegetation_Height'] # Define output name for the LandUse map dst_FileName = os.path.join(output_folder,'LU_%s.tif' %Var_name) # Open LU data LU_dest = gdal.Open(LU_data_FileName) LU_data = LU_dest.GetRasterBand(1).ReadAsArray() # Reproject the LAI to the same projection as LU dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS LAI_proj = dest1.GetRasterBand(1).ReadAsArray() # Read out the excel file coefficient numbers Array = np.zeros([ws_veg.max_row-1,4]) for j in ['A','C','D','E']: j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3} for i in range(2,ws_veg.max_row+1): Value = (ws_veg['%s%s' %(j,i)].value) Array[i-2, j_number[j]] = Value # Create maps with the coefficient numbers for the right land cover coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3]) for coeff_nmbr in range(0,3): for Class in range(0,len(Array)): coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1] # Get some dimensions of the projected dataset band_data = dest1.GetRasterBand(1) ncol_data = dest1.RasterXSize nrow_data = dest1.RasterYSize shape_data=[ncol_data, nrow_data] # Calculate the vegetation height in the LU projection Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2] Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600) # Save the vegetation height in the lU projection in the temporary directory Veg_Height_proj_FileName = os.path.join(output_folder_temp,'Veg_Height_proj.tif') save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1) # Reproject the Veg_height to the LAI projection dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName) # Get some dimensions of the original dataset 
band_data = dest.GetRasterBand(1) ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize # Open the Veg_height with the same projection as LAI Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) Veg_Height[Veg_Height == 0] = np.nan # Save Vegetation Height in the end folder dst_FileName = os.path.join(output_folder,'Vegetation_Height_%s.tif' %Var_name) save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1) ######################## calculate p-factor by using the Landuse map ######################### ws_p = wb_veg['p-factor'] Array_P = np.zeros([ws_p.max_row-1,2]) for j in ['A','C']: j_number={'A' : 0, 'C' : 1} for i in range(2,ws_p.max_row+1): Value = (ws_p['%s%s' %(j,i)].value) Array_P[i-2, j_number[j]] = Value p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) for Class in range(0,len(Array_P)): p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1] p_factor[p_factor == 0] = np.nan dst_FileName = os.path.join(output_folder_temp,'p-factor_proj.tif') save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1) dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName) band_data = dest.GetRasterBand(1) # Get the reprojected dem band ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) p_factor[p_factor == 0] = np.nan dst_pfactor_FileName = os.path.join(output_folder,'p-factor_%s.tif' %Var_name) save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1) ######################## calculate c-factor by using the Landuse map ######################### ws_c = wb_veg['C-factor'] Array_C = np.zeros([ws_c.max_row-1,2]) for j in ['A','C']: j_number={'A' : 0, 'C' : 1} for i in range(2,ws_c.max_row+1): Value = (ws_c['%s%s' %(j,i)].value) Array_C[i-2, j_number[j]] = Value c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) for Class in range(0,len(Array_C)): c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1] c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])]) LUE_max[c_factor == 3] = 2.5 LUE_max[c_factor == 4] = 4.5 LUE_max[LUE_max == 0] = np.nan dst_FileName = os.path.join(output_folder_temp,'LUE_max_proj.tif') save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1) dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName) band_data = dest.GetRasterBand(1) # Get the reprojected dem band ncol_data = dest.RasterXSize nrow_data = dest.RasterYSize LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data) LUE_max[LUE_max == 0] = np.nan dst_LUEmax_FileName = os.path.join(output_folder,'LUE_max_%s.tif' %Var_name) save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1) ############################# delete temporary directory ######################## shutil.rmtree(output_folder_temp) ################################################################################# ''' # Functions ################################################################################# def Create_Buffer(Data_In): ''' This function creates a 3D array which is used to apply the moving window ''' Buffer_area = 7 # A block of 2 times Buffer_area + 1 will be 1 if there is the pixel in the middle is 1 Data_Out=np.empty((len(Data_In),len(Data_In[1]))) Data_Out[:,:] = Data_In for ypixel in range(0,Buffer_area + 1): for xpixel in range(1,Buffer_area + 1): if ypixel==0: for xpixel in 
range(1,Buffer_area + 1):
                    Data_Out[:,0:-xpixel] += Data_In[:,xpixel:]
                    Data_Out[:,xpixel:] += Data_In[:,:-xpixel]
                for ypixel in range(1,Buffer_area + 1):
                    Data_Out[ypixel:,:] += Data_In[:-ypixel,:]
                    Data_Out[0:-ypixel,:] += Data_In[ypixel:,:]
            else:
                Data_Out[0:-xpixel,ypixel:] += Data_In[xpixel:,:-ypixel]
                Data_Out[xpixel:,ypixel:] += Data_In[:-xpixel,:-ypixel]
                Data_Out[0:-xpixel,0:-ypixel] += Data_In[xpixel:,ypixel:]
                Data_Out[xpixel:,0:-ypixel] += Data_In[:-xpixel,ypixel:]
    Data_Out[Data_Out>0.1] = 1
    Data_Out[Data_Out<=0.1] = 0
    return(Data_Out)

#------------------------------------------------------------------------------
def Get_epsg(g):
    try:
        # Get info of the dataset that is used for transforming
        gland_proj = g.GetProjection()
        Projection = gland_proj.split('EPSG","')
        epsg_to = int((str(Projection[-1]).split(']')[0])[0:-1])
    except:
        epsg_to = 4326
        print('Was not able to get the projection, so WGS84 is assumed')
    return(epsg_to)

#------------------------------------------------------------------------------
def gap_filling(data, NoDataValue):
    """
    This function fills the no-data gaps in a numpy array

    Keyword arguments:
    data -- Array
    NoDataValue -- Value that must be filled
    """
    # fill the no data values
    if NoDataValue is np.nan:
        mask = ~(np.isnan(data))
    else:
        mask = ~(data == NoDataValue)
    xx, yy = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))
    xym = np.vstack((np.ravel(xx[mask]), np.ravel(yy[mask]))).T
    data0 = np.ravel(data[:, :][mask])
    interp0 = scipy.interpolate.NearestNDInterpolator(xym, data0)
    data_end = interp0(np.ravel(xx), np.ravel(yy)).reshape(xx.shape)
    return (data_end)

#------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
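#------------------------------------------------------------------------------
# A small, illustrative usage sketch for the helper functions above. The
# function name and arrays are made up for illustration; it assumes numpy and
# scipy are importable, as the helpers themselves already require.
def _example_buffer_and_gapfill():
    import numpy as np
    from scipy import ndimage

    # Buffer a single flagged pixel: Create_Buffer sums shifted copies of the
    # mask over all offsets within Buffer_area (hardcoded to 7), so for a 0/1
    # mask the result should match a binary dilation with a square window of
    # size 2*Buffer_area + 1 = 15.
    mask = np.zeros((50, 50))
    mask[25, 25] = 1
    buffered = Create_Buffer(mask)
    dilated = ndimage.binary_dilation(mask, structure=np.ones((15, 15))).astype(float)

    # Fill a no-data value with the value of the nearest valid pixel.
    data = np.array([[1.0, -9999.0, 3.0],
                     [4.0, 5.0, -9999.0]])
    filled = gap_filling(data, -9999.0)
    return buffered, dilated, filled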
apache-2.0
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/sklearn/preprocessing/data.py
1
67256
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause import numbers import warnings from itertools import chain, combinations import numpy as np from scipy import sparse from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils import deprecated from ..utils.extmath import _incremental_mean_and_var from ..utils.extmath import row_norms from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, incr_mean_variance_axis, min_max_axis) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] DEPRECATION_MSG_1D = ( "Passing 1d arrays as data is deprecated in 0.17 and will " "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample." ) def _handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* instead of deprecated *data_min*. data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* instead of deprecated *data_max*. data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* instead of deprecated *data_range*. 
""" def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy @property @deprecated("Attribute data_range will be removed in " "0.19. Use ``data_range_`` instead") def data_range(self): return self.data_range_ @property @deprecated("Attribute data_min will be removed in " "0.19. Use ``data_min_`` instead") def data_min(self): return self.data_min_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. " "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. 
""" check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. 
This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* is recommended instead of deprecated *std_*. mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy @property @deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead") def std_(self): return self.scale_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. 
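Examples
--------
A rough sketch of streaming use on two dense batches (illustrative
values; shown for the dense, non-sparse code path):

>>> import numpy as np
>>> from sklearn.preprocessing import StandardScaler
>>> scaler = StandardScaler()
>>> _ = scaler.partial_fit(np.array([[0.], [2.]]))
>>> _ = scaler.partial_fit(np.array([[4.], [6.]]))
>>> float(scaler.mean_[0]), int(scaler.n_samples_seen_)
(3.0, 4)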
""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. .. versionadded:: 0.17 Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. max_abs_ : ndarray, shape (n_features,) Per feature maximum absolute value. n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. """ def __init__(self, copy=True): self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.max_abs_ def fit(self, X, y=None): """Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = np.abs(X).max(axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next passes else: max_abs = np.maximum(self.max_abs_, max_abs) self.n_samples_seen_ += X.shape[0] self.max_abs_ = max_abs self.scale_ = _handle_zeros_in_scale(max_abs) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : {array-like, sparse matrix} The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : {array-like, sparse matrix} The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. 
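For instance, a minimal sketch on a small dense array (illustrative
values; each column below has a maximum absolute value of 2):

>>> import numpy as np
>>> from sklearn.preprocessing import maxabs_scale
>>> out = maxabs_scale(np.array([[1., -2.], [2., 1.]]))
>>> np.allclose(out, [[0.5, -1.], [1., 0.5]])
True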
Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the Interquartile Range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. .. versionadded:: 0.17 Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. 
http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). 
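A minimal sketch (illustrative values; the single feature column has
median 3 and interquartile range 2, so it is centered and divided by 2):

>>> import numpy as np
>>> from sklearn.preprocessing import robust_scale
>>> np.allclose(robust_scale(np.array([[1.], [3.], [5.]])), [[-1.], [0.], [1.]])
True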
Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.RobustScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0., 0., 1.], [ 1., 2., 3., 4., 6., 9.], [ 1., 4., 5., 16., 20., 25.]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0.], [ 1., 2., 3., 6.], [ 1., 4., 5., 20.]]) Attributes ---------- powers_ : array, shape (n_input_features, n_output_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. 
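For instance, with the default ``include_bias=True`` and ``interaction_only=False`` the number of output columns is the binomial coefficient ``C(n_features + degree, degree)``, so 10 input features expanded to degree 3 already produce ``C(13, 3) = 286`` columns.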
See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. """ check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : boolean, default False whether to return the computed norms See also -------- :class:`sklearn.preprocessing.Normalizer` to perform normalization using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. 
threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. 
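A minimal sketch (illustrative values; a linear kernel is used, so the
centered training kernel has zero row and column means):

>>> import numpy as np
>>> from sklearn.preprocessing import KernelCenterer
>>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
>>> K = X.dot(X.T)
>>> K_centered = KernelCenterer().fit_transform(K)
>>> np.allclose(K_centered.mean(axis=0), 0)
True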
""" def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K, dtype=FLOAT_DTYPES) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FLOAT_DTYPES) K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : {array, sparse matrix}, shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. 
Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if isinstance(selected, six.string_types) and selected == "all": return transform(X) X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : number of categorical values per feature. Each feature value should be in ``range(n_values)`` - array : ``n_values[i]`` is the number of categorical values in ``X[:, i]``. Each feature value should be in ``range(n_values[i])`` categorical_features: "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 
'numpy.float64'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float64, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape [n_samples, n_feature] Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those categorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. 
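# Illustrative example of the masking below (values assumed, not taken
# from any particular dataset): if fit saw n_values_ = [2, 3] and a row
# to transform is [1, 5], then 5 >= 3 marks the second feature as
# unknown; with handle_unknown='ignore' that entry is simply dropped
# from the sparse output, with 'error' a ValueError is raised.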
mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either 'error' or " "'ignore', got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X.ravel()[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape [n_samples, n_features] Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
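# A rough usage sketch of the unknown-category handling above
# (illustrative values, not part of the original module):
#
# >>> from sklearn.preprocessing import OneHotEncoder
# >>> enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
# >>> _ = enc.fit([[0, 0], [1, 1]])
# >>> enc.transform([[0, 5]])   # 5 was never seen for the second feature
# array([[ 1.,  0.,  0.,  0.]])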
mit
ryanvarley/ExoData
exodata/astroclasses.py
1
42589
""" Contains structural classes ie binary, star, planet etc which mimic the xml structure with objects """ import sys import math from pkg_resources import resource_stream import logging import numpy as np import astropy.coordinates import astropy.units as u from . import equations as eq from . import astroquantities as aq from . import assumptions as assum from . import flags from . import params as ed_params logger = logging.getLogger('') class _BaseObject(object): def __init__(self, params=None): self.children = [] self.parent = False self.classType = 'BaseObject' self.flags = flags.Flags() self.params = {} if params is not None: self._updateParams(params) # TODO value validator? def _addChild(self, child): self.children.append(child) def _updateParams(self, params): """ This method updates parameters allowing for any validation / unit additions in the near future """ self.params.update(params) def _getParentClass(self, startClass, parentClass): """ gets the parent class by calling successive parent classes with .parent until parentclass is matched. """ try: if not startClass: # reached system with no hits raise AttributeError except AttributeError: # i.e calling binary on an object without one raise HierarchyError('This object ({0}) has no {1} as a parent object'.format(self.name, parentClass)) if startClass.classType == parentClass: return startClass else: return self._getParentClass(startClass.parent, parentClass) @property def name(self): # TODO variable for altnames try: return self.params['name'] except KeyError: try: return self.parent.name except AttributeError: return 'Un-named ' + self.classType except AttributeError: return 'Un-named ' + self.classType def __repr__(self): return '{0}({1!r})'.format(self.classType, self.name) def getParam(self, paramKey): """ Fetches a parameter from the params dictionary. If it's not there it will return NaN. This allows the use of list comprehensions over the entire planet set without KeyErrors. 
NaN was used as unlike False and None, NaN < 1 and NaN > 1 are both False """ try: return self.params[paramKey] except KeyError: return np.NaN def __eq__(self, other): """ check the parameter dictionaries for both clases are the same (and both are of the same class) """ if type(self) == type(other): return self.params == other.params else: return False @property def system(self): return self._getParentClass(self.parent, 'System') class System(_BaseObject): def __init__(self, *args, **kwargs): _BaseObject.__init__(self, *args, **kwargs) self.classType = 'System' @property def ra(self): return self.getParam('rightascension') @ra.setter def ra(self, ra): self.params['rightascension'] = ra @property def dec(self): return self.getParam('declination') @dec.setter def dec(self, dec): self.params['declination'] = dec @property def d(self): return self.getParam('distance') @d.setter def d(self, d): d = d.rescale(aq.pc) self.params['distance'] = d @property def stars(self): return self.children # TODO child could be a binary or planet @property def epoch(self): return self.getParam('epoch') @epoch.setter def epoch(self, epoch): self.params['epoch'] = epoch class PlanetAndBinaryCommon(_BaseObject): def __init__(self, *args, **kwargs): _BaseObject.__init__(self, *args, **kwargs) self.classType = 'PlanetAndBinaryCommon' @property def i(self): return self.getParam('inclination') @i.setter def i(self, i): i = i.rescale(aq.deg) self.params['inclination'] = i @property def e(self): return self.getParam('eccentricity') @e.setter def e(self, e): self.params['eccentricity'] = e @property def P(self): period = self.getParam('period') if period is not np.nan: return period elif ed_params.estimateMissingValues: self.flags.addFlag('Calculated Period') return self.calcPeriod() else: return np.nan @P.setter def P(self, P): P = P.rescale(aq.day) self.params['period'] = P def calcPeriod(self): raise NotImplementedError('Only implemented for Binary and Planet child classes') @property def a(self): sma = self.getParam('semimajoraxis') if sma is np.nan and ed_params.estimateMissingValues: if self.getParam('period') is not np.nan: sma = self.calcSMA() # calc using period self.flags.addFlag('Calculated SMA') return sma else: return np.nan else: return sma @a.setter def a(self, a): a = a.rescale(aq.au) self.params['semimajoraxis'] = a def calcSMA(self): raise NotImplementedError('Only implemented for Binary and Planet child classes') @property def transittime(self): return self.getParam('transittime') @transittime.setter def transittime(self, transittime): self.params['transittime'] = transittime @property def periastron(self): peri = self.getParam('periastron') if math.isnan(peri) and self.e == 0: peri = 0 * aq.deg return peri @periastron.setter def periastron(self, periastron): self.params['periastron'] = periastron @property def longitude(self): return self.getParam('longitude') @longitude.setter def longitude(self, longitude): self.params['longitude'] = longitude @property def ascendingnode(self): return self.getParam('ascendingnode') @ascendingnode.setter def ascendingnode(self, ascendingnode): self.params['ascendingnode'] = ascendingnode @property def separation(self): return self.getParam('separation') @separation.setter def seperation(self, seperation): self.params['seperation'] = seperation class StarAndBinaryCommon(_BaseObject): def __init__(self, *args, **kwargs): _BaseObject.__init__(self, *args, **kwargs) self.classType = 'StarAndBinaryCommon' @property def magU(self): return self.getParam('magU') @magU.setter 
def magU(self, mag): self.params['magU'] = mag @property def magB(self): return self.getParam('magB') @magB.setter def magB(self, mag): self.params['magB'] = mag @property def magH(self): return self.getParam('magH') @magH.setter def magH(self, mag): self.params['magH'] = mag @property def magI(self): return self.getParam('magI') @magI.setter def magI(self, mag): self.params['magI'] = mag @property def magJ(self): return self.getParam('magJ') @magJ.setter def magJ(self, mag): self.params['magJ'] = mag @property def magK(self): return self.getParam('magK') @magK.setter def magK(self, mag): self.params['magK'] = mag @property def magV(self): return self.getParam('magV') @magV.setter def magV(self, mag): self.params['magV'] = mag @property def magL(self): return self.getParam('magL') @magL.setter def magL(self, mag): self.params['magL'] = mag @property def magM(self): return self.getParam('magM') @magM.setter def magM(self, mag): self.params['magM'] = mag @property def magN(self): return self.getParam('magN') @magN.setter def magN(self, mag): self.params['magN'] = mag class StarAndPlanetCommon(_BaseObject): def __init__(self, *args, **kwargs): _BaseObject.__init__(self, *args, **kwargs) self.classType = 'StarAndPlanetCommon' @property def age(self): return self.getParam('age') @age.setter def age(self, age): age = age.rescale(aq.Gyear) self.params['age'] = age @property # allows stars and planets to access system values by propagating up def ra(self): return self.parent.ra @ra.setter def ra(self, ra): self.parent.ra = ra @property def dec(self): return self.parent.dec @dec.setter def dec(self, dec): self.parent.dec = dec @property def d(self): return self.parent.d @d.setter def d(self, d): self.parent.d = d @property def R(self): return self.getParam('radius') @R.setter def R(self, R): self.params['radius'] = R @property def T(self): """ Looks for the temperature in the catalogue, if absent it calculates it using calcTemperature() :return: planet temperature """ paramTemp = self.getParam('temperature') if not paramTemp is np.nan: return paramTemp elif ed_params.estimateMissingValues: self.flags.addFlag('Calculated Temperature') return self.calcTemperature() else: return np.nan @T.setter def T(self, T): T = T.rescale(aq.K) self.params['temperature'] = T @property def M(self): return self.getParam('mass') @M.setter def M(self, M): M = M.rescale(aq.M_j) self.params['mass'] = M def calcTemperature(self): raise NotImplementedError('Only implemented for Stars and Planet child classes') @property def binary(self): return self._getParentClass(self, 'Binary') def calcSurfaceGravity(self): return eq.SurfaceGravity(self.M, self.R).g def calcLogg(self): return eq.Logg(self.M, self.R).logg def calcDensity(self): if self.M is np.nan or self.R is np.nan: return np.nan else: return eq.Density(self.M, self.R).density class Binary(PlanetAndBinaryCommon, StarAndBinaryCommon): # TODO add binary methods and variables, remove unused one from starcommon def __init__(self, *args, **kwargs): StarAndBinaryCommon.__init__(self, *args, **kwargs) PlanetAndBinaryCommon.__init__(self, *args, **kwargs) self.classType = 'Binary' @property def stars(self): return self.children @property def d(self): return self.parent.d def calcPeriod(self): raise NotImplementedError # TODO def calcSMA(self): raise NotImplementedError # TODO class Star(StarAndPlanetCommon, StarAndBinaryCommon): def __init__(self, *args, **kwargs): StarAndPlanetCommon.__init__(self, *args, **kwargs) self.classType = 'Star' @property def d(self): """ Note this 
should work from child parents as .d propergates, calculates using the star estimation method estimateDistance and estimateAbsoluteMagnitude """ # TODO this will only work from a star or below. good thing? d = self.parent.d if ed_params.estimateMissingValues: if d is np.nan: d = self.estimateDistance() if d is not np.nan: self.flags.addFlag('Estimated Distance') return d else: return np.nan def calcLuminosity(self): return eq.StellarLuminosity(self.R, self.T).L def calcTemperature(self): """ uses equations.starTemperature to estimate temperature based on main sequence relationship """ return eq.estimateStellarTemperature(self.M) def _get_or_convert_magnitude(self, mag_letter): """ Takes input of the magnitude letter and ouputs the magnitude fetched from the catalogue or a converted value :return: """ allowed_mags = "UBVJIHKLMN" catalogue_mags = 'BVIJHK' if mag_letter not in allowed_mags or not len(mag_letter) == 1: raise ValueError("Magnitude letter must be a single letter in {0}".format(allowed_mags)) mag_str = 'mag'+mag_letter mag_val = self.getParam(mag_str) if isNanOrNone(mag_val) and ed_params.estimateMissingValues: # then we need to estimate it! # old style dict comprehension for python 2.6 mag_dict = dict(('mag'+letter, self.getParam('mag'+letter)) for letter in catalogue_mags) mag_class = Magnitude(self.spectralType, **mag_dict) try: mag_conversion = mag_class.convert(mag_letter) # logger.debug('Star Class: Conversion to {0} successful, got {1}'.format(mag_str, mag_conversion)) self.flags.addFlag('Estimated mag{0}'.format(mag_letter)) return mag_conversion except ValueError as e: # cant convert logger.exception(e) # logger.debug('Cant convert to {0}'.format(mag_letter)) return np.nan else: # logger.debug('returning {0}={1} from catalogue'.format(mag_str, mag_val)) return mag_val @property def magU(self): return self._get_or_convert_magnitude('U') @property def magB(self): return self._get_or_convert_magnitude('B') @property def magV(self): return self._get_or_convert_magnitude('V') @property def magJ(self): return self._get_or_convert_magnitude('J') @property def magI(self): return self._get_or_convert_magnitude('I') @property def magH(self): return self._get_or_convert_magnitude('H') @property def magK(self): return self._get_or_convert_magnitude('K') @property def magL(self): return self._get_or_convert_magnitude('L') @property def magM(self): return self._get_or_convert_magnitude('M') @property def magN(self): return self._get_or_convert_magnitude('N') @property def Z(self): return self.getParam('metallicity') @Z.setter def Z(self, Z): self.params['metallicity'] = Z @property def spectralType(self): return self.getParam('spectraltype') @spectralType.setter def spectralType(self, spectraltype): self.params['spectraltype'] = spectraltype @property def planets(self): return self.children def getLimbdarkeningCoeff(self, wavelength=1.22): # TODO replace with pylightcurve """ Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity. 
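Example (illustrative; ``star`` stands for any ``Star`` instance with T, logg and Z available, and the returned coefficients depend on the bundled ``quadratic.dat`` table): ``u1, u2 = star.getLimbdarkeningCoeff(wavelength=1.22)`` gives the quadratic coefficients interpolated at 1.22 microns.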
:param wavelength: microns :type wavelength: float :return: limb darkening coefficients 1 and 2 """ # TODO check this returns correct value - im not certain # The intervals of values in the tables tempind = [ 3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500., 5750., 6000., 6250., 6500., 6750., 7000., 7250., 7500., 7750., 8000., 8250., 8500., 8750., 9000., 9250., 9500., 9750., 10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750., 12000., 12250., 12500., 12750., 13000., 14000., 15000., 16000., 17000., 19000., 20000., 21000., 22000., 23000., 24000., 25000., 26000., 27000., 28000., 29000., 30000., 31000., 32000., 33000., 34000., 35000., 36000., 37000., 38000., 39000., 40000., 41000., 42000., 43000., 44000., 45000., 46000., 47000., 48000., 49000., 50000.] lggind = [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5.] mhind = [-5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, -0.3, -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.] # Choose the values in the table nearest our parameters tempselect = _findNearest(tempind, float(self.T)) lgselect = _findNearest(lggind, float(self.calcLogg())) mhselect = _findNearest(mhind, float(self.Z)) quadratic_filepath = resource_stream(__name__, 'data/quadratic.dat') coeffTable = np.loadtxt(quadratic_filepath) foundValues = False for i in range(len(coeffTable)): if coeffTable[i, 2] == lgselect and coeffTable[i, 3] == tempselect and coeffTable[i, 4] == mhselect: if coeffTable[i, 0] == 1: u1array = coeffTable[i, 8:] # Limb darkening parameter u1 for each wl in waveind u2array = coeffTable[i+1, 8:] foundValues = True break if not foundValues: raise ValueError('No limb darkening values could be found') # TODO replace with better exception waveind = [0.365, 0.445, 0.551, 0.658, 0.806, 1.22, 1.63, 2.19, 3.45] # Wavelengths available in table # Interpolates the value at wavelength from values in the table (waveind) u1AtWavelength = np.interp(wavelength, waveind, u1array, left=0, right=0) u2AtWavelength = np.interp(wavelength, waveind, u2array, left=0, right=0) return u1AtWavelength, u2AtWavelength def estimateAbsoluteMagnitude(self): return eq.estimateAbsoluteMagnitude(self.spectralType) def estimateDistance(self): # TODO handle other mags than V if self.magV is not np.nan: return eq.estimateDistance(self.magV, self.estimateAbsoluteMagnitude()) else: return np.nan class Planet(StarAndPlanetCommon, PlanetAndBinaryCommon): def __init__(self, *args, **kwargs): StarAndPlanetCommon.__init__(self, *args, **kwargs) PlanetAndBinaryCommon.__init__(self, *args, **kwargs) self.classType = 'Planet' @property def isTransiting(self): """ Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented """ try: isTransiting = self.params['istransiting'] except KeyError: return False if isTransiting == '1': return True else: return False def calcTransitDuration(self, circular=False): """ Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`) """ try: if circular: return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i) else: return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td except (ValueError, AttributeError, # caused by trying to rescale nan i.e. missing i value HierarchyError): # i.e. 
planets that dont orbit stars return np.nan def calcScaleHeight(self): raise NotImplementedError # return eq.scaleHeight(self.T, , self.g) # TODO mu based on assumptions def calcTransitDepth(self): return eq.TransitDepth(self.star.R, self.R).depth def type(self): return assum.planetType(self.T, self.M, self.R) def massType(self): return assum.planetMassType(self.M) def radiusType(self): return assum.planetRadiusType(self.R) def tempType(self): return assum.planetTempType(self.T) @property def mu(self): # TODO make getter look in params first calc if not molweight = self.getParam('molweight') if molweight is np.nan: # Use assumptions if self.M is not np.nan: return assum.planetMu(self.massType()) elif self.R is not np.nan: return assum.planetMu(self.radiusType()) else: return np.nan else: return molweight @mu.setter def mu(self, mu): mu = mu.rescale(aq.atomic_mass_unit) self.params['moleight'] = mu @property def albedo(self): albedo = self.getParam('albedo') if albedo is not np.nan: return albedo elif self.getParam('temperature') is not np.nan: planetClass = self.tempType() elif self.M is not np.nan: planetClass = self.massType() elif self.R is not np.nan: planetClass = self.radiusType() else: return np.nan return assum.planetAlbedo(planetClass) @albedo.setter def albedo(self, albedo): albedo = albedo self.params['albedo'] = albedo def calcTemperature(self): """ Calculates the temperature using which uses equations.MeanPlanetTemp, albedo assumption and potentially equations.starTemperature. issues - you cant get the albedo assumption without temp but you need it to calculate the temp. """ try: return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p except (ValueError, HierarchyError): # ie missing value (.a) returning nan return np.nan def estimateMass(self): density = assum.planetDensity(self.radiusType()) return eq.Density(None, self.R, density).M def calcSMA(self): """ Calculates the semi-major axis from Keplers Third Law """ try: return eq.KeplersThirdLaw(None, self.star.M, self.P).a except HierarchyError: return np.nan def calcSMAfromT(self, epsilon=0.7): """ Calculates the semi-major axis based on planet temperature """ return eq.MeanPlanetTemp(self.albedo(), self.star.T, self.star.R, epsilon, self.T).a def calcPeriod(self): """ calculates period using a and stellar mass """ return eq.KeplersThirdLaw(self.a, self.star.M).P @property def discoveryMethod(self): return self.getParam('discoverymethod') @discoveryMethod.setter def discoveryMethod(self, discoverymethod): self.params['discoverymethod'] = discoverymethod @property def discoveryYear(self): try: return int(self.getParam('discoveryyear')) except ValueError: # np.nan return self.getParam('discoveryyear') @discoveryYear.setter def discoveryYear(self, discoveryYear): self.params['discoveryyear'] = discoveryYear @property def lastUpdate(self): return self.getParam('lastupdate') @property def description(self): return self.getParam('description') @property def star(self): return self._getParentClass(self.parent, 'Star') class Parameters(object): # TODO would this subclassing dict be more preferable? """ A class to hold parameter dictionaries, the input can be validated, units added and handling of multi valued fields. In future this may be better as a child of dict. 
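A rough usage sketch (illustrative values; unit handling comes from the
``_defaultUnits`` mapping set up in ``__init__``)::

    params = PlanetParameters()
    params.addParam('period', '3.52')        # stored as 3.52 * aq.day
    params.addParam('name', 'Gliese 1214 b') # plain string, no unit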
""" def __init__(self): self.params = { 'altnames': [], 'list': [], } self._defaultUnits = { # this holds quantities with no current or projected ambiguity about their unit 'age': aq.Gyear, 'distance': aq.pc, # TODO more specific unit handling here or in classes? 'magB': 1, 'magH': 1, 'magI': 1, 'magJ': 1, 'magK': 1, 'magV': 1, 'temperature': aq.K, } self.rejectTags = ('system', 'binary', 'star', 'planet', 'moon') # These are handled in their own classes def addParam(self, key, value, attrib=None): """ Checks the key dosnt already exist, adds alternate names to a seperate list Future - format input and add units - logging """ if key in self.rejectTags: return False # TODO Replace with exception # Temporary code to handle the seperation tag than can occur several times with different units. # TODO code a full multi unit solution (github issue #1) if key == 'separation': if attrib is None: return False # reject seperations without a unit try: if not attrib['unit'] == 'AU': return False # reject for now except KeyError: # a seperation attribute exists but not one for units return False if key in self.params: # if already exists if key == 'name': try: # if flagged as a primary or popular name use this one, an option should be made to use either if attrib['type'] == 'pri': # first names or popular names. oldname = self.params['name'] self.params['altnames'].append(oldname) self.params['name'] = value else: self.params['altnames'].append(value) except (KeyError, TypeError): # KeyError = no type key in attrib dict, TypeError = not a dict self.params['altnames'].append(value) elif key == 'list': self.params['list'].append(value) else: try: name = self.params['name'] except KeyError: name = 'Unnamed' print('rejected duplicate {0}: {1} in {2}'.format(key, value, name)) # TODO: log rejected value return False # TODO Replace with exception else: # If the key doesn't already exist and isn't rejected # Some tags have no value but a upperlimit in the attributes if value is None and attrib is not None: try: value = attrib['upperlimit'] except KeyError: try: value = attrib['lowerlimit'] except KeyError: return False if key == 'rightascension': value = _ra_string_to_unit(value) elif key == 'declination': value = _dec_string_to_unit(value) elif key in self._defaultUnits: try: value = float(value) * self._defaultUnits[key] except: print('caught an error with {0} - {1}'.format(key, value)) self.params[key] = value class BinaryParameters(Parameters): def __init__(self): Parameters.__init__(self) self._defaultUnits.update({ 'separation': aq.au, # TODO there is actually 2 different measurements (other is arcsec) 'periastron': aq.deg, }) class StarParameters(Parameters): def __init__(self): Parameters.__init__(self) self._defaultUnits.update({ 'mass': aq.M_s, 'metallicity': 1, 'radius': aq.R_s, }) class PlanetParameters(Parameters): def __init__(self): Parameters.__init__(self) self._defaultUnits.update({ 'discoveryyear': 1, 'mass': aq.M_j, 'radius': aq.R_j, 'inclination': aq.deg, 'eccentricity': 1, 'periastron': aq.deg, 'period': aq.day, 'semimajoraxis': aq.au, 'transittime': aq.JD, # TODO specific JD, MJF etc 'molweight': aq.atomic_mass_unit, 'separation': aq.au, # TODO there is actually 2 different measurements (other is arcsec) }) def _findNearest(arr, value): """ Finds the value in arr that value is closest to """ arr = np.array(arr) # find nearest value in array idx = (abs(arr-value)).argmin() return arr[idx] class SpectralType(object): """ Takes input of a spectral type as a string and interprets it into 
the luminosity class and stellar type. .. usage : self.lumType = Luminosity Class self.classLetter = Stellar Class (ie O B A etc) self.classNumber = Stellar Class number self.specClass = ie A8V will be A8 self.specType = ie A*V will be A8V (default for calling the class) self.original = the original string This class ignores spaces, only considers the first class if given multiple options (ie K0/K1V, GIV/V, F8-G0) ignores non-typical star classes (ie ) and ignores extra statements like G8 V+ """ def __init__(self, classString): self.original = classString self.lumType = '' self.classLetter = '' self.classNumber = '' self._parseSpecType(classString) @property def specClass(self): """ Spectral class ie A8V is A8 """ return self.classLetter + str(self.classNumber) @property def roundedSpecClass(self): """ Spectral class with rounded class number ie A8.5V is A9 """ try: classnumber = str(int(np.around(self.classNumber))) except TypeError: classnumber = str(self.classNumber) return self.classLetter + classnumber @property def specType(self): """ Spectral class ie A8V is A8V """ return self.classLetter + str(self.classNumber) + self.lumType @property def roundedSpecType(self): """ Spectral class with rounded class number ie A8.5V is A9V """ return self.roundedSpecClass + self.lumType def __repr__(self): return self.specType def _parseSpecType(self, classString): """ This class attempts to parse the spectral type. It should probably use more advanced matching use regex """ try: classString = str(classString) except UnicodeEncodeError: # This is for the benefit of 1RXS1609 which currently has the spectral type K7\pm 1V # TODO add unicode support and handling for this case / ammend the target return False # some initial cases if classString == '' or classString == 'nan': return False possNumbers = range(10) possLType = ('III', 'II', 'Iab', 'Ia0', 'Ia', 'Ib', 'IV', 'V') # in order of unique matches # remove spaces, remove slashes classString = classString.replace(' ', '') classString = classString.replace('-', '/') classString = classString.replace('\\', '/') classString = classString.split('/')[0] # TODO we do not consider slashed classes yet (intemediates) # check first 3 chars for spectral types stellarClass = classString[:3] if stellarClass in _possSpectralClasses: self.classLetter = stellarClass elif stellarClass[:2] in _possSpectralClasses: # needed because A5V wouldnt match before self.classLetter = stellarClass[:2] elif stellarClass[0] in _possSpectralClasses: self.classLetter = stellarClass[0] else: return False # assume a non standard class and fail # get number try: numIndex = len(self.classLetter) classNum = int(classString[numIndex]) if classNum in possNumbers: self.classNumber = int(classNum) # don't consider decimals here, done at the type check typeString = classString[numIndex+1:] else: return False # invalid number received except IndexError: # reached the end of the string return True except ValueError: # i.e its a letter - fail # TODO multi letter checking typeString = classString[1:] if typeString == '': # ie there is no more information as in 'A8' return True # Now check for a decimal and handle those cases if typeString[0] == '.': # handle decimal cases, we check each number in turn, add them as strings and then convert to float and add # to original number decimalNumbers = '.' 
for number in typeString[1:]: try: if int(number) in possNumbers: decimalNumbers += number else: print('Something went wrong in decimal checking') # TODO replace with logging return False # somethings gone wrong except ValueError: break # recevied a non-number (probably L class) # add decimal to classNum try: self.classNumber += float(decimalNumbers) except ValueError: # probably trying to convert '.' to a float pass typeString = typeString[len(decimalNumbers):] if len(typeString) is 0: return True # Handle luminosity class for possL in possLType: # match each possible case in turn (in order of uniqueness) Lcase = typeString[:len(possL)] # match from front with length to minimise matching say IV in '<3 CIV' if possL == Lcase: self.lumType = possL return True if not self.classNumber == '': return True else: # if there no number asumme we have a name ie 'Catac. var.' self.classLetter = '' self.classNumber = '' self.lumType = '' return False _ExampleSystemCount = 1 # Used by example.py - put here to enable global # main sequence _possSingleLetterClasses = ('O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T', 'Y', # dwarfs 'C', 'S', 'W', # Wolf-Rayet 'P', 'Q', # Non-stellar spectral types ) # skipped carbon stars with dashes ie C-R _possMultiLetterClasses = ('WNE', 'WNL', 'WCE', 'WCL', 'WO', 'WR', 'WN', 'WC', # Wolf-Rayet stars, WN/C skipped 'MS', 'MC', # intermediary carbon-related classes 'DAB', 'DAO', 'DAZ', 'DBZ', # Extended white dwarf spectral types 'DAV', 'DBV', 'DCV', # Variable star designations, GW Vir (DOV and PNNV) skipped 'DA', 'DB', 'DO', 'DQ', 'DZ', 'DC', 'DX', # white dwarf spectral types ) _possSpectralClasses = _possMultiLetterClasses + _possSingleLetterClasses # multi first class Magnitude(object): """ Holds measured magnitudes and can convert between them given a spectral class. """ def __init__(self, spectral_type, magU=None, magB=None, magV=None, magI=None, magJ=None, magH=None, magK=None, magL=None, magM=None, magN=None): if isinstance(spectral_type, SpectralType): self.spectral_type = spectral_type else: self.spectral_type = SpectralType(spectral_type) self.magU = magU self.magB = magB self.magV = magV self.magI = magI self.magJ = magJ self.magH = magH self.magK = magK self.magL = magL self.magM = magM self.magN = magN # For magDict, these should probably be grouped together self.column_for_V_conversion = { # mag column, sign (most are V-Mag (+1), some are Mag-V (-1)) 'U': (2, -1), 'B': (3, -1), 'J': (8, +1), 'H': (9, +1), 'K': (10, +1), 'L': (11, +1), 'M': (12, +1), 'N': (13, +1), } def convert(self, to_mag, from_mag=None): """ Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga (Kenyon+ 1995) ReadMe+ftp1995ApJS..101..117K Colors for main-sequence stars If from_mag isn't specified the program will cycle through provided magnitudes and choose one. Note that all magnitudes are first converted to V, and then to the requested magnitude. 
:param to_mag: magnitude to convert to :param from_mag: magnitude to convert from :return: """ allowed_mags = "UBVJIHKLMN" if from_mag: if to_mag == 'V': # If V mag is requested (1/3) - from mag specified return self._convert_to_from('V', from_mag) if from_mag == 'V': magV = self.magV else: magV = self._convert_to_from('V', from_mag) return self._convert_to_from(to_mag, 'V', magV) # if we can convert from any magnitude, try V first elif not isNanOrNone(self.magV): if to_mag == 'V': # If V mag is requested (2/3) - no need to convert return self.magV else: return self._convert_to_from(to_mag, 'V', self.magV) else: # Otherwise lets try all other magnitudes in turn order = "UBJHKLMN" # V is the intermediate step from the others, done by default if possible for mag_letter in order: try: magV = self._convert_to_from('V', mag_letter) if to_mag == 'V': # If V mag is requested (3/3) - try all other mags to convert logging.debug('Converted to magV from {0} got {1}'.format(mag_letter, magV)) return magV else: mag_val = self._convert_to_from(to_mag, 'V', magV) logging.debug('Converted to mag{0} from {1} got {2}'.format(to_mag, mag_letter, mag_val)) return mag_val except ValueError: continue # this conversion may not be possible, try another raise ValueError('Could not convert from any provided magnitudes') def _convert_to_from(self, to_mag, from_mag, fromVMag=None): """ Converts from or to V mag using the conversion tables :param to_mag: uppercase magnitude letter i.e. 'V' or 'K' :param from_mag: uppercase magnitude letter i.e. 'V' or 'K' :param fromVMag: MagV if from_mag is 'V' :return: estimated magnitude for to_mag from from_mag """ lumtype = self.spectral_type.lumType # rounds decimal types, TODO perhaps we should interpolate? specClass = self.spectral_type.roundedSpecClass if not specClass: # TODO investigate implications of this raise ValueError('Can not convert when no spectral class is given') if lumtype not in ('V', ''): raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype)) if to_mag == 'V': col, sign = self.column_for_V_conversion[from_mag] try: # TODO replace with pandas table offset = float(magDict[specClass][col]) except KeyError: raise ValueError('No data available to convert those magnitudes for that spectral type') if math.isnan(offset): raise ValueError('No data available to convert those magnitudes for that spectral type') else: from_mag_val = self.__dict__['mag'+from_mag] # safer than eval if isNanOrNone(from_mag_val): # logger.debug('2 '+from_mag) raise ValueError('You cannot convert from a magnitude you have not specified in class') return from_mag_val + (offset*sign) elif from_mag == 'V': if fromVMag is None: # trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While # this would probably be preferable it is not was was asked and therefore could give unexpected results raise ValueError('Must give fromVMag, even if it is self.magV') col, sign = self.column_for_V_conversion[to_mag] try: offset = float(magDict[specClass][col]) except KeyError: raise ValueError('No data available to convert those magnitudes for that spectral type') if math.isnan(offset): raise ValueError('No data available to convert those magnitudes for that spectral type') else: return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way else: raise ValueError('Can only convert from and to V magnitude. 
Use .convert() instead') def _createMagConversionDict(): """ loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K """ magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat') raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5') magDict = {} for row in raw_table: if sys.hexversion >= 0x03000000: starClass = row[1].decode("utf-8") # otherwise we get byte ints or b' caused by 2to3 tableData = [x.decode("utf-8") for x in row[3:]] else: starClass = row[1] tableData = row[3:] magDict[starClass] = tableData return magDict magDict = _createMagConversionDict() def isNanOrNone(val): """ Tests if val is float('nan') or None using math.isnan and is None. Needed as isnan fails if a non float is given. :param val: :return: """ if val is None: return True else: try: return math.isnan(val) except TypeError: # not a float return False def _ra_string_to_unit(ra_string): ra_split = ra_string.split(' ') hour, min, sec = ra_split ra_astropy_format = '{}h{}m{}s'.format(hour, min, sec) ra_unit = astropy.coordinates.Longitude(ra_astropy_format, unit=u.deg) return ra_unit def _dec_string_to_unit(dec_string): deg_split = dec_string.split(' ') deg, arcmin, arcsec = deg_split deg_astropy_format = '{}d{}m{}s'.format(deg, arcmin, arcsec) dec_unit = astropy.coordinates.Latitude(deg_astropy_format, unit=u.deg) return dec_unit class HierarchyError(ed_params.ExoDataError): pass
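# --- Illustrative sketch (not part of the library above) ---------------------
# _ra_string_to_unit / _dec_string_to_unit delegate parsing to
# astropy.coordinates; the plain-float helpers below only illustrate the
# underlying sexagesimal-to-decimal-degrees arithmetic. The example
# coordinate strings are arbitrary.

def ra_string_to_degrees(ra_string):
    # 'HH MM SS' -> decimal degrees; one hour of right ascension is 15 degrees
    hours, minutes, seconds = (float(x) for x in ra_string.split(' '))
    return 15.0 * (hours + minutes / 60.0 + seconds / 3600.0)

def dec_string_to_degrees(dec_string):
    # '+/-DD MM SS' -> decimal degrees, keeping the sign of the degrees field
    deg, arcmin, arcsec = (float(x) for x in dec_string.split(' '))
    sign = -1.0 if dec_string.strip().startswith('-') else 1.0
    return sign * (abs(deg) + arcmin / 60.0 + arcsec / 3600.0)

print(ra_string_to_degrees('14 29 43'))    # ~217.43 deg
print(dec_string_to_degrees('-62 40 46'))  # ~-62.68 deg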
mit
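# --- Illustrative sketch (not part of the library above) ---------------------
# Planet.calcSMA() and calcPeriod() rely on Kepler's third law via
# eq.KeplersThirdLaw. The self-contained check below uses plain SI floats
# instead of the library's quantity objects: a^3 = G * M * P^2 / (4 * pi^2).
import math

G = 6.674e-11             # gravitational constant, m^3 kg^-1 s^-2
M_star = 1.989e30         # 1 solar mass in kg
P = 365.25 * 24 * 3600.0  # orbital period of one year in seconds

a = (G * M_star * P ** 2 / (4.0 * math.pi ** 2)) ** (1.0 / 3.0)
print(a / 1.496e11)       # ~1.0, i.e. roughly 1 au, as expected for an Earth-like orbit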
vrooje/pulsar-hunters-analysis
aggregate_pulsarclass.py
1
33505
#Python 2.7.9 (default, Apr 5 2015, 22:21:35) import sys, os # file with raw classifications (csv) # put this way up here so if there are no inputs we exit quickly before even trying to load everything else try: classfile_in = sys.argv[1] except: #classfile_in = 'data/2e3d12a2-56ca-4d1f-930a-9ecc7fd39885.csv' print("\nUsage: %s classifications_infile [weight_class aggregations_outfile]" % sys.argv[0]) print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.") print(" weight_class is 1 if you want to calculate and apply user weightings, 0 otherwise.") print(" aggregations_outfile is the name of the file you want written. If you don't specify,") print(" the filename is %s by default." % outfile_default) sys.exit(0) import numpy as np # using 1.10.1 import pandas as pd # using 0.13.1 #import datetime #import dateutil.parser import json ############ Define files and settings below ############## # default outfile outfile_default = 'pulsar_aggregations.csv' rankfile_stem = 'subjects_ranked_by_weighted_class_asof_' # file with tags left in Talk, for value-added columns below talk_export_file = "project-764-tags_2016-01-15.json" # file with master list between Zooniverse metadata image filename (no source coords) and # original filename with source coords and additional info # also I get to have a variable that uses "filename" twice where each means a different thing # a filename for a file full of filenames #alliterationbiyotch filename_master_list_filename = "HTRU-N_sets_keys.csv" # this is a list of possible matches to known pulsars that was done after the fact so they # are flagged as "cand" in the database instead of "known" etc. poss_match_file = 'PossibleMatches.csv' # later we will select on tags by the project team and possibly weight them differently # note I've included the moderators and myself (though I didn't tag anything). # Also note it's possible to do this in a more general fashion using a file with project users and roles # However, hard-coding seemed the thing to do given our time constraints (and the fact that I don't think # you can currently export the user role file from the project builder) project_team = 'bretonr jocelynbb spindizzy Simon_Rookyard Polzin cristina_ilie jamesy23 ADCameron Prabu walkcr roblyon chiatan llevin benjamin_shaw bhaswati djchampion jwbmartin bstappers ElisabethB Capella05 vrooje'.split() # define the active workflow - we will ignore all classifications not on this workflow # we could make this an input but let's not get too fancy for a specific case. # for beta test #active_workflow_id = 1099 #active_workflow_major = 6 # for live project active_workflow_id = 1224 active_workflow_major = 4 # do we want sum(weighted vote count) = sum(raw vote count)? normalise_weights = True # do we want to write an extra file with just classification counts and usernames # (and a random color column, for treemaps)? 
counts_out = True counts_out_file = 'class_counts_colors.csv' ############ Set the other inputs now ############### try: apply_weight = int(sys.argv[2]) except: apply_weight = 0 try: outfile = sys.argv[3] except: outfile = outfile_default ################################################################################# ################################################################################# ################################################################################# # This is the function that actually does the aggregating def aggregate_class(grp): # translate the group to a dataframe because FML if I don't (some indexing etc is different) thegrp = pd.DataFrame(grp) # figure out what we're looping over below answers = thegrp.pulsar_classification.unique() # aggregating is a matter of grouping by different answers and summing the counts/weights byans = thegrp.groupby('pulsar_classification') ans_ct_tot = byans['count'].aggregate('sum') ans_wt_tot = byans['weight'].aggregate('sum') # we want fractions eventually, so we need denominators count_tot = np.sum(ans_ct_tot) # we could also do len(thegrp) weight_tot = np.sum(ans_wt_tot) # okay, now we should have a series of counts for each answer, one for weighted counts, and # the total votes and weighted votes for this subject. # now loop through the possible answers and create the raw and weighted vote fractions # and save the counts as well. # this is a list for now and we'll make it into a series and order the columns later class_agg = {} class_agg['count_unweighted'] = count_tot class_agg['count_weighted'] = weight_tot class_agg['subject_type'] = thegrp.subject_type.unique()[0] class_agg['filename'] = thegrp.filename.unique()[0] for a in answers: # don't be that jerk who labels things with "p0" or otherwise useless internal indices. # Use the text of the response next to this answer choice in the project builder (but strip spaces) raw_frac_label = ('p_'+a).replace(' ', '_') wt_frac_label = ('p_'+a+'_weight').replace(' ', '_') class_agg[raw_frac_label] = ans_ct_tot[a]/float(count_tot) class_agg[wt_frac_label] = ans_wt_tot[a]/float(weight_tot) # oops, this is hard-coded so that there's Yes and No as answers - sorry to those trying to generalise col_order = ["filename", "p_Yes", "p_No", "p_Yes_weight", "p_No_weight", "count_unweighted", "count_weighted", "subject_type"] return pd.Series(class_agg)[col_order] ################################################################################# ################################################################################# ################################################################################# # The new weighting assignment function allows the user to choose between different weighting schemes # though note the one in this function is not preferred for reasons explained below. def assign_weight_old(seed): # keep the two seed cases separate because we might want to use a different base for each if seed < 0.: return max([0.05, pow(1.0025, seed)]) elif seed > 0: return min([3.0, pow(1.0025, seed)]) else: return 1.0 # assigns a weight based on a seed parameter # The weight is assigned using the seed as an exponent and the number below as the base. # The number is just slightly offset from 1 so that it takes many classifications for # a user's potential weight to cap out at the max weight (3) or bottom out at the min (0.05). # Currently there are 641 "known" pulsars in the DB so the base of 1.025 is largely based on that. 
# Update: there are now about 5,000 simulated pulsars in the subject set as well, and they have a # much higher retirement limit, so that more people will have classified them and we have more info. # Note I'd rather this did a proper analysis with a confusion matrix etc but under a time crunch # we went with something simpler. def assign_weight(q, which_weight): # the floor weight for the case of which_weight == 2 # i.e. someone who has seed = 0 will have this # seed = 0 could either be equal numbers right & wrong, OR that we don't have any information c0 = 0.5 seed = q[1].seed n_gs = q[1].n_gs # Two possible weighting schemes: # which_weight == 1: w = 1.0025^(seed), bounded between 0.05 and 3.0 # which_weight == 2: w = (1 + log n_gs)^(seed/n_gs), bounded between 0.05 and 3.0 # # Weighting Scheme 1: # this is an okay weighting scheme, but it doesn't account for the fact that someone might be prolific # but not a very good classifier, and those classifiers shouldn't have a high weight. # Example: Bob does 10000 gold-standard classifications and gets 5100 right, 4900 wrong. # In this weighting scheme, Bob's weighting seed is +100, which means a weight of 1.0025^100 = 1.3, # despite the fact that Bob's classifications are consistent with random within 1%. # The weighting below this one would take the weight based on 100/10000, which is much better. if which_weight == 1: # keep the two seed cases separate because we might want to use a different base for each if seed < 0.: return max([0.05, pow(1.0025, seed)]) elif seed > 0: return min([3.0, pow(1.0025, seed)]) else: return 1.0 elif which_weight == 2: if n_gs < 1: # don't divide by or take the log of 0 # also if they didn't do any gold-standard classifications assume they have the default weight return c0 else: # note the max of 3 is unlikely to be reached, but someone could hit the floor. return min([3.0, max([0.05, c0*pow((1.0 + np.log10(n_gs)), (float(seed)/float(n_gs)))])]) else: # unweighted - so maybe don't even enter this function if which_weight is not 1 or 2... return 1.0 ################################################################################# ################################################################################# ################################################################################# # Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient # Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are # in the range of 0.7-0.9. def gini(list_of_values): sorted_list = sorted(list_of_values) height, area = 0, 0 for value in sorted_list: height += value area += height - value / 2. fair_area = height * len(list_of_values) / 2 return (fair_area - area) / fair_area ################################################################################# ################################################################################# ################################################################################# # assign a color randomly if logged in, gray otherwise def randcolor(user_label): if user_label.startswith('not-logged-in-'): # keep it confined to grays, i.e. R=G=B and not too bright, not too dark g = random.randint(25,150) return '#%02X%02X%02X' % (g,g,g) #return '#555555' else: # the lambda makes this generate a new int every time it's called, so that # in general R != G != B below. 
r = lambda: random.randint(0,255) return '#%02X%02X%02X' % (r(),r(),r()) ################################################################################# ################################################################################# ################################################################################# # These are functions that extract information from the various JSONs that are # included in the classification exports. To Do: optimise these so that one .apply() # call will extract them for everything without so many &^%@$ing loops. def get_subject_type(q): try: return q[1].subject_json[q[1].subject_id]['#Class'] except: return "cand" def get_filename(q): try: return q[1].subject_json[q[1].subject_id]['CandidateFile'] except: try: return q[1].subject_json[q[1].subject_id]['CandidateFileVertical'] except: try: return q[1].subject_json[q[1].subject_id]['CandidateFileHorizontal'] except: return "filenotfound.png" # get number of gold-standard classifications completed by a user (used if weighting) def get_n_gs(thegrp): return sum(pd.DataFrame(thegrp).seed != 0) # Something went weird with IP addresses, so use more info to determine unique users # Note the user_name still has the IP address in it if the user is not logged in; # it's just that for this specific project it's not that informative. def get_alternate_sessioninfo(row): # if they're logged in, save yourself all this trouble if not row[1]['user_name'].startswith('not-logged-in'): return row[1]['user_name'] else: metadata = row[1]['meta_json'] # IP + session, if it exists # (IP, agent, viewport_width, viewport_height) if session doesn't exist try: # start with "not-logged-in" so stuff later doesn't break return str(row[1]['user_name']) +"_"+ str(metadata['session']) except: try: viewport = str(metadata['viewport']) except: viewport = "NoViewport" try: user_agent = str(metadata['user_agent']) except: user_agent = "NoUserAgent" try: user_ip = str(row[1]['user_name']) except: user_ip = "NoUserIP" thesession = user_ip + user_agent + viewport return thesession ################################################################################# ################################################################################# ################################################################################# # Print out the input parameters just as a sanity check print("Computing aggregations using:") print(" infile: %s" % classfile_in) print(" weighted? %d" % apply_weight) print(" Will print to %s after processing." % outfile) ################################################################################# ################################################################################# ################################################################################# # # # # # Begin the main work # # # # print("Reading classifications from %s ..." % classfile_in) classifications = pd.read_csv(classfile_in) # this step can take a few minutes for a big file # Talk tags are not usually huge files so this doesn't usually take that long print("Parsing Talk tag file for team tags %s ..." 
% talk_export_file) talkjson = json.loads(open(talk_export_file).read()) talktags_all = pd.DataFrame(talkjson) # we only care about the Subject comments here, not discussions on the boards # also we only care about tags by the research team & moderators talktags = talktags_all[(talktags_all.taggable_type == "Subject") & (talktags_all.user_login.isin(project_team))].copy() # make a username-tag pair column # subject id is a string in the classifications array so force it to be one here or the match won't work talktags['subject_id'] = [str(int(q)) for q in talktags.taggable_id] talktags["user_tag"] = talktags.user_login+": #"+talktags.name+";" # when we're talking about Subject tags, taggable_id is subject_id talk_bysubj = talktags.groupby('subject_id') # this now contains all the project-team-written tags on each subject, 1 row per subject subj_tags = pd.DataFrame(talk_bysubj.user_tag.unique()) # if we need this as an explicit column #subj_tags['subject_id'] = subj_tags.index # likewise reading this matched files doesn't take long even though we have a for loop. print("Reading master list of matched filenames %s..." % filename_master_list_filename) matched_filenames = pd.read_csv(filename_master_list_filename) print("Reading from list of possible matches to known pulsars %s..." % poss_match_file) # ['Zooniverse name', 'HTRU-N name', 'Possible source'] possible_knowns = pd.read_csv(poss_match_file) possible_knowns['is_poss_known'] = [True for q in possible_knowns['Possible source']] # This section takes quite a while and it's because we have so many for loops, which I think is # in part because reading out of a dict from a column in a DataFrame needs loops when done this way # and in part because we were in a rush. # I think it's possible we could pass this to a function and reshape things there, then return # a set of new columns - but I didn't have time to figure that out under the deadlines we had. print("Making new columns and getting user labels...") # first, extract the started_at and finished_at from the annotations column classifications['meta_json'] = [json.loads(q) for q in classifications.metadata] classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json] classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json] # we need to set up a new user id column that's login name if the classification is while logged in, # session if not (right now "user_name" is login name or hashed IP and, well, read on...) # in this particular run of this particular project, session is a better tracer of uniqueness than IP # for anonymous users, because of a bug with some back-end stuff that someone else is fixing # but we also want to keep the user name if it exists, so let's use this function #classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications.iterrows()] classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications['user_name meta_json'.split()].iterrows()] classifications['created_day'] = [q[:10] for q in classifications.created_at] # Get subject info into a format we can actually use classifications['subject_json'] = [json.loads(q) for q in classifications.subject_data] # extract the subject ID because that's needed later # Note the subject ID becomes the *index* of the dict, which is actually pretty strange versus # everything else in the export, and I'd really rather it be included here as "subject_id":"1234567" etc. 
# # You can isolate the keys as a new column but then it's a DictKey type, but stringifying it adds # all these other characters that you then have to take out. Thankfully all our subject IDs are numbers # this is a little weird and there must be a better way but... it works classifications['subject_id'] = [str(q.keys()).replace("dict_keys(['", "").replace("'])", '') for q in classifications.subject_json] # extract retired status, though not sure we're actually going to use it. # also, what a mess - you have to extract the subject ID first and then use it to call the subject_json. UGH # update: we didn't use it and each of these lines takes ages, so commenting it out #classifications['retired'] = [q[1].subject_json[q[1].subject_id]['retired'] for q in classifications.iterrows()] # Get annotation info into a format we can actually use # these annotations are just a single yes or no question, yay classifications['annotation_json'] = [json.loads(q) for q in classifications.annotations] classifications['pulsar_classification'] = [q[0]['value'] for q in classifications.annotation_json] # create a weight parameter but set it to 1.0 for all classifications (unweighted) - may change later classifications['weight'] = [1.0 for q in classifications.workflow_version] # also create a count parameter, because at the time of writing this .aggregate('count') was sometimes off by 1 classifications['count'] = [1 for q in classifications.workflow_version] ####################################################### # discard classifications not in the active workflow # ####################################################### print("Picking classifications from the active workflow (id %d, version %d.*)" % (active_workflow_id, active_workflow_major)) # use any workflow consistent with this major version, e.g. 6.12 and 6.23 are both 6 so they're both ok # also check it's the correct workflow id the_active_workflow = [int(q) == active_workflow_major for q in classifications.workflow_version] this_workflow = classifications.workflow_id == active_workflow_id in_workflow = this_workflow & the_active_workflow # note I haven't saved the full DF anywhere because of memory reasons, so if you're debugging: # classifications_all = classifications.copy() classifications = classifications[in_workflow] print("Extracting filenames and subject types...") # extract whether this is a known pulsar or a candidate that needs classifying - that info is in the # "#Class" column in the subject metadata (where # means it can't be seen by classifiers). # the options are "cand" for "candidate", "known" for known pulsar, "disc" for a pulsar that has been # discovered by this team but is not yet published # do this after you choose a workflow because #Class doesn't exist for the early subjects so it will break # also don't send the entirety of classifications into the function, to save memory #classifications['subject_type'] = [get_subject_type(q) for q in classifications.iterrows()] #classifications['filename'] = [get_filename(q) for q in classifications.iterrows()] classifications['subject_type'] = [get_subject_type(q) for q in classifications['subject_id subject_json'.split()].iterrows()] classifications['filename'] = [get_filename(q) for q in classifications['subject_id subject_json'.split()].iterrows()] # Let me just pause a second to rant again about the fact that subject ID is the index of the subject_json. 
# Because of that, because the top-level access to that was-json-now-a-dict requires the subject id rather than # just being label:value pairs, I have to do an iterrows() and send part of the entire classifications DF into # a loop so that I can simultaneously access each subject ID *and* the dict, rather than just accessing the # info from the dict directly, which would be much faster. # this might be useful for a sanity check later # first_class_day = min(classifications.created_day).replace(' ', '') # last_class_day = max(classifications.created_day).replace(' ', '') # for some reason this is reporting last-classification dates that are days after the actual last # classification. Not sure? Might be because this is a front-end reporting, so if someone has set # their computer's time wrong we could get the wrong time here. # could fix that by using created_at but ... I forgot. last_class_time = max(classifications.finished_at_str)[:16].replace(' ', '_').replace('T', '_').replace(':', 'h')+"m" ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## # ####################################################### # Apply weighting function (or don't) # ####################################################### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## # classifications['seed'] = [0 for q in classifications.weight] classifications['is_gs'] = [0 for q in classifications.weight] if apply_weight > 0: print(" Computing user weights...") # for now this is assuming all subjects marked as "known" or "disc" are pulsars # and also "fake" are simulated pulsars is_known = (classifications.subject_type == 'known') | (classifications.subject_type == 'disc') | (classifications.subject_type == 'fake') #is_candidate = np.invert(is_known) # if it's a non-gold-standard classification, mark it classifications.loc[is_known, 'is_gs'] = 1 ok_incr = 1.0 # upweight if correct oops_incr = -2.0 # downweight more if incorrect # find the correct classifications of known pulsars ok_class = (is_known) & (classifications.pulsar_classification == 'Yes') # find the incorrect classifications of known pulsars oops_class = (is_known) & (classifications.pulsar_classification == 'No') # set the individual seeds classifications.loc[ok_class, 'seed'] = ok_incr classifications.loc[oops_class, 'seed'] = oops_incr # then group classifications by user name, which will weight logged in as well as not-logged-in (the latter by session) by_user = classifications.groupby('user_label') # get the user's summed seed, which goes into the exponent for the weight user_exp = by_user.seed.aggregate('sum') # then set up the DF that will contain the weights etc, and fill it user_weights = pd.DataFrame(user_exp) user_weights.columns = ['seed'] user_weights['user_label'] = user_weights.index user_weights['nclass_user'] = by_user['count'].aggregate('sum') user_weights['n_gs'] = by_user['is_gs'].aggregate('sum') user_weights['weight'] = [assign_weight(q, apply_weight) for q in user_weights.iterrows()] #user_weights['weight'] = [assign_weight_old(q) for q in user_exp] # if you want sum(unweighted classification count) == sum(weighted classification count), do this if normalise_weights: user_weights.weight *= float(len(classifications))/float(sum(user_weights.weight * user_weights.nclass_user)) # weights are assigned, now need to match them up to the main classifications table # making sure that this weight keeps the name 'weight' and the other gets renamed (suffixes flag) # if assign_weight == 0 then we won't enter this loop and the old "weights" will 
stay # as they are, i.e. == 1 uniformly. classifications_old = classifications.copy() classifications = pd.merge(classifications_old, user_weights, how='left', on='user_label', sort=False, suffixes=('_2', ''), copy=True) else: # just make a collated classification count array so we can print it to the screen by_user = classifications.groupby('user_label') user_exp = by_user.seed.aggregate('sum') user_weights = pd.DataFrame(user_exp) user_weights.columns = ['seed'] #user_weights['user_label'] = user_weights.index user_weights['nclass_user'] = by_user['count'].aggregate('sum') user_weights['n_gs'] = by_user['is_gs'].aggregate('sum') # UNWEIGHTED user_weights['weight'] = [1 for q in user_exp] # grab basic stats n_subj_tot = len(classifications.subject_data.unique()) by_subject = classifications.groupby('subject_id') subj_class = by_subject.created_at.aggregate('count') all_users = classifications.user_label.unique() n_user_tot = len(all_users) n_user_unreg = sum([q.startswith('not-logged-in-') for q in all_users]) # obviously if we didn't weight then we don't need to get stats on weights if apply_weight > 0: user_weight_mean = np.mean(user_weights.weight) user_weight_median = np.median(user_weights.weight) user_weight_25pct = np.percentile(user_weights.weight, 25) user_weight_75pct = np.percentile(user_weights.weight, 75) user_weight_min = min(user_weights.weight) user_weight_max = max(user_weights.weight) nclass_mean = np.mean(user_weights.nclass_user) nclass_median = np.median(user_weights.nclass_user) nclass_tot = len(classifications) user_weights.sort_values(['nclass_user'], ascending=False, inplace=True) # If you want to print out a file of classification counts per user, with colors for making a treemap # honestly I'm not sure why you wouldn't want to print this, as it's very little extra effort if counts_out == True: print("Printing classification counts to %s..." % counts_out_file) user_weight['color'] = [randcolor(q) for q in user_weight.index] user_weight.to_csv(counts_out_file) ## ## ## ## ## ## ## ## ## ## ## ## ## ## # ####################################################### # Print out basic project info # ####################################################### ## ## ## ## ## ## ## ## ## ## ## ## ## ## # print("%d classifications from %d users, %d registered and %d unregistered.\n" % (nclass_tot, n_user_tot, n_user_tot - n_user_unreg, n_user_unreg)) print("Mean n_class per user %.1f, median %.1f." % (nclass_mean, nclass_median)) if apply_weight > 0: print("Mean user weight %.3f, median %.3f, with the middle 50 percent of users between %.3f and %.3f." % (user_weight_mean, user_weight_median, user_weight_25pct, user_weight_75pct)) print("The min user weight is %.3f and the max user weight is %.3f.\n" % (user_weight_min, user_weight_max)) cols_print = 'nclass_user weight'.split() else: cols_print = 'nclass_user' # don't make this leaderboard public unless you want to gamify your users in ways we already know # have unintended and sometimes negative consequences. This is just for your information. 
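# --- Illustrative sketch (not part of the pipeline above) --------------------
# Toy version of the normalise_weights step: rescale user weights so that the
# weighted classification total matches the raw total. The counts and weights
# below are invented for the example.
import pandas as pd

user_weights_toy = pd.DataFrame({'nclass_user': [100, 10, 50],
                                 'weight': [1.5, 0.5, 0.8]})
n_class_total = user_weights_toy.nclass_user.sum()                               # 160
weighted_total = (user_weights_toy.weight * user_weights_toy.nclass_user).sum()  # 195.0
user_weights_toy['weight'] *= float(n_class_total) / float(weighted_total)
print((user_weights_toy.weight * user_weights_toy.nclass_user).sum())            # 160.0 again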
print("Classification leaderboard:") print(user_weights[cols_print].head(20)) print("Gini coefficient for project: %.3f" % gini(user_weight['nclass_user'])) ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## # ####################################################### # Aggregate classifications, unweighted and weighted # ####################################################### ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## # print("\nAggregating classifications...\n") class_agg = by_subject['weight count pulsar_classification subject_type filename'.split()].apply(aggregate_class) # really ought to replace all the NaNs with 0.0 ####################################################### # Write to files # ####################################################### # # add value-added columns # # let people look up the subject on Talk directly from the aggregated file class_agg['link'] = ['https://www.zooniverse.org/projects/zooniverse/pulsar-hunters/talk/subjects/'+str(q) for q in class_agg.index] # after we do the merges below the new indices might not be linked to the subject id, so save it explicitly class_agg['subject_id'] = [str(q) for q in class_agg.index] # match up all the ancillary file data. Maybe there's a faster way to do this than with a chain but meh, # it's actually not *that* slow compared to the clusterf*ck of for loops in the column assignment part above class_agg_old = class_agg.copy() class_agg_interm = pd.merge(class_agg_old, subj_tags, how='left', left_index=True, right_index=True, sort=False, copy=True) class_agg_interm2 = pd.merge(class_agg_interm, matched_filenames, how='left', left_on='filename', right_on='Pulsar Hunters File', sort=False, copy=True) class_agg = pd.merge(class_agg_interm2, possible_knowns, how='left', left_on='filename', right_on='Zooniverse name', sort=False, copy=True) # fill in the is_poss_known column with False where it is currently NaN # currently it's either True or NaN -- with pd.isnull NaN becomes True and True becomes False, so invert that. class_agg['is_poss_known'] = np.invert(pd.isnull(class_agg['is_poss_known'])) # make the list ranked by p_Yes_weight class_agg.sort_values(['subject_type','p_Yes_weight'], ascending=False, inplace=True) print("Writing aggregated output to file %s...\n" % outfile) pd.DataFrame(class_agg).to_csv(outfile) # Now make files ranked by p_Yes, one with all subjects classified and one with only candidates # /Users/vrooje/anaconda/bin/ipython:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....) 
# #!/bin/bash /Users/vrooje/anaconda/bin/python.app #class_agg.sort('p_Yes_weight', ascending=False, inplace=True) class_agg.sort_values(['p_Yes_weight'], ascending=False, inplace=True) # I'd rather note the last classification date than the date we happen to produce the file # rightnow = datetime.datetime.now().strftime('%Y-%M-%D_%H:%M') # rankfile_all = rankfile_stem + rightnow + ".csv" rankfile_all = 'all_'+rankfile_stem + last_class_time + ".csv" # there go those hard-coded columns again rank_cols = ['subject_id', 'filename', 'p_Yes_weight', 'count_weighted', 'p_Yes', 'count_unweighted', 'subject_type', 'link', 'user_tag', 'HTRU-N File'] print("Writing full ranked list to file %s...\n" % rankfile_all) # write just the weighted yes percentage, the weighted count, the subject type, and the link to the subject page # the subject ID is the index so it will be written anyway pd.DataFrame(class_agg[rank_cols]).to_csv(rankfile_all) rankfile = 'cand_allsubj_'+rankfile_stem + last_class_time + ".csv" print("Writing candidate-only ranked list to file %s...\n" % rankfile) # also only include entries where there were at least 5 weighted votes tallied # and only "cand" subject_type objects classified_candidate = (class_agg.count_weighted > 5) & (class_agg.subject_type == 'cand') pd.DataFrame(class_agg[rank_cols][classified_candidate]).to_csv(rankfile) rankfile_unk = 'cand_'+rankfile_stem + last_class_time + ".csv" print("Writing candidate-only, unknown-only ranked list to file %s...\n" % rankfile_unk) # also only include entries where there were at least 5 weighted votes tallied # and only "cand" subject_type objects classified_unknown_candidate = (classified_candidate) & (np.invert(class_agg.is_poss_known)) pd.DataFrame(class_agg[rank_cols][classified_unknown_candidate]).to_csv(rankfile_unk) # copy the candidate list into Google Drive so others can see it, overwriting previous versions # Note: this is the way I instantly shared the new aggregated results with collaborators, because # Google Drive automatically syncs with the online version. Dropbox would work too, etc. YMMV cpfile = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot print("Copying to Google Drive folder as %s..." % cpfile) os.system("cp -f '%s' '%s'" % (rankfile, cpfile)) # and the unknown candidate sub-list cpfile2 = "/Users/vrooje/Google Drive/pulsar_hunters_share/unknown_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot print("Copying to Google Drive folder as %s..." % cpfile2) os.system("cp -f '%s' '%s'" % (rankfile_unk, cpfile2)) # and just for the record, all subjects. cpfile3 = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_subjects_ranked_by_classifications_%dclass.csv" % nclass_tot print("... and %s" % cpfile3) os.system("cp -f '%s' '%s'" % (rankfile_all, cpfile3)) #done.
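# --- Illustrative sketch (not part of the script above) ----------------------
# Stand-alone version of what aggregate_class() computes for one subject: the
# raw and weighted "Yes" vote fractions from per-classification counts and
# weights. The four toy classifications below are invented for the example.
import pandas as pd

grp = pd.DataFrame({'pulsar_classification': ['Yes', 'Yes', 'No', 'Yes'],
                    'count':  [1, 1, 1, 1],
                    'weight': [1.2, 0.8, 3.0, 1.0]})
byans = grp.groupby('pulsar_classification')
ans_ct_tot = byans['count'].sum()
ans_wt_tot = byans['weight'].sum()
p_yes = ans_ct_tot['Yes'] / float(ans_ct_tot.sum())         # 3/4 = 0.75
p_yes_weight = ans_wt_tot['Yes'] / float(ans_wt_tot.sum())  # 3.0/6.0 = 0.50
print(p_yes, p_yes_weight)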
mit
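# --- Illustrative sketch (not part of the script above) ----------------------
# Numeric check of weighting scheme 2 from assign_weight():
# w = c0 * (1 + log10(n_gs)) ** (seed / n_gs), clipped to [0.05, 3.0], c0 = 0.5.
# The seed / n_gs pairs below are invented to show how the formula behaves.
import numpy as np

def weight_scheme2(seed, n_gs, c0=0.5):
    if n_gs < 1:
        return c0  # no gold-standard classifications -> default weight
    return min([3.0, max([0.05, c0 * pow(1.0 + np.log10(n_gs), float(seed) / float(n_gs))])])

print(weight_scheme2(seed=0, n_gs=0))        # 0.5    -> no information, default weight
print(weight_scheme2(seed=100, n_gs=100))    # 1.5    -> every gold-standard answer correct
print(weight_scheme2(seed=-200, n_gs=100))   # ~0.056 -> every gold-standard answer wrong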
mbayon/TFG-MachineLearning
vbig/lib/python2.7/site-packages/pandas/tests/series/test_indexing.py
3
88164
# coding=utf-8 # pylint: disable-msg=E1101,W0612 import pytest from datetime import datetime, timedelta from numpy import nan import numpy as np import pandas as pd import pandas._libs.index as _index from pandas.core.dtypes.common import is_integer, is_scalar from pandas import (Index, Series, DataFrame, isnull, date_range, NaT, MultiIndex, Timestamp, DatetimeIndex, Timedelta) from pandas.core.indexing import IndexingError from pandas.tseries.offsets import BDay from pandas._libs import tslib, lib from pandas.compat import lrange, range from pandas import compat from pandas.util.testing import (slow, assert_series_equal, assert_almost_equal, assert_frame_equal) import pandas.util.testing as tm from pandas.tests.series.common import TestData JOIN_TYPES = ['inner', 'outer', 'left', 'right'] class TestSeriesIndexing(TestData): def test_get(self): # GH 6383 s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 55, 43, 54, 52, 51, 54])) result = s.get(25, 0) expected = 0 assert result == expected s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 55, 43, 54, 52, 51, 54]), index=pd.Float64Index( [25.0, 36.0, 49.0, 64.0, 81.0, 100.0, 121.0, 144.0, 169.0, 196.0, 1225.0, 1296.0, 1369.0, 1444.0, 1521.0, 1600.0, 1681.0, 1764.0, 1849.0, 1936.0], dtype='object')) result = s.get(25, 0) expected = 43 assert result == expected # GH 7407 # with a boolean accessor df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3}) vc = df.i.value_counts() result = vc.get(99, default='Missing') assert result == 'Missing' vc = df.b.value_counts() result = vc.get(False, default='Missing') assert result == 3 result = vc.get(True, default='Missing') assert result == 'Missing' def test_get_nan(self): # GH 8569 s = pd.Float64Index(range(10)).to_series() assert s.get(np.nan) is None assert s.get(np.nan, default='Missing') == 'Missing' # ensure that fixing the above hasn't broken get # with multiple elements idx = [20, 30] assert_series_equal(s.get(idx), Series([np.nan] * 2, index=idx)) idx = [np.nan, np.nan] assert_series_equal(s.get(idx), Series([np.nan] * 2, index=idx)) def test_delitem(self): # GH 5542 # should delete the item inplace s = Series(lrange(5)) del s[0] expected = Series(lrange(1, 5), index=lrange(1, 5)) assert_series_equal(s, expected) del s[1] expected = Series(lrange(2, 5), index=lrange(2, 5)) assert_series_equal(s, expected) # empty s = Series() def f(): del s[0] pytest.raises(KeyError, f) # only 1 left, del, add, del s = Series(1) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='int64'))) s[0] = 1 assert_series_equal(s, Series(1)) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='int64'))) # Index(dtype=object) s = Series(1, index=['a']) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='object'))) s['a'] = 1 assert_series_equal(s, Series(1, index=['a'])) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index( [], dtype='object'))) def test_getitem_setitem_ellipsis(self): s = Series(np.random.randn(10)) np.fix(s) result = s[...] assert_series_equal(result, s) s[...] 
= 5 assert (result == 5).all() def test_getitem_negative_out_of_bounds(self): s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10)) pytest.raises(IndexError, s.__getitem__, -11) pytest.raises(IndexError, s.__setitem__, -11, 'foo') def test_pop(self): # GH 6600 df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, }) k = df.iloc[4] result = k.pop('B') assert result == 4 expected = Series([0, 0], index=['A', 'C'], name=4) assert_series_equal(k, expected) def test_getitem_get(self): idx1 = self.series.index[5] idx2 = self.objSeries.index[5] assert self.series[idx1] == self.series.get(idx1) assert self.objSeries[idx2] == self.objSeries.get(idx2) assert self.series[idx1] == self.series[5] assert self.objSeries[idx2] == self.objSeries[5] assert self.series.get(-1) == self.series.get(self.series.index[-1]) assert self.series[5] == self.series.get(self.series.index[5]) # missing d = self.ts.index[0] - BDay() pytest.raises(KeyError, self.ts.__getitem__, d) # None # GH 5652 for s in [Series(), Series(index=list('abc'))]: result = s.get(None) assert result is None def test_iloc(self): s = Series(np.random.randn(10), index=lrange(0, 20, 2)) for i in range(len(s)): result = s.iloc[i] exp = s[s.index[i]] assert_almost_equal(result, exp) # pass a slice result = s.iloc[slice(1, 3)] expected = s.loc[2:4] assert_series_equal(result, expected) # test slice is a view result[:] = 0 assert (s[1:3] == 0).all() # list of integers result = s.iloc[[0, 2, 3, 4, 5]] expected = s.reindex(s.index[[0, 2, 3, 4, 5]]) assert_series_equal(result, expected) def test_iloc_nonunique(self): s = Series([0, 1, 2], index=[0, 1, 0]) assert s.iloc[2] == 2 def test_getitem_regression(self): s = Series(lrange(5), index=lrange(5)) result = s[lrange(5)] assert_series_equal(result, s) def test_getitem_setitem_slice_bug(self): s = Series(lrange(10), lrange(10)) result = s[-12:] assert_series_equal(result, s) result = s[-7:] assert_series_equal(result, s[3:]) result = s[:-12] assert_series_equal(result, s[:0]) s = Series(lrange(10), lrange(10)) s[-12:] = 0 assert (s == 0).all() s[:-12] = 5 assert (s == 0).all() def test_getitem_int64(self): idx = np.int64(5) assert self.ts[idx] == self.ts[5] def test_getitem_fancy(self): slice1 = self.series[[1, 2, 3]] slice2 = self.objSeries[[1, 2, 3]] assert self.series.index[2] == slice1.index[1] assert self.objSeries.index[2] == slice2.index[1] assert self.series[2] == slice1[1] assert self.objSeries[2] == slice2[1] def test_getitem_boolean(self): s = self.series mask = s > s.median() # passing list is OK result = s[list(mask)] expected = s[mask] assert_series_equal(result, expected) tm.assert_index_equal(result.index, s.index[mask]) def test_getitem_boolean_empty(self): s = Series([], dtype=np.int64) s.index.name = 'index_name' s = s[s.isnull()] assert s.index.name == 'index_name' assert s.dtype == np.int64 # GH5877 # indexing with empty series s = Series(['A', 'B']) expected = Series(np.nan, index=['C'], dtype=object) result = s[Series(['C'], dtype=object)] assert_series_equal(result, expected) s = Series(['A', 'B']) expected = Series(dtype=object, index=Index([], dtype='int64')) result = s[Series([], dtype=object)] assert_series_equal(result, expected) # invalid because of the boolean indexer # that's empty or not-aligned def f(): s[Series([], dtype=bool)] pytest.raises(IndexingError, f) def f(): s[Series([True], dtype=bool)] pytest.raises(IndexingError, f) def test_getitem_generator(self): gen = (x > 0 for x in self.series) result = self.series[gen] result2 = 
self.series[iter(self.series > 0)] expected = self.series[self.series > 0] assert_series_equal(result, expected) assert_series_equal(result2, expected) def test_type_promotion(self): # GH12599 s = pd.Series() s["a"] = pd.Timestamp("2016-01-01") s["b"] = 3.0 s["c"] = "foo" expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"], index=["a", "b", "c"]) assert_series_equal(s, expected) def test_getitem_boolean_object(self): # using column from DataFrame s = self.series mask = s > s.median() omask = mask.astype(object) # getitem result = s[omask] expected = s[mask] assert_series_equal(result, expected) # setitem s2 = s.copy() cop = s.copy() cop[omask] = 5 s2[mask] = 5 assert_series_equal(cop, s2) # nans raise exception omask[5:10] = np.nan pytest.raises(Exception, s.__getitem__, omask) pytest.raises(Exception, s.__setitem__, omask, 5) def test_getitem_setitem_boolean_corner(self): ts = self.ts mask_shifted = ts.shift(1, freq=BDay()) > ts.median() # these used to raise...?? pytest.raises(Exception, ts.__getitem__, mask_shifted) pytest.raises(Exception, ts.__setitem__, mask_shifted, 1) # ts[mask_shifted] # ts[mask_shifted] = 1 pytest.raises(Exception, ts.loc.__getitem__, mask_shifted) pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1) # ts.loc[mask_shifted] # ts.loc[mask_shifted] = 2 def test_getitem_setitem_slice_integers(self): s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) result = s[:4] expected = s.reindex([2, 4, 6, 8]) assert_series_equal(result, expected) s[:4] = 0 assert (s[:4] == 0).all() assert not (s[4:] == 0).any() def test_getitem_setitem_datetime_tz_pytz(self): tm._skip_if_no_pytz() from pytz import timezone as tz from pandas import date_range N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') ts = Series(np.random.randn(N), index=rng) # also test Timestamp tz handling, GH #2789 result = ts.copy() result["1990-01-01 09:00:00+00:00"] = 0 result["1990-01-01 09:00:00+00:00"] = ts[4] assert_series_equal(result, ts) result = ts.copy() result["1990-01-01 03:00:00-06:00"] = 0 result["1990-01-01 03:00:00-06:00"] = ts[4] assert_series_equal(result, ts) # repeat with datetimes result = ts.copy() result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0 result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4] assert_series_equal(result, ts) result = ts.copy() # comparison dates with datetime MUST be localized! 
date = tz('US/Central').localize(datetime(1990, 1, 1, 3)) result[date] = 0 result[date] = ts[4] assert_series_equal(result, ts) def test_getitem_setitem_datetime_tz_dateutil(self): tm._skip_if_no_dateutil() from dateutil.tz import tzutc from pandas._libs.tslib import _dateutil_gettz as gettz tz = lambda x: tzutc() if x == 'UTC' else gettz( x) # handle special case for utc in dateutil from pandas import date_range N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='America/New_York') ts = Series(np.random.randn(N), index=rng) # also test Timestamp tz handling, GH #2789 result = ts.copy() result["1990-01-01 09:00:00+00:00"] = 0 result["1990-01-01 09:00:00+00:00"] = ts[4] assert_series_equal(result, ts) result = ts.copy() result["1990-01-01 03:00:00-06:00"] = 0 result["1990-01-01 03:00:00-06:00"] = ts[4] assert_series_equal(result, ts) # repeat with datetimes result = ts.copy() result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0 result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4] assert_series_equal(result, ts) result = ts.copy() result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0 result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4] assert_series_equal(result, ts) def test_getitem_setitem_datetimeindex(self): N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') ts = Series(np.random.randn(N), index=rng) result = ts["1990-01-01 04:00:00"] expected = ts[4] assert result == expected result = ts.copy() result["1990-01-01 04:00:00"] = 0 result["1990-01-01 04:00:00"] = ts[4] assert_series_equal(result, ts) result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0 result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8] assert_series_equal(result, ts) lb = "1990-01-01 04:00:00" rb = "1990-01-01 07:00:00" result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] assert_series_equal(result, expected) # repeat all the above with naive datetimes result = ts[datetime(1990, 1, 1, 4)] expected = ts[4] assert result == expected result = ts.copy() result[datetime(1990, 1, 1, 4)] = 0 result[datetime(1990, 1, 1, 4)] = ts[4] assert_series_equal(result, ts) result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0 result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8] assert_series_equal(result, ts) lb = datetime(1990, 1, 1, 4) rb = datetime(1990, 1, 1, 7) result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] assert_series_equal(result, expected) result = ts[ts.index[4]] expected = ts[4] assert result == expected result = ts[ts.index[4:8]] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[ts.index[4:8]] = 0 result[4:8] = ts[4:8] assert_series_equal(result, ts) # also test partial date slicing result = ts["1990-01-02"] expected = ts[24:48] assert_series_equal(result, expected) result = ts.copy() result["1990-01-02"] = 0 result["1990-01-02"] = ts[24:48] assert_series_equal(result, ts) def test_getitem_setitem_periodindex(self): from pandas import period_range N = 50 rng = period_range('1/1/1990', periods=N, freq='H') ts = Series(np.random.randn(N), index=rng) result = ts["1990-01-01 04"] expected = ts[4] assert result == expected result = 
ts.copy() result["1990-01-01 04"] = 0 result["1990-01-01 04"] = ts[4] assert_series_equal(result, ts) result = ts["1990-01-01 04":"1990-01-01 07"] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result["1990-01-01 04":"1990-01-01 07"] = 0 result["1990-01-01 04":"1990-01-01 07"] = ts[4:8] assert_series_equal(result, ts) lb = "1990-01-01 04" rb = "1990-01-01 07" result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] assert_series_equal(result, expected) # GH 2782 result = ts[ts.index[4]] expected = ts[4] assert result == expected result = ts[ts.index[4:8]] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[ts.index[4:8]] = 0 result[4:8] = ts[4:8] assert_series_equal(result, ts) def test_getitem_median_slice_bug(self): index = date_range('20090415', '20090519', freq='2B') s = Series(np.random.randn(13), index=index) indexer = [slice(6, 7, None)] result = s[indexer] expected = s[indexer[0]] assert_series_equal(result, expected) def test_getitem_out_of_bounds(self): # don't segfault, GH #495 pytest.raises(IndexError, self.ts.__getitem__, len(self.ts)) # GH #917 s = Series([]) pytest.raises(IndexError, s.__getitem__, -1) def test_getitem_setitem_integers(self): # caused bug without test s = Series([1, 2, 3], ['a', 'b', 'c']) assert s.iloc[0] == s['a'] s.iloc[0] = 5 tm.assert_almost_equal(s['a'], 5) def test_getitem_box_float64(self): value = self.ts[5] assert isinstance(value, np.float64) def test_getitem_ambiguous_keyerror(self): s = Series(lrange(10), index=lrange(0, 20, 2)) pytest.raises(KeyError, s.__getitem__, 1) pytest.raises(KeyError, s.loc.__getitem__, 1) def test_getitem_unordered_dup(self): obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b']) assert is_scalar(obj['c']) assert obj['c'] == 0 def test_getitem_dups_with_missing(self): # breaks reindex, so need to use .loc internally # GH 4246 s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah']) expected = s.loc[['foo', 'bar', 'bah', 'bam']] result = s[['foo', 'bar', 'bah', 'bam']] assert_series_equal(result, expected) def test_getitem_dups(self): s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64) expected = Series([3, 4], index=['C', 'C'], dtype=np.int64) result = s['C'] assert_series_equal(result, expected) def test_getitem_dataframe(self): rng = list(range(10)) s = pd.Series(10, index=rng) df = pd.DataFrame(rng, index=rng) pytest.raises(TypeError, s.__getitem__, df > 5) def test_getitem_callable(self): # GH 12533 s = pd.Series(4, index=list('ABCD')) result = s[lambda x: 'A'] assert result == s.loc['A'] result = s[lambda x: ['A', 'B']] tm.assert_series_equal(result, s.loc[['A', 'B']]) result = s[lambda x: [True, False, True, True]] tm.assert_series_equal(result, s.iloc[[0, 2, 3]]) def test_setitem_ambiguous_keyerror(self): s = Series(lrange(10), index=lrange(0, 20, 2)) # equivalent of an append s2 = s.copy() s2[1] = 5 expected = s.append(Series([5], index=[1])) assert_series_equal(s2, expected) s2 = s.copy() s2.loc[1] = 5 expected = s.append(Series([5], index=[1])) assert_series_equal(s2, expected) def test_setitem_float_labels(self): # note labels are floats s = Series(['a', 'b', 'c'], index=[0, 0.5, 1]) tmp = s.copy() s.loc[1] = 'zoo' tmp.iloc[2] = 'zoo' assert_series_equal(s, tmp) def test_setitem_callable(self): # GH 12533 s = pd.Series([1, 2, 3, 4], index=list('ABCD')) s[lambda x: 'A'] = -1 tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD'))) def test_setitem_other_callable(self): # GH 13299 inc = lambda x: x + 1 s = 
pd.Series([1, 2, -1, 4]) s[s < 0] = inc expected = pd.Series([1, 2, inc, 4]) tm.assert_series_equal(s, expected) def test_slice(self): numSlice = self.series[10:20] numSliceEnd = self.series[-10:] objSlice = self.objSeries[10:20] assert self.series.index[9] not in numSlice.index assert self.objSeries.index[9] not in objSlice.index assert len(numSlice) == len(numSlice.index) assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]] assert numSlice.index[1] == self.series.index[11] assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:]) # Test return view. sl = self.series[10:20] sl[:] = 0 assert (self.series[10:20] == 0).all() def test_slice_can_reorder_not_uniquely_indexed(self): s = Series(1, index=['a', 'a', 'b', 'b', 'c']) s[::-1] # it works! def test_slice_float_get_set(self): pytest.raises(TypeError, lambda: self.ts[4.0:10.0]) def f(): self.ts[4.0:10.0] = 0 pytest.raises(TypeError, f) pytest.raises(TypeError, self.ts.__getitem__, slice(4.5, 10.0)) pytest.raises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0) def test_slice_floats2(self): s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float)) assert len(s.loc[12.0:]) == 8 assert len(s.loc[12.5:]) == 7 i = np.arange(10, 20, dtype=float) i[2] = 12.2 s.index = i assert len(s.loc[12.0:]) == 8 assert len(s.loc[12.5:]) == 7 def test_slice_float64(self): values = np.arange(10., 50., 2) index = Index(values) start, end = values[[5, 15]] s = Series(np.random.randn(20), index=index) result = s[start:end] expected = s.iloc[5:16] assert_series_equal(result, expected) result = s.loc[start:end] assert_series_equal(result, expected) df = DataFrame(np.random.randn(20, 3), index=index) result = df[start:end] expected = df.iloc[5:16] tm.assert_frame_equal(result, expected) result = df.loc[start:end] tm.assert_frame_equal(result, expected) def test_setitem(self): self.ts[self.ts.index[5]] = np.NaN self.ts[[1, 2, 17]] = np.NaN self.ts[6] = np.NaN assert np.isnan(self.ts[6]) assert np.isnan(self.ts[2]) self.ts[np.isnan(self.ts)] = 5 assert not np.isnan(self.ts[2]) # caught this bug when writing tests series = Series(tm.makeIntIndex(20).astype(float), index=tm.makeIntIndex(20)) series[::2] = 0 assert (series[::2] == 0).all() # set item that's not contained s = self.series.copy() s['foobar'] = 1 app = Series([1], index=['foobar'], name='series') expected = self.series.append(app) assert_series_equal(s, expected) # Test for issue #10193 key = pd.Timestamp('2012-01-01') series = pd.Series() series[key] = 47 expected = pd.Series(47, [key]) assert_series_equal(series, expected) series = pd.Series([], pd.DatetimeIndex([], freq='D')) series[key] = 47 expected = pd.Series(47, pd.DatetimeIndex([key], freq='D')) assert_series_equal(series, expected) def test_setitem_dtypes(self): # change dtypes # GH 4463 expected = Series([np.nan, 2, 3]) s = Series([1, 2, 3]) s.iloc[0] = np.nan assert_series_equal(s, expected) s = Series([1, 2, 3]) s.loc[0] = np.nan assert_series_equal(s, expected) s = Series([1, 2, 3]) s[0] = np.nan assert_series_equal(s, expected) s = Series([False]) s.loc[0] = np.nan assert_series_equal(s, Series([np.nan])) s = Series([False, True]) s.loc[0] = np.nan assert_series_equal(s, Series([np.nan, 1.0])) def test_set_value(self): idx = self.ts.index[10] res = self.ts.set_value(idx, 0) assert res is self.ts assert self.ts[idx] == 0 # equiv s = self.series.copy() res = s.set_value('foobar', 0) assert res is s assert res.index[-1] == 'foobar' assert res['foobar'] == 0 s = self.series.copy() s.loc['foobar'] = 0 assert 
s.index[-1] == 'foobar' assert s['foobar'] == 0 def test_setslice(self): sl = self.ts[5:20] assert len(sl) == len(sl.index) assert sl.index.is_unique def test_basic_getitem_setitem_corner(self): # invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2] with tm.assert_raises_regex(ValueError, 'tuple-index'): self.ts[:, 2] with tm.assert_raises_regex(ValueError, 'tuple-index'): self.ts[:, 2] = 2 # weird lists. [slice(0, 5)] will work but not two slices result = self.ts[[slice(None, 5)]] expected = self.ts[:5] assert_series_equal(result, expected) # OK pytest.raises(Exception, self.ts.__getitem__, [5, slice(None, None)]) pytest.raises(Exception, self.ts.__setitem__, [5, slice(None, None)], 2) def test_basic_getitem_with_labels(self): indices = self.ts.index[[5, 10, 15]] result = self.ts[indices] expected = self.ts.reindex(indices) assert_series_equal(result, expected) result = self.ts[indices[0]:indices[2]] expected = self.ts.loc[indices[0]:indices[2]] assert_series_equal(result, expected) # integer indexes, be careful s = Series(np.random.randn(10), index=lrange(0, 20, 2)) inds = [0, 2, 5, 7, 8] arr_inds = np.array([0, 2, 5, 7, 8]) result = s[inds] expected = s.reindex(inds) assert_series_equal(result, expected) result = s[arr_inds] expected = s.reindex(arr_inds) assert_series_equal(result, expected) # GH12089 # with tz for values s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), index=['a', 'b', 'c']) expected = Timestamp('2011-01-01', tz='US/Eastern') result = s.loc['a'] assert result == expected result = s.iloc[0] assert result == expected result = s['a'] assert result == expected def test_basic_setitem_with_labels(self): indices = self.ts.index[[5, 10, 15]] cp = self.ts.copy() exp = self.ts.copy() cp[indices] = 0 exp.loc[indices] = 0 assert_series_equal(cp, exp) cp = self.ts.copy() exp = self.ts.copy() cp[indices[0]:indices[2]] = 0 exp.loc[indices[0]:indices[2]] = 0 assert_series_equal(cp, exp) # integer indexes, be careful s = Series(np.random.randn(10), index=lrange(0, 20, 2)) inds = [0, 4, 6] arr_inds = np.array([0, 4, 6]) cp = s.copy() exp = s.copy() s[inds] = 0 s.loc[inds] = 0 assert_series_equal(cp, exp) cp = s.copy() exp = s.copy() s[arr_inds] = 0 s.loc[arr_inds] = 0 assert_series_equal(cp, exp) inds_notfound = [0, 4, 5, 6] arr_inds_notfound = np.array([0, 4, 5, 6]) pytest.raises(Exception, s.__setitem__, inds_notfound, 0) pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0) # GH12089 # with tz for values s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"), index=['a', 'b', 'c']) s2 = s.copy() expected = Timestamp('2011-01-03', tz='US/Eastern') s2.loc['a'] = expected result = s2.loc['a'] assert result == expected s2 = s.copy() s2.iloc[0] = expected result = s2.iloc[0] assert result == expected s2 = s.copy() s2['a'] = expected result = s2['a'] assert result == expected def test_loc_getitem(self): inds = self.series.index[[3, 4, 7]] assert_series_equal(self.series.loc[inds], self.series.reindex(inds)) assert_series_equal(self.series.iloc[5::2], self.series[5::2]) # slice with indices d1, d2 = self.ts.index[[5, 15]] result = self.ts.loc[d1:d2] expected = self.ts.truncate(d1, d2) assert_series_equal(result, expected) # boolean mask = self.series > self.series.median() assert_series_equal(self.series.loc[mask], self.series[mask]) # ask for index value assert self.ts.loc[d1] == self.ts[d1] assert self.ts.loc[d2] == self.ts[d2] def test_loc_getitem_not_monotonic(self): d1, d2 = self.ts.index[[5, 15]] ts2 = self.ts[::2][[1, 2, 0]] 
pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2)) pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0) def test_loc_getitem_setitem_integer_slice_keyerrors(self): s = Series(np.random.randn(10), index=lrange(0, 20, 2)) # this is OK cp = s.copy() cp.iloc[4:10] = 0 assert (cp.iloc[4:10] == 0).all() # so is this cp = s.copy() cp.iloc[3:11] = 0 assert (cp.iloc[3:11] == 0).values.all() result = s.iloc[2:6] result2 = s.loc[3:11] expected = s.reindex([4, 6, 8, 10]) assert_series_equal(result, expected) assert_series_equal(result2, expected) # non-monotonic, raise KeyError s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]] pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11)) pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0) def test_loc_getitem_iterator(self): idx = iter(self.series.index[:10]) result = self.series.loc[idx] assert_series_equal(result, self.series[:10]) def test_setitem_with_tz(self): for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']: orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3, tz=tz)) assert orig.dtype == 'datetime64[ns, {0}]'.format(tz) # scalar s = orig.copy() s[1] = pd.Timestamp('2011-01-01', tz=tz) exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), pd.Timestamp('2011-01-01 00:00', tz=tz), pd.Timestamp('2016-01-01 02:00', tz=tz)]) tm.assert_series_equal(s, exp) s = orig.copy() s.loc[1] = pd.Timestamp('2011-01-01', tz=tz) tm.assert_series_equal(s, exp) s = orig.copy() s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz) tm.assert_series_equal(s, exp) # vector vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz), pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2]) assert vals.dtype == 'datetime64[ns, {0}]'.format(tz) s[[1, 2]] = vals exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz), pd.Timestamp('2011-01-01 00:00', tz=tz), pd.Timestamp('2012-01-01 00:00', tz=tz)]) tm.assert_series_equal(s, exp) s = orig.copy() s.loc[[1, 2]] = vals tm.assert_series_equal(s, exp) s = orig.copy() s.iloc[[1, 2]] = vals tm.assert_series_equal(s, exp) def test_setitem_with_tz_dst(self): # GH XXX tz = 'US/Eastern' orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3, tz=tz)) assert orig.dtype == 'datetime64[ns, {0}]'.format(tz) # scalar s = orig.copy() s[1] = pd.Timestamp('2011-01-01', tz=tz) exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz), pd.Timestamp('2011-01-01 00:00-05:00', tz=tz), pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)]) tm.assert_series_equal(s, exp) s = orig.copy() s.loc[1] = pd.Timestamp('2011-01-01', tz=tz) tm.assert_series_equal(s, exp) s = orig.copy() s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz) tm.assert_series_equal(s, exp) # vector vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz), pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2]) assert vals.dtype == 'datetime64[ns, {0}]'.format(tz) s[[1, 2]] = vals exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz), pd.Timestamp('2011-01-01 00:00', tz=tz), pd.Timestamp('2012-01-01 00:00', tz=tz)]) tm.assert_series_equal(s, exp) s = orig.copy() s.loc[[1, 2]] = vals tm.assert_series_equal(s, exp) s = orig.copy() s.iloc[[1, 2]] = vals tm.assert_series_equal(s, exp) def test_where(self): s = Series(np.random.randn(5)) cond = s > 0 rs = s.where(cond).dropna() rs2 = s[cond] assert_series_equal(rs, rs2) rs = s.where(cond, -s) assert_series_equal(rs, s.abs()) rs = s.where(cond) assert (s.shape == rs.shape) assert (rs is not s) # test alignment cond = Series([True, False, False, True, False], index=s.index) s2 = -(s.abs()) expected = 
s2[cond].reindex(s2.index[:3]).reindex(s2.index) rs = s2.where(cond[:3]) assert_series_equal(rs, expected) expected = s2.abs() expected.iloc[0] = s2[0] rs = s2.where(cond[:3], -s2) assert_series_equal(rs, expected) pytest.raises(ValueError, s.where, 1) pytest.raises(ValueError, s.where, cond[:3].values, -s) # GH 2745 s = Series([1, 2]) s[[True, False]] = [0, 1] expected = Series([0, 2]) assert_series_equal(s, expected) # failures pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3]) pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]), []) # unsafe dtype changes for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]: s = Series(np.arange(10), dtype=dtype) mask = s < 5 s[mask] = lrange(2, 7) expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype) assert_series_equal(s, expected) assert s.dtype == expected.dtype # these are allowed operations, but are upcasted for dtype in [np.int64, np.float64]: s = Series(np.arange(10), dtype=dtype) mask = s < 5 values = [2.5, 3.5, 4.5, 5.5, 6.5] s[mask] = values expected = Series(values + lrange(5, 10), dtype='float64') assert_series_equal(s, expected) assert s.dtype == expected.dtype # GH 9731 s = Series(np.arange(10), dtype='int64') mask = s > 5 values = [2.5, 3.5, 4.5, 5.5] s[mask] = values expected = Series(lrange(6) + values, dtype='float64') assert_series_equal(s, expected) # can't do these as we are forced to change the itemsize of the input # to something we cannot for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]: s = Series(np.arange(10), dtype=dtype) mask = s < 5 values = [2.5, 3.5, 4.5, 5.5, 6.5] pytest.raises(Exception, s.__setitem__, tuple(mask), values) # GH3235 s = Series(np.arange(10), dtype='int64') mask = s < 5 s[mask] = lrange(2, 7) expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64') assert_series_equal(s, expected) assert s.dtype == expected.dtype s = Series(np.arange(10), dtype='int64') mask = s > 5 s[mask] = [0] * 4 expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64') assert_series_equal(s, expected) s = Series(np.arange(10)) mask = s > 5 def f(): s[mask] = [5, 4, 3, 2, 1] pytest.raises(ValueError, f) def f(): s[mask] = [0] * 5 pytest.raises(ValueError, f) # dtype changes s = Series([1, 2, 3, 4]) result = s.where(s > 2, np.nan) expected = Series([np.nan, np.nan, 3, 4]) assert_series_equal(result, expected) # GH 4667 # setting with None changes dtype s = Series(range(10)).astype(float) s[8] = None result = s[8] assert isnull(result) s = Series(range(10)).astype(float) s[s > 8] = None result = s[isnull(s)] expected = Series(np.nan, index=[9]) assert_series_equal(result, expected) def test_where_array_like(self): # see gh-15414 s = Series([1, 2, 3]) cond = [False, True, True] expected = Series([np.nan, 2, 3]) klasses = [list, tuple, np.array, Series] for klass in klasses: result = s.where(klass(cond)) assert_series_equal(result, expected) def test_where_invalid_input(self): # see gh-15414: only boolean arrays accepted s = Series([1, 2, 3]) msg = "Boolean array expected for the condition" conds = [ [1, 0, 1], Series([2, 5, 7]), ["True", "False", "True"], [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")] ] for cond in conds: with tm.assert_raises_regex(ValueError, msg): s.where(cond) msg = "Array conditional must be same shape as self" with tm.assert_raises_regex(ValueError, msg): s.where([True]) def test_where_ndframe_align(self): msg = "Array conditional must be same shape as self" s = Series([1, 2, 3]) cond = 
[True] with tm.assert_raises_regex(ValueError, msg): s.where(cond) expected = Series([1, np.nan, np.nan]) out = s.where(Series(cond)) tm.assert_series_equal(out, expected) cond = np.array([False, True, False, True]) with tm.assert_raises_regex(ValueError, msg): s.where(cond) expected = Series([np.nan, 2, np.nan]) out = s.where(Series(cond)) tm.assert_series_equal(out, expected) def test_where_setitem_invalid(self): # GH 2702 # make sure correct exceptions are raised on invalid list assignment # slice s = Series(list('abc')) def f(): s[0:3] = list(range(27)) pytest.raises(ValueError, f) s[0:3] = list(range(3)) expected = Series([0, 1, 2]) assert_series_equal(s.astype(np.int64), expected, ) # slice with step s = Series(list('abcdef')) def f(): s[0:4:2] = list(range(27)) pytest.raises(ValueError, f) s = Series(list('abcdef')) s[0:4:2] = list(range(2)) expected = Series([0, 'b', 1, 'd', 'e', 'f']) assert_series_equal(s, expected) # neg slices s = Series(list('abcdef')) def f(): s[:-1] = list(range(27)) pytest.raises(ValueError, f) s[-3:-1] = list(range(2)) expected = Series(['a', 'b', 'c', 0, 1, 'f']) assert_series_equal(s, expected) # list s = Series(list('abc')) def f(): s[[0, 1, 2]] = list(range(27)) pytest.raises(ValueError, f) s = Series(list('abc')) def f(): s[[0, 1, 2]] = list(range(2)) pytest.raises(ValueError, f) # scalar s = Series(list('abc')) s[0] = list(range(10)) expected = Series([list(range(10)), 'b', 'c']) assert_series_equal(s, expected) def test_where_broadcast(self): # Test a variety of differently sized series for size in range(2, 6): # Test a variety of boolean indices for selection in [ # First element should be set np.resize([True, False, False, False, False], size), # Set alternating elements] np.resize([True, False], size), # No element should be set np.resize([False], size)]: # Test a variety of different numbers as content for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]: # Test numpy arrays, lists and tuples as the input to be # broadcast for arr in [np.array([item]), [item], (item, )]: data = np.arange(size, dtype=float) s = Series(data) s[selection] = arr # Construct the expected series by taking the source # data or item based on the selection expected = Series([item if use_item else data[ i] for i, use_item in enumerate(selection)]) assert_series_equal(s, expected) s = Series(data) result = s.where(~selection, arr) assert_series_equal(result, expected) def test_where_inplace(self): s = Series(np.random.randn(5)) cond = s > 0 rs = s.copy() rs.where(cond, inplace=True) assert_series_equal(rs.dropna(), s[cond]) assert_series_equal(rs, s.where(cond)) rs = s.copy() rs.where(cond, -s, inplace=True) assert_series_equal(rs, s.where(cond, -s)) def test_where_dups(self): # GH 4550 # where crashes with dups in index s1 = Series(list(range(3))) s2 = Series(list(range(3))) comb = pd.concat([s1, s2]) result = comb.where(comb < 2) expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2]) assert_series_equal(result, expected) # GH 4548 # inplace updating not working with dups comb[comb < 1] = 5 expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2]) assert_series_equal(comb, expected) comb[comb < 2] += 10 expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2]) assert_series_equal(comb, expected) def test_where_datetime(self): s = Series(date_range('20130102', periods=2)) expected = Series([10, 10], dtype='datetime64[ns]') mask = np.array([False, False]) rs = s.where(mask, [10, 10]) assert_series_equal(rs, expected) rs 
= s.where(mask, 10) assert_series_equal(rs, expected) rs = s.where(mask, 10.0) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, 10.0]) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, np.nan]) expected = Series([10, None], dtype='datetime64[ns]') assert_series_equal(rs, expected) # GH 15701 timestamps = ['2016-12-31 12:00:04+00:00', '2016-12-31 12:00:04.010000+00:00'] s = Series([pd.Timestamp(t) for t in timestamps]) rs = s.where(Series([False, True])) expected = Series([pd.NaT, s[1]]) assert_series_equal(rs, expected) def test_where_timedelta(self): s = Series([1, 2], dtype='timedelta64[ns]') expected = Series([10, 10], dtype='timedelta64[ns]') mask = np.array([False, False]) rs = s.where(mask, [10, 10]) assert_series_equal(rs, expected) rs = s.where(mask, 10) assert_series_equal(rs, expected) rs = s.where(mask, 10.0) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, 10.0]) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, np.nan]) expected = Series([10, None], dtype='timedelta64[ns]') assert_series_equal(rs, expected) def test_mask(self): # compare with tested results in test_where s = Series(np.random.randn(5)) cond = s > 0 rs = s.where(~cond, np.nan) assert_series_equal(rs, s.mask(cond)) rs = s.where(~cond) rs2 = s.mask(cond) assert_series_equal(rs, rs2) rs = s.where(~cond, -s) rs2 = s.mask(cond, -s) assert_series_equal(rs, rs2) cond = Series([True, False, False, True, False], index=s.index) s2 = -(s.abs()) rs = s2.where(~cond[:3]) rs2 = s2.mask(cond[:3]) assert_series_equal(rs, rs2) rs = s2.where(~cond[:3], -s2) rs2 = s2.mask(cond[:3], -s2) assert_series_equal(rs, rs2) pytest.raises(ValueError, s.mask, 1) pytest.raises(ValueError, s.mask, cond[:3].values, -s) # dtype changes s = Series([1, 2, 3, 4]) result = s.mask(s > 2, np.nan) expected = Series([1, 2, np.nan, np.nan]) assert_series_equal(result, expected) def test_mask_broadcast(self): # GH 8801 # copied from test_where_broadcast for size in range(2, 6): for selection in [ # First element should be set np.resize([True, False, False, False, False], size), # Set alternating elements] np.resize([True, False], size), # No element should be set np.resize([False], size)]: for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]: for arr in [np.array([item]), [item], (item, )]: data = np.arange(size, dtype=float) s = Series(data) result = s.mask(selection, arr) expected = Series([item if use_item else data[ i] for i, use_item in enumerate(selection)]) assert_series_equal(result, expected) def test_mask_inplace(self): s = Series(np.random.randn(5)) cond = s > 0 rs = s.copy() rs.mask(cond, inplace=True) assert_series_equal(rs.dropna(), s[~cond]) assert_series_equal(rs, s.mask(cond)) rs = s.copy() rs.mask(cond, -s, inplace=True) assert_series_equal(rs, s.mask(cond, -s)) def test_ix_setitem(self): inds = self.series.index[[3, 4, 7]] result = self.series.copy() result.loc[inds] = 5 expected = self.series.copy() expected[[3, 4, 7]] = 5 assert_series_equal(result, expected) result.iloc[5:10] = 10 expected[5:10] = 10 assert_series_equal(result, expected) # set slice with indices d1, d2 = self.series.index[[5, 15]] result.loc[d1:d2] = 6 expected[5:16] = 6 # because it's inclusive assert_series_equal(result, expected) # set index value self.series.loc[d1] = 4 self.series.loc[d2] = 6 assert self.series[d1] == 4 assert self.series[d2] == 6 def test_where_numeric_with_string(self): # GH 9280 s = pd.Series([1, 2, 3]) w = s.where(s > 1, 'X') assert not is_integer(w[0]) assert is_integer(w[1]) 
assert is_integer(w[2]) assert isinstance(w[0], str) assert w.dtype == 'object' w = s.where(s > 1, ['X', 'Y', 'Z']) assert not is_integer(w[0]) assert is_integer(w[1]) assert is_integer(w[2]) assert isinstance(w[0], str) assert w.dtype == 'object' w = s.where(s > 1, np.array(['X', 'Y', 'Z'])) assert not is_integer(w[0]) assert is_integer(w[1]) assert is_integer(w[2]) assert isinstance(w[0], str) assert w.dtype == 'object' def test_setitem_boolean(self): mask = self.series > self.series.median() # similiar indexed series result = self.series.copy() result[mask] = self.series * 2 expected = self.series * 2 assert_series_equal(result[mask], expected[mask]) # needs alignment result = self.series.copy() result[mask] = (self.series * 2)[0:5] expected = (self.series * 2)[0:5].reindex_like(self.series) expected[-mask] = self.series[mask] assert_series_equal(result[mask], expected[mask]) def test_ix_setitem_boolean(self): mask = self.series > self.series.median() result = self.series.copy() result.loc[mask] = 0 expected = self.series expected[mask] = 0 assert_series_equal(result, expected) def test_ix_setitem_corner(self): inds = list(self.series.index[[5, 8, 12]]) self.series.loc[inds] = 5 pytest.raises(Exception, self.series.loc.__setitem__, inds + ['foo'], 5) def test_get_set_boolean_different_order(self): ordered = self.series.sort_values() # setting copy = self.series.copy() copy[ordered > 0] = 0 expected = self.series.copy() expected[expected > 0] = 0 assert_series_equal(copy, expected) # getting sel = self.series[ordered > 0] exp = self.series[self.series > 0] assert_series_equal(sel, exp) def test_setitem_na(self): # these induce dtype changes expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]) s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) s[::2] = np.nan assert_series_equal(s, expected) # get's coerced to float, right? 
expected = Series([np.nan, 1, np.nan, 0]) s = Series([True, True, False, False]) s[::2] = np.nan assert_series_equal(s, expected) expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]) s = Series(np.arange(10)) s[:5] = np.nan assert_series_equal(s, expected) def test_basic_indexing(self): s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b']) pytest.raises(IndexError, s.__getitem__, 5) pytest.raises(IndexError, s.__setitem__, 5, 0) pytest.raises(KeyError, s.__getitem__, 'c') s = s.sort_index() pytest.raises(IndexError, s.__getitem__, 5) pytest.raises(IndexError, s.__setitem__, 5, 0) def test_int_indexing(self): s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2]) pytest.raises(KeyError, s.__getitem__, 5) pytest.raises(KeyError, s.__getitem__, 'c') # not monotonic s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1]) pytest.raises(KeyError, s.__getitem__, 5) pytest.raises(KeyError, s.__getitem__, 'c') def test_datetime_indexing(self): from pandas import date_range index = date_range('1/1/2000', '1/7/2000') index = index.repeat(3) s = Series(len(index), index=index) stamp = Timestamp('1/8/2000') pytest.raises(KeyError, s.__getitem__, stamp) s[stamp] = 0 assert s[stamp] == 0 # not monotonic s = Series(len(index), index=index) s = s[::-1] pytest.raises(KeyError, s.__getitem__, stamp) s[stamp] = 0 assert s[stamp] == 0 def test_timedelta_assignment(self): # GH 8209 s = Series([]) s.loc['B'] = timedelta(1) tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B'])) s = s.reindex(s.index.insert(0, 'A')) tm.assert_series_equal(s, Series( [np.nan, Timedelta('1 days')], index=['A', 'B'])) result = s.fillna(timedelta(1)) expected = Series(Timedelta('1 days'), index=['A', 'B']) tm.assert_series_equal(result, expected) s.loc['A'] = timedelta(1) tm.assert_series_equal(s, expected) # GH 14155 s = Series(10 * [np.timedelta64(10, 'm')]) s.loc[[1, 2, 3]] = np.timedelta64(20, 'm') expected = pd.Series(10 * [np.timedelta64(10, 'm')]) expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm')) tm.assert_series_equal(s, expected) def test_underlying_data_conversion(self): # GH 4080 df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c'])) df.set_index(['a', 'b', 'c'], inplace=True) s = Series([1], index=[(2, 2, 2)]) df['val'] = 0 df df['val'].update(s) expected = DataFrame( dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0])) expected.set_index(['a', 'b', 'c'], inplace=True) tm.assert_frame_equal(df, expected) # GH 3970 # these are chained assignments as well pd.set_option('chained_assignment', None) df = DataFrame({"aa": range(5), "bb": [2.2] * 5}) df["cc"] = 0.0 ck = [True] * len(df) df["bb"].iloc[0] = .13 # TODO: unused df_tmp = df.iloc[ck] # noqa df["bb"].iloc[0] = .15 assert df['bb'].iloc[0] == 0.15 pd.set_option('chained_assignment', 'raise') # GH 3217 df = DataFrame(dict(a=[1, 3], b=[np.nan, 2])) df['c'] = np.nan df['c'].update(pd.Series(['foo'], index=[0])) expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan])) tm.assert_frame_equal(df, expected) def test_preserveRefs(self): seq = self.ts[[5, 10, 15]] seq[1] = np.NaN assert not np.isnan(self.ts[10]) def test_drop(self): # unique s = Series([1, 2], index=['one', 'two']) expected = Series([1], index=['one']) result = s.drop(['two']) assert_series_equal(result, expected) result = s.drop('two', axis='rows') assert_series_equal(result, expected) # non-unique # GH 5248 s = Series([1, 1, 2], index=['one', 'two', 'one']) expected = Series([1, 2], index=['one', 'one']) result = 
s.drop(['two'], axis=0) assert_series_equal(result, expected) result = s.drop('two') assert_series_equal(result, expected) expected = Series([1], index=['two']) result = s.drop(['one']) assert_series_equal(result, expected) result = s.drop('one') assert_series_equal(result, expected) # single string/tuple-like s = Series(range(3), index=list('abc')) pytest.raises(ValueError, s.drop, 'bc') pytest.raises(ValueError, s.drop, ('a', )) # errors='ignore' s = Series(range(3), index=list('abc')) result = s.drop('bc', errors='ignore') assert_series_equal(result, s) result = s.drop(['a', 'd'], errors='ignore') expected = s.iloc[1:] assert_series_equal(result, expected) # bad axis pytest.raises(ValueError, s.drop, 'one', axis='columns') # GH 8522 s = Series([2, 3], index=[True, False]) assert s.index.is_object() result = s.drop(True) expected = Series([3], index=[False]) assert_series_equal(result, expected) def test_align(self): def _check_align(a, b, how='left', fill=None): aa, ab = a.align(b, join=how, fill_value=fill) join_index = a.index.join(b.index, how=how) if fill is not None: diff_a = aa.index.difference(join_index) diff_b = ab.index.difference(join_index) if len(diff_a) > 0: assert (aa.reindex(diff_a) == fill).all() if len(diff_b) > 0: assert (ab.reindex(diff_b) == fill).all() ea = a.reindex(join_index) eb = b.reindex(join_index) if fill is not None: ea = ea.fillna(fill) eb = eb.fillna(fill) assert_series_equal(aa, ea) assert_series_equal(ab, eb) assert aa.name == 'ts' assert ea.name == 'ts' assert ab.name == 'ts' assert eb.name == 'ts' for kind in JOIN_TYPES: _check_align(self.ts[2:], self.ts[:-5], how=kind) _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1) # empty left _check_align(self.ts[:0], self.ts[:-5], how=kind) _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1) # empty right _check_align(self.ts[:-5], self.ts[:0], how=kind) _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1) # both empty _check_align(self.ts[:0], self.ts[:0], how=kind) _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1) def test_align_fill_method(self): def _check_align(a, b, how='left', method='pad', limit=None): aa, ab = a.align(b, join=how, method=method, limit=limit) join_index = a.index.join(b.index, how=how) ea = a.reindex(join_index) eb = b.reindex(join_index) ea = ea.fillna(method=method, limit=limit) eb = eb.fillna(method=method, limit=limit) assert_series_equal(aa, ea) assert_series_equal(ab, eb) for kind in JOIN_TYPES: for meth in ['pad', 'bfill']: _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth) _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth, limit=1) # empty left _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth) _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth, limit=1) # empty right _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth) _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth, limit=1) # both empty _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth) _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth, limit=1) def test_align_nocopy(self): b = self.ts[:5].copy() # do copy a = self.ts.copy() ra, _ = a.align(b, join='left') ra[:5] = 5 assert not (a[:5] == 5).any() # do not copy a = self.ts.copy() ra, _ = a.align(b, join='left', copy=False) ra[:5] = 5 assert (a[:5] == 5).all() # do copy a = self.ts.copy() b = self.ts[:5].copy() _, rb = a.align(b, join='right') rb[:3] = 5 assert not (b[:3] == 5).any() # do not copy a = self.ts.copy() b = self.ts[:5].copy() _, rb = 
a.align(b, join='right', copy=False) rb[:2] = 5 assert (b[:2] == 5).all() def test_align_same_index(self): a, b = self.ts.align(self.ts, copy=False) assert a.index is self.ts.index assert b.index is self.ts.index a, b = self.ts.align(self.ts, copy=True) assert a.index is not self.ts.index assert b.index is not self.ts.index def test_align_multiindex(self): # GH 10665 midx = pd.MultiIndex.from_product([range(2), range(3), range(2)], names=('a', 'b', 'c')) idx = pd.Index(range(2), name='b') s1 = pd.Series(np.arange(12, dtype='int64'), index=midx) s2 = pd.Series(np.arange(2, dtype='int64'), index=idx) # these must be the same results (but flipped) res1l, res1r = s1.align(s2, join='left') res2l, res2r = s2.align(s1, join='right') expl = s1 tm.assert_series_equal(expl, res1l) tm.assert_series_equal(expl, res2r) expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) tm.assert_series_equal(expr, res1r) tm.assert_series_equal(expr, res2l) res1l, res1r = s1.align(s2, join='right') res2l, res2r = s2.align(s1, join='left') exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)], names=('a', 'b', 'c')) expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) tm.assert_series_equal(expl, res1l) tm.assert_series_equal(expl, res2r) expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx) tm.assert_series_equal(expr, res1r) tm.assert_series_equal(expr, res2l) def test_reindex(self): identity = self.series.reindex(self.series.index) # __array_interface__ is not defined for older numpies # and on some pythons try: assert np.may_share_memory(self.series.index, identity.index) except AttributeError: pass assert identity.index.is_(self.series.index) assert identity.index.identical(self.series.index) subIndex = self.series.index[10:20] subSeries = self.series.reindex(subIndex) for idx, val in compat.iteritems(subSeries): assert val == self.series[idx] subIndex2 = self.ts.index[10:20] subTS = self.ts.reindex(subIndex2) for idx, val in compat.iteritems(subTS): assert val == self.ts[idx] stuffSeries = self.ts.reindex(subIndex) assert np.isnan(stuffSeries).all() # This is extremely important for the Cython code to not screw up nonContigIndex = self.ts.index[::2] subNonContig = self.ts.reindex(nonContigIndex) for idx, val in compat.iteritems(subNonContig): assert val == self.ts[idx] # return a copy the same index here result = self.ts.reindex() assert not (result is self.ts) def test_reindex_nan(self): ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8]) i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2] assert_series_equal(ts.reindex(i), ts.iloc[j]) ts.index = ts.index.astype('object') # reindex coerces index.dtype to float, loc/iloc doesn't assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False) def test_reindex_series_add_nat(self): rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s') series = Series(rng) result = series.reindex(lrange(15)) assert np.issubdtype(result.dtype, np.dtype('M8[ns]')) mask = result.isnull() assert mask[-5:].all() assert not mask[:-5].any() def test_reindex_with_datetimes(self): rng = date_range('1/1/2000', periods=20) ts = Series(np.random.randn(20), index=rng) result = ts.reindex(list(ts.index[5:10])) expected = ts[5:10] tm.assert_series_equal(result, expected) result = ts[list(ts.index[5:10])] tm.assert_series_equal(result, expected) def test_reindex_corner(self): # (don't forget to fix this) I think it's fixed self.empty.reindex(self.ts.index, method='pad') # it works # corner case: pad empty series reindexed = self.empty.reindex(self.ts.index, 
method='pad') # pass non-Index reindexed = self.ts.reindex(list(self.ts.index)) assert_series_equal(self.ts, reindexed) # bad fill method ts = self.ts[::2] pytest.raises(Exception, ts.reindex, self.ts.index, method='foo') def test_reindex_pad(self): s = Series(np.arange(10), dtype='int64') s2 = s[::2] reindexed = s2.reindex(s.index, method='pad') reindexed2 = s2.reindex(s.index, method='ffill') assert_series_equal(reindexed, reindexed2) expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10)) assert_series_equal(reindexed, expected) # GH4604 s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e']) new_index = ['a', 'g', 'c', 'f'] expected = Series([1, 1, 3, 3], index=new_index) # this changes dtype because the ffill happens after result = s.reindex(new_index).ffill() assert_series_equal(result, expected.astype('float64')) result = s.reindex(new_index).ffill(downcast='infer') assert_series_equal(result, expected) expected = Series([1, 5, 3, 5], index=new_index) result = s.reindex(new_index, method='ffill') assert_series_equal(result, expected) # inferrence of new dtype s = Series([True, False, False, True], index=list('abcd')) new_index = 'agc' result = s.reindex(list(new_index)).ffill() expected = Series([True, True, False], index=list(new_index)) assert_series_equal(result, expected) # GH4618 shifted series downcasting s = Series(False, index=lrange(0, 5)) result = s.shift(1).fillna(method='bfill') expected = Series(False, index=lrange(0, 5)) assert_series_equal(result, expected) def test_reindex_nearest(self): s = Series(np.arange(10, dtype='int64')) target = [0.1, 0.9, 1.5, 2.0] actual = s.reindex(target, method='nearest') expected = Series(np.around(target).astype('int64'), target) assert_series_equal(expected, actual) actual = s.reindex_like(actual, method='nearest') assert_series_equal(expected, actual) actual = s.reindex_like(actual, method='nearest', tolerance=1) assert_series_equal(expected, actual) actual = s.reindex(target, method='nearest', tolerance=0.2) expected = Series([0, 1, np.nan, 2], target) assert_series_equal(expected, actual) def test_reindex_backfill(self): pass def test_reindex_int(self): ts = self.ts[::2] int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index) # this should work fine reindexed_int = int_ts.reindex(self.ts.index) # if NaNs introduced assert reindexed_int.dtype == np.float_ # NO NaNs introduced reindexed_int = int_ts.reindex(int_ts.index[::2]) assert reindexed_int.dtype == np.int_ def test_reindex_bool(self): # A series other than float, int, string, or object ts = self.ts[::2] bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index) # this should work fine reindexed_bool = bool_ts.reindex(self.ts.index) # if NaNs introduced assert reindexed_bool.dtype == np.object_ # NO NaNs introduced reindexed_bool = bool_ts.reindex(bool_ts.index[::2]) assert reindexed_bool.dtype == np.bool_ def test_reindex_bool_pad(self): # fail ts = self.ts[5:] bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index) filled_bool = bool_ts.reindex(self.ts.index, method='pad') assert isnull(filled_bool[:5]).all() def test_reindex_like(self): other = self.ts[::2] assert_series_equal(self.ts.reindex(other.index), self.ts.reindex_like(other)) # GH 7179 day1 = datetime(2013, 3, 5) day2 = datetime(2013, 5, 5) day3 = datetime(2014, 3, 5) series1 = Series([5, None, None], [day1, day2, day3]) series2 = Series([None, None], [day1, day3]) result = series1.reindex_like(series2, method='pad') expected = Series([5, np.nan], index=[day1, day3]) 
assert_series_equal(result, expected) def test_reindex_fill_value(self): # ----------------------------------------------------------- # floats floats = Series([1., 2., 3.]) result = floats.reindex([1, 2, 3]) expected = Series([2., 3., np.nan], index=[1, 2, 3]) assert_series_equal(result, expected) result = floats.reindex([1, 2, 3], fill_value=0) expected = Series([2., 3., 0], index=[1, 2, 3]) assert_series_equal(result, expected) # ----------------------------------------------------------- # ints ints = Series([1, 2, 3]) result = ints.reindex([1, 2, 3]) expected = Series([2., 3., np.nan], index=[1, 2, 3]) assert_series_equal(result, expected) # don't upcast result = ints.reindex([1, 2, 3], fill_value=0) expected = Series([2, 3, 0], index=[1, 2, 3]) assert issubclass(result.dtype.type, np.integer) assert_series_equal(result, expected) # ----------------------------------------------------------- # objects objects = Series([1, 2, 3], dtype=object) result = objects.reindex([1, 2, 3]) expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object) assert_series_equal(result, expected) result = objects.reindex([1, 2, 3], fill_value='foo') expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object) assert_series_equal(result, expected) # ------------------------------------------------------------ # bools bools = Series([True, False, True]) result = bools.reindex([1, 2, 3]) expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object) assert_series_equal(result, expected) result = bools.reindex([1, 2, 3], fill_value=False) expected = Series([False, True, False], index=[1, 2, 3]) assert_series_equal(result, expected) def test_select(self): n = len(self.ts) result = self.ts.select(lambda x: x >= self.ts.index[n // 2]) expected = self.ts.reindex(self.ts.index[n // 2:]) assert_series_equal(result, expected) result = self.ts.select(lambda x: x.weekday() == 2) expected = self.ts[self.ts.index.weekday == 2] assert_series_equal(result, expected) def test_cast_on_putmask(self): # GH 2746 # need to upcast s = Series([1, 2], index=[1, 2], dtype='int64') s[[True, False]] = Series([0], index=[1], dtype='int64') expected = Series([0, 2], index=[1, 2], dtype='int64') assert_series_equal(s, expected) def test_type_promote_putmask(self): # GH8387: test that changing types does not break alignment ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5) left, mask = ts.copy(), ts > 0 right = ts[mask].copy().map(str) left[mask] = right assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t)) s = Series([0, 1, 2, 0]) mask = s > 0 s2 = s[mask].map(str) s[mask] = s2 assert_series_equal(s, Series([0, '1', '2', 0])) s = Series([0, 'foo', 'bar', 0]) mask = Series([False, True, True, False]) s2 = s[mask] s[mask] = s2 assert_series_equal(s, Series([0, 'foo', 'bar', 0])) def test_head_tail(self): assert_series_equal(self.series.head(), self.series[:5]) assert_series_equal(self.series.head(0), self.series[0:0]) assert_series_equal(self.series.tail(), self.series[-5:]) assert_series_equal(self.series.tail(0), self.series[0:0]) def test_multilevel_preserve_name(self): index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=['first', 'second']) s = Series(np.random.randn(len(index)), index=index, name='sth') result = s['foo'] result2 = s.loc['foo'] assert result.name == s.name assert result2.name == s.name def test_setitem_scalar_into_readonly_backing_data(self): # GH14359: test 
that you cannot mutate a read only buffer array = np.zeros(5) array.flags.writeable = False # make the array immutable series = Series(array) for n in range(len(series)): with pytest.raises(ValueError): series[n] = 1 assert array[n] == 0 def test_setitem_slice_into_readonly_backing_data(self): # GH14359: test that you cannot mutate a read only buffer array = np.zeros(5) array.flags.writeable = False # make the array immutable series = Series(array) with pytest.raises(ValueError): series[1:3] = 1 assert not array.any() class TestTimeSeriesDuplicates(object): def setup_method(self, method): dates = [datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5)] self.dups = Series(np.random.randn(len(dates)), index=dates) def test_constructor(self): assert isinstance(self.dups, Series) assert isinstance(self.dups.index, DatetimeIndex) def test_is_unique_monotonic(self): assert not self.dups.index.is_unique def test_index_unique(self): uniques = self.dups.index.unique() expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5)]) assert uniques.dtype == 'M8[ns]' # sanity tm.assert_index_equal(uniques, expected) assert self.dups.index.nunique() == 4 # #2563 assert isinstance(uniques, DatetimeIndex) dups_local = self.dups.index.tz_localize('US/Eastern') dups_local.name = 'foo' result = dups_local.unique() expected = DatetimeIndex(expected, name='foo') expected = expected.tz_localize('US/Eastern') assert result.tz is not None assert result.name == 'foo' tm.assert_index_equal(result, expected) # NaT, note this is excluded arr = [1370745748 + t for t in range(20)] + [tslib.iNaT] idx = DatetimeIndex(arr * 3) tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) assert idx.nunique() == 20 assert idx.nunique(dropna=False) == 21 arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20)] + [NaT] idx = DatetimeIndex(arr * 3) tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) assert idx.nunique() == 20 assert idx.nunique(dropna=False) == 21 def test_index_dupes_contains(self): d = datetime(2011, 12, 5, 20, 30) ix = DatetimeIndex([d, d]) assert d in ix def test_duplicate_dates_indexing(self): ts = self.dups uniques = ts.index.unique() for date in uniques: result = ts[date] mask = ts.index == date total = (ts.index == date).sum() expected = ts[mask] if total > 1: assert_series_equal(result, expected) else: assert_almost_equal(result, expected[0]) cp = ts.copy() cp[date] = 0 expected = Series(np.where(mask, 0, ts), index=ts.index) assert_series_equal(cp, expected) pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6)) # new index ts[datetime(2000, 1, 6)] = 0 assert ts[datetime(2000, 1, 6)] == 0 def test_range_slice(self): idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000', '1/4/2000']) ts = Series(np.random.randn(len(idx)), index=idx) result = ts['1/2/2000':] expected = ts[1:] assert_series_equal(result, expected) result = ts['1/2/2000':'1/3/2000'] expected = ts[1:4] assert_series_equal(result, expected) def test_groupby_average_dup_values(self): result = self.dups.groupby(level=0).mean() expected = self.dups.groupby(self.dups.index).mean() assert_series_equal(result, expected) def test_indexing_over_size_cutoff(self): import datetime # #1821 old_cutoff = _index._SIZE_CUTOFF try: _index._SIZE_CUTOFF = 1000 # create large list of non periodic 
datetime dates = [] sec = datetime.timedelta(seconds=1) half_sec = datetime.timedelta(microseconds=500000) d = datetime.datetime(2011, 12, 5, 20, 30) n = 1100 for i in range(n): dates.append(d) dates.append(d + sec) dates.append(d + sec + half_sec) dates.append(d + sec + sec + half_sec) d += 3 * sec # duplicate some values in the list duplicate_positions = np.random.randint(0, len(dates) - 1, 20) for p in duplicate_positions: dates[p + 1] = dates[p] df = DataFrame(np.random.randn(len(dates), 4), index=dates, columns=list('ABCD')) pos = n * 3 timestamp = df.index[pos] assert timestamp in df.index # it works! df.loc[timestamp] assert len(df.loc[[timestamp]]) > 0 finally: _index._SIZE_CUTOFF = old_cutoff def test_indexing_unordered(self): # GH 2437 rng = date_range(start='2011-01-01', end='2011-01-15') ts = Series(np.random.rand(len(rng)), index=rng) ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]]) for t in ts.index: # TODO: unused? s = str(t) # noqa expected = ts[t] result = ts2[t] assert expected == result # GH 3448 (ranges) def compare(slobj): result = ts2[slobj].copy() result = result.sort_index() expected = ts[slobj] assert_series_equal(result, expected) compare(slice('2011-01-01', '2011-01-15')) compare(slice('2010-12-30', '2011-01-15')) compare(slice('2011-01-01', '2011-01-16')) # partial ranges compare(slice('2011-01-01', '2011-01-6')) compare(slice('2011-01-06', '2011-01-8')) compare(slice('2011-01-06', '2011-01-12')) # single values result = ts2['2011'].sort_index() expected = ts['2011'] assert_series_equal(result, expected) # diff freq rng = date_range(datetime(2005, 1, 1), periods=20, freq='M') ts = Series(np.arange(len(rng)), index=rng) ts = ts.take(np.random.permutation(20)) result = ts['2005'] for t in result.index: assert t.year == 2005 def test_indexing(self): idx = date_range("2001-1-1", periods=20, freq='M') ts = Series(np.random.rand(len(idx)), index=idx) # getting # GH 3070, make sure semantics work on Series/Frame expected = ts['2001'] expected.name = 'A' df = DataFrame(dict(A=ts)) result = df['2001']['A'] assert_series_equal(expected, result) # setting ts['2001'] = 1 expected = ts['2001'] expected.name = 'A' df.loc['2001', 'A'] = 1 result = df['2001']['A'] assert_series_equal(expected, result) # GH3546 (not including times on the last day) idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H') ts = Series(lrange(len(idx)), index=idx) expected = ts['2013-05'] assert_series_equal(expected, ts) idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S') ts = Series(lrange(len(idx)), index=idx) expected = ts['2013-05'] assert_series_equal(expected, ts) idx = [Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))] ts = Series(lrange(len(idx)), index=idx) expected = ts['2013'] assert_series_equal(expected, ts) # GH14826, indexing with a seconds resolution string / datetime object df = DataFrame(np.random.rand(5, 5), columns=['open', 'high', 'low', 'close', 'volume'], index=date_range('2012-01-02 18:01:00', periods=5, tz='US/Central', freq='s')) expected = df.loc[[df.index[2]]] # this is a single date, so will raise pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', ) pytest.raises(KeyError, df.__getitem__, df.index[2], ) class TestDatetimeIndexing(object): """ Also test support for datetime64[ns] in Series / DataFrame """ def setup_method(self, method): dti = DatetimeIndex(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq='Min') self.series = Series(np.random.rand(len(dti)), dti) def 
test_fancy_getitem(self): dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)) s = Series(np.arange(len(dti)), index=dti) assert s[48] == 48 assert s['1/2/2009'] == 48 assert s['2009-1-2'] == 48 assert s[datetime(2009, 1, 2)] == 48 assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48 pytest.raises(KeyError, s.__getitem__, '2009-1-3') assert_series_equal(s['3/6/2009':'2009-06-05'], s[datetime(2009, 3, 6):datetime(2009, 6, 5)]) def test_fancy_setitem(self): dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)) s = Series(np.arange(len(dti)), index=dti) s[48] = -1 assert s[48] == -1 s['1/2/2009'] = -2 assert s[48] == -2 s['1/2/2009':'2009-06-05'] = -3 assert (s[48:54] == -3).all() def test_dti_snap(self): dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002', '1/5/2002', '1/6/2002', '1/7/2002'], freq='D') res = dti.snap(freq='W-MON') exp = date_range('12/31/2001', '1/7/2002', freq='w-mon') exp = exp.repeat([3, 4]) assert (res == exp).all() res = dti.snap(freq='B') exp = date_range('1/1/2002', '1/7/2002', freq='b') exp = exp.repeat([1, 1, 1, 2, 2]) assert (res == exp).all() def test_dti_reset_index_round_trip(self): dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D') d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti) d2 = d1.reset_index() assert d2.dtypes[0] == np.dtype('M8[ns]') d3 = d2.set_index('index') assert_frame_equal(d1, d3, check_names=False) # #2329 stamp = datetime(2012, 11, 22) df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value']) df = df.set_index('Date') assert df.index[0] == stamp assert df.reset_index()['Date'][0] == stamp def test_series_set_value(self): # #1561 dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)] index = DatetimeIndex(dates) s = Series().set_value(dates[0], 1.) 
s2 = s.set_value(dates[1], np.nan) exp = Series([1., np.nan], index=index) assert_series_equal(s2, exp) # s = Series(index[:1], index[:1]) # s2 = s.set_value(dates[1], index[1]) # assert s2.values.dtype == 'M8[ns]' @slow def test_slice_locs_indexerror(self): times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)] s = Series(lrange(100000), times) s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)] def test_slicing_datetimes(self): # GH 7523 # unique df = DataFrame(np.arange(4., dtype='float64'), index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]]) result = df.loc[datetime(2001, 1, 1, 10):] assert_frame_equal(result, df) result = df.loc[:datetime(2001, 1, 4, 10)] assert_frame_equal(result, df) result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)] assert_frame_equal(result, df) result = df.loc[datetime(2001, 1, 1, 11):] expected = df.iloc[1:] assert_frame_equal(result, expected) result = df.loc['20010101 11':] assert_frame_equal(result, expected) # duplicates df = pd.DataFrame(np.arange(5., dtype='float64'), index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]]) result = df.loc[datetime(2001, 1, 1, 10):] assert_frame_equal(result, df) result = df.loc[:datetime(2001, 1, 4, 10)] assert_frame_equal(result, df) result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)] assert_frame_equal(result, df) result = df.loc[datetime(2001, 1, 1, 11):] expected = df.iloc[1:] assert_frame_equal(result, expected) result = df.loc['20010101 11':] assert_frame_equal(result, expected) def test_frame_datetime64_duplicated(self): dates = date_range('2010-07-01', end='2010-08-05') tst = DataFrame({'symbol': 'AAA', 'date': dates}) result = tst.duplicated(['date', 'symbol']) assert (-result).all() tst = DataFrame({'date': dates}) result = tst.duplicated() assert (-result).all() class TestNatIndexing(object): def setup_method(self, method): self.series = Series(date_range('1/1/2000', periods=10)) # --------------------------------------------------------------------- # NaT support def test_set_none_nan(self): self.series[3] = None assert self.series[3] is NaT self.series[3:5] = None assert self.series[4] is NaT self.series[5] = np.nan assert self.series[5] is NaT self.series[5:7] = np.nan assert self.series[6] is NaT def test_nat_operations(self): # GH 8617 s = Series([0, pd.NaT], dtype='m8[ns]') exp = s[0] assert s.median() == exp assert s.min() == exp assert s.max() == exp def test_round_nat(self): # GH14940 s = Series([pd.NaT]) expected = Series(pd.NaT) for method in ["round", "floor", "ceil"]: round_method = getattr(s.dt, method) for freq in ["s", "5s", "min", "5min", "h", "5h"]: assert_series_equal(round_method(freq), expected)
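# ---------------------------------------------------------------------
# Editor's note: the short sketch below is an illustrative addition and is
# not part of the original pandas test module. It restates, in minimal
# standalone form, the Series.where / Series.mask relationship that the
# test_where and test_mask cases above exercise: ``where`` keeps values
# where the condition is True (filling elsewhere), ``mask`` replaces values
# where the condition is True, and the two agree when the condition is
# inverted. The underscore-prefixed names are hypothetical helpers
# introduced only for this example.
import pandas as pd

_s = pd.Series([1.0, -2.0, 3.0, -4.0])
_cond = _s > 0

# keep positives, NaN elsewhere -> [1.0, NaN, 3.0, NaN]
_kept = _s.where(_cond)

# replace positives with their negation -> [-1.0, -2.0, -3.0, -4.0]
_flipped = _s.mask(_cond, -_s)

# where/mask with the complementary condition are equivalent
# (Series.equals treats NaNs in matching positions as equal)
assert _kept.equals(_s.mask(~_cond))
assert _flipped.equals(_s.where(~_cond, -_s))
# ---------------------------------------------------------------------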
mit
sergiohgz/incubator-airflow
airflow/www/views.py
2
111267
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import codecs
import copy
import datetime as dt
import inspect
import itertools
import json
import logging
import math
import os
import traceback
from collections import defaultdict
from datetime import timedelta
from functools import wraps
from textwrap import dedent

import bleach
import markdown
import nvd3
import pendulum
import pkg_resources
import sqlalchemy as sqla
from flask import (
    abort, jsonify, redirect, url_for, request, Markup, Response,
    current_app, render_template, make_response)
from flask import flash
from flask._compat import PY2
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.contrib.sqla import ModelView
from flask_admin.form.fields import DateTimeField
from flask_admin.tools import iterdecode
from jinja2 import escape
from jinja2.sandbox import ImmutableSandboxedEnvironment
from past.builtins import basestring, unicode
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import or_, desc, and_, union_all
from wtforms import (
    Form, SelectField, TextAreaField, PasswordField,
    StringField, validators)

import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_running,
                                                        set_dag_run_state_to_success,
                                                        set_dag_run_state_to_failed)
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models import XCom, DagRun
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import json_ser
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.www import utils as wwwutils
from airflow.www.forms import (DateTimeForm, DateTimeWithNumRunsForm,
                               DateTimeWithNumRunsWithDagRunsForm)
from airflow.www.validators import GreaterEqualThan

QUERY_LIMIT = 100000
CHART_LIMIT = 200000

UTF8_READER = codecs.getreader('utf-8')

dagbag = models.DagBag(settings.DAGS_FOLDER)

login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user

FILTER_BY_OWNER = False

PAGE_SIZE = conf.getint('webserver', 'page_size')

if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
    # filter_by_owner if authentication is enabled and filter_by_owner is true
    FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']


def dag_link(v, c, m, p):
    if m.dag_id is None:
        return Markup()

    dag_id = bleach.clean(m.dag_id)
    url = url_for(
        'airflow.graph',
        dag_id=dag_id,
        execution_date=m.execution_date)
    return Markup(
        '<a href="{}">{}</a>'.format(url, dag_id))


def log_url_formatter(v, c, m, p):
    return Markup(
        '<a href="{m.log_url}">'
        ' <span class="glyphicon glyphicon-book" aria-hidden="true">'
        '</span></a>').format(**locals())


def dag_run_link(v, c, m, p):
    dag_id = bleach.clean(m.dag_id)
    url = url_for(
        'airflow.graph',
        dag_id=m.dag_id,
        run_id=m.run_id,
        execution_date=m.execution_date)
    return Markup('<a href="{url}">{m.run_id}</a>'.format(**locals()))


def task_instance_link(v, c, m, p):
    dag_id = bleach.clean(m.dag_id)
    task_id = bleach.clean(m.task_id)
    url = url_for(
        'airflow.task',
        dag_id=dag_id,
        task_id=task_id,
        execution_date=m.execution_date.isoformat())
    url_root = url_for(
        'airflow.graph',
        dag_id=dag_id,
        root=task_id,
        execution_date=m.execution_date.isoformat())
    return Markup(
        """
        <span style="white-space: nowrap;">
        <a href="{url}">{task_id}</a>
        <a href="{url_root}" title="Filter on this task and upstream">
            <span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
                aria-hidden="true"></span>
        </a>
        </span>
        """.format(**locals()))


def state_token(state):
    color = State.color(state)
    return Markup(
        '<span class="label" style="background-color:{color};">'
        '{state}</span>'.format(**locals()))


def parse_datetime_f(value):
    if not isinstance(value, dt.datetime):
        return value

    return timezone.make_aware(value)


def state_f(v, c, m, p):
    return state_token(m.state)


def duration_f(v, c, m, p):
    if m.end_date and m.duration:
        return timedelta(seconds=m.duration)


def datetime_f(v, c, m, p):
    attr = getattr(m, p)
    dttm = attr.isoformat() if attr else ''
    if timezone.utcnow().isoformat()[:4] == dttm[:4]:
        dttm = dttm[5:]
    return Markup("<nobr>{}</nobr>".format(dttm))


def nobr_f(v, c, m, p):
    return Markup("<nobr>{}</nobr>".format(getattr(m, p)))


def label_link(v, c, m, p):
    try:
        default_params = ast.literal_eval(m.default_params)
    except:
        default_params = {}
    url = url_for(
        'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
        **default_params)
    return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))


def pool_link(v, c, m, p):
    url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
    return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))


def pygment_html_render(s, lexer=lexers.TextLexer):
    return highlight(
        s,
        lexer(),
        HtmlFormatter(linenos=True),
    )


def render(obj, lexer):
    out = ""
    if isinstance(obj, basestring):
        out += pygment_html_render(obj, lexer)
    elif isinstance(obj, (tuple, list)):
        for i, s in enumerate(obj):
            out += "<div>List item #{}</div>".format(i)
            out += "<div>" + pygment_html_render(s, lexer) + "</div>"
    elif isinstance(obj, dict):
        for k, v in obj.items():
            out += '<div>Dict item "{}"</div>'.format(k)
            out += "<div>" + pygment_html_render(v, lexer) + "</div>"
    return out


def wrapped_markdown(s):
    return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"


attr_renderer = {
    'bash_command': lambda x: render(x, lexers.BashLexer),
    'hql': lambda x: render(x, lexers.SqlLexer),
    'sql': lambda x: render(x, lexers.SqlLexer),
    'doc': lambda x: render(x, lexers.TextLexer),
    'doc_json': lambda x: render(x, lexers.JsonLexer),
    'doc_rst': lambda x: render(x, lexers.RstLexer),
    'doc_yaml': lambda x: render(x, lexers.YamlLexer),
    'doc_md': wrapped_markdown,
    'python_callable': lambda x: render(
        inspect.getsource(x),
        lexers.PythonLexer),
}


def data_profiling_required(f):
    """Decorator for views requiring data profiling access"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if (
                current_app.config['LOGIN_DISABLED'] or
                (not current_user.is_anonymous() and
                 current_user.data_profiling())
        ):
            return f(*args, **kwargs)
        else:
            flash("This page requires data profiling privileges", "error")
            return redirect(url_for('admin.index'))
    return decorated_function


def fused_slots(v, c, m, p):
    url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=running')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))


def fqueued_slots(v, c, m, p):
    url = (
        '/admin/taskinstance/' +
        '?flt1_pool_equals=' + m.pool +
        '&flt2_state_equals=queued&sort=10&desc=1')
    return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))


def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
    if isinstance(tasks, list):
        for task in tasks:
            recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
        return
    if isinstance(tasks, SubDagOperator):
        subtasks = tasks.subdag.tasks
        dag_ids.append(tasks.subdag.dag_id)
        for subtask in subtasks:
            if subtask.task_id not in task_ids:
                task_ids.append(subtask.task_id)
                task_id_to_dag[subtask.task_id] = tasks.subdag
        recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
    if isinstance(tasks, BaseOperator):
        task_id_to_dag[tasks.task_id] = tasks.dag


def get_chart_height(dag):
    """
    TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a
    heuristic to approximate the size of the generated chart (otherwise the
    charts are tiny and unreadable when DAGs have a large number of tasks).
    Ideally nvd3 should allow for dynamic-height charts, that is charts that
    take up space based on the size of the components within.
    """
    return 600 + len(dag.tasks) * 10


def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
    dttm = request.args.get('execution_date')
    if dttm:
        dttm = pendulum.parse(dttm)
    else:
        dttm = dag.latest_execution_date or timezone.utcnow()

    base_date = request.args.get('base_date')
    if base_date:
        base_date = timezone.parse(base_date)
    else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round to next second.
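        # Illustrative sketch of the rounding step that follows (hypothetical
        # timestamps, not part of the original view code): a dag run stamped
        # at 2018-01-01T00:00:00.250 would fall outside an
        # "execution_date <= base_date" filter built from the truncated
        # widget value, but rounding up to the next whole second keeps it
        # in range:
        #
        #     dttm = pendulum.parse('2018-01-01T00:00:00.250')
        #     base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
        #     # base_date == 2018-01-01T00:00:01+00:00, so base_date >= dttm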
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0) default_dag_run = conf.getint('webserver', 'default_dag_run_display_number') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else default_dag_run DR = models.DagRun drs = ( session.query(DR) .filter( DR.dag_id == dag.dag_id, DR.execution_date <= base_date) .order_by(desc(DR.execution_date)) .limit(num_runs) .all() ) dr_choices = [] dr_state = None for dr in drs: dr_choices.append((dr.execution_date.isoformat(), dr.run_id)) if dttm == dr.execution_date: dr_state = dr.state # Happens if base_date was changed and the selected dag run is not in result if not dr_state and drs: dr = drs[0] dttm = dr.execution_date dr_state = dr.state return { 'dttm': dttm, 'base_date': base_date, 'num_runs': num_runs, 'execution_date': dttm.isoformat(), 'dr_choices': dr_choices, 'dr_state': dr_state, } class Airflow(BaseView): def is_visible(self): return False @expose('/') @login_required def index(self): return self.render('airflow/dags.html') @expose('/chart_data') @data_profiling_required @wwwutils.gzipped # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key) def chart_data(self): from airflow import macros import pandas as pd if conf.getboolean('core', 'secure_mode'): abort(404) with create_session() as session: chart_id = request.args.get('chart_id') csv = request.args.get('csv') == "true" chart = session.query(models.Chart).filter_by(id=chart_id).first() db = session.query( models.Connection).filter_by(conn_id=chart.conn_id).first() payload = { "state": "ERROR", "error": "" } # Processing templated fields try: args = ast.literal_eval(chart.default_params) if type(args) is not type(dict()): raise AirflowException('Not a dict') except: args = {} payload['error'] += ( "Default params is not valid, string has to evaluate as " "a Python dictionary. ") request_dict = {k: request.args.get(k) for k in request.args} args.update(request_dict) args['macros'] = macros sandbox = ImmutableSandboxedEnvironment() sql = sandbox.from_string(chart.sql).render(**args) label = sandbox.from_string(chart.label).render(**args) payload['sql_html'] = Markup(highlight( sql, lexers.SqlLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) payload['label'] = label pd.set_option('display.max_colwidth', 100) hook = db.get_hook() try: df = hook.get_pandas_df( wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type)) df = df.fillna(0) except Exception as e: payload['error'] += "SQL execution failed. Details: " + str(e) if csv: return Response( response=df.to_csv(index=False), status=200, mimetype="application/text") if not payload['error'] and len(df) == CHART_LIMIT: payload['warning'] = ( "Data has been truncated to {0}" " rows. Expect incomplete results.").format(CHART_LIMIT) if not payload['error'] and len(df) == 0: payload['error'] += "Empty result set. " elif ( not payload['error'] and chart.sql_layout == 'series' and chart.chart_type != "datatable" and len(df.columns) < 3): payload['error'] += "SQL needs to return at least 3 columns. " elif ( not payload['error'] and chart.sql_layout == 'columns' and len(df.columns) < 2): payload['error'] += "SQL needs to return at least 2 columns. 
" elif not payload['error']: import numpy as np chart_type = chart.chart_type data = None if chart.show_datatable or chart_type == "datatable": data = df.to_dict(orient="split") data['columns'] = [{'title': c} for c in data['columns']] payload['data'] = data # Trying to convert time to something Highcharts likes x_col = 1 if chart.sql_layout == 'series' else 0 if chart.x_is_date: try: # From string to datetime df[df.columns[x_col]] = pd.to_datetime( df[df.columns[x_col]]) df[df.columns[x_col]] = df[df.columns[x_col]].apply( lambda x: int(x.strftime("%s")) * 1000) except Exception as e: payload['error'] = "Time conversion failed" if chart_type == 'datatable': payload['state'] = 'SUCCESS' return wwwutils.json_response(payload) else: if chart.sql_layout == 'series': # User provides columns (series, x, y) xaxis_label = df.columns[1] yaxis_label = df.columns[2] df[df.columns[2]] = df[df.columns[2]].astype(np.float) df = df.pivot_table( index=df.columns[1], columns=df.columns[0], values=df.columns[2], aggfunc=np.sum) else: # User provides columns (x, y, metric1, metric2, ...) xaxis_label = df.columns[0] yaxis_label = 'y' df.index = df[df.columns[0]] df = df.sort(df.columns[0]) del df[df.columns[0]] for col in df.columns: df[col] = df[col].astype(np.float) df = df.fillna(0) NVd3ChartClass = chart_mapping.get(chart.chart_type) NVd3ChartClass = getattr(nvd3, NVd3ChartClass) nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date) for col in df.columns: nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist()) try: nvd3_chart.buildcontent() payload['chart_type'] = nvd3_chart.__class__.__name__ payload['htmlcontent'] = nvd3_chart.htmlcontent except Exception as e: payload['error'] = str(e) payload['state'] = 'SUCCESS' payload['request_dict'] = request_dict return wwwutils.json_response(payload) @expose('/chart') @data_profiling_required def chart(self): if conf.getboolean('core', 'secure_mode'): abort(404) with create_session() as session: chart_id = request.args.get('chart_id') embed = request.args.get('embed') chart = session.query(models.Chart).filter_by(id=chart_id).first() NVd3ChartClass = chart_mapping.get(chart.chart_type) if not NVd3ChartClass: flash( "Not supported anymore as the license was incompatible, " "sorry", "danger") redirect('/admin/chart/') sql = "" if chart.show_sql: sql = Markup(highlight( chart.sql, lexers.SqlLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) return self.render( 'airflow/nvd3.html', chart=chart, title="Airflow - Chart", sql=sql, label=chart.label, embed=embed) @expose('/dag_stats') @login_required @provide_session def dag_stats(self, session=None): ds = models.DagStat ds.update( dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag] ) qry = ( session.query(ds.dag_id, ds.state, ds.count) ) data = {} for dag_id, state, count in qry: if dag_id not in data: data[dag_id] = {} data[dag_id][state] = count payload = {} for dag in dagbag.dags.values(): payload[dag.safe_dag_id] = [] for state in State.dag_states: try: count = data[dag.dag_id][state] except Exception: count = 0 d = { 'state': state, 'count': count, 'dag_id': dag.dag_id, 'color': State.color(state) } payload[dag.safe_dag_id].append(d) return wwwutils.json_response(payload) @expose('/task_stats') @login_required @provide_session def task_stats(self, session=None): TI = models.TaskInstance DagRun = models.DagRun Dag = models.DagModel LastDagRun = ( session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date')) .join(Dag, Dag.dag_id == 
DagRun.dag_id) .filter(DagRun.state != State.RUNNING) .filter(Dag.is_active == True) .filter(Dag.is_subdag == False) .group_by(DagRun.dag_id) .subquery('last_dag_run') ) RunningDagRun = ( session.query(DagRun.dag_id, DagRun.execution_date) .join(Dag, Dag.dag_id == DagRun.dag_id) .filter(DagRun.state == State.RUNNING) .filter(Dag.is_active == True) .filter(Dag.is_subdag == False) .subquery('running_dag_run') ) # Select all task_instances from active dag_runs. # If no dag_run is active, return task instances from most recent dag_run. LastTI = ( session.query(TI.dag_id.label('dag_id'), TI.state.label('state')) .join(LastDagRun, and_( LastDagRun.c.dag_id == TI.dag_id, LastDagRun.c.execution_date == TI.execution_date)) ) RunningTI = ( session.query(TI.dag_id.label('dag_id'), TI.state.label('state')) .join(RunningDagRun, and_( RunningDagRun.c.dag_id == TI.dag_id, RunningDagRun.c.execution_date == TI.execution_date)) ) UnionTI = union_all(LastTI, RunningTI).alias('union_ti') qry = ( session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count()) .group_by(UnionTI.c.dag_id, UnionTI.c.state) ) data = {} for dag_id, state, count in qry: if dag_id not in data: data[dag_id] = {} data[dag_id][state] = count session.commit() payload = {} for dag in dagbag.dags.values(): payload[dag.safe_dag_id] = [] for state in State.task_states: try: count = data[dag.dag_id][state] except: count = 0 d = { 'state': state, 'count': count, 'dag_id': dag.dag_id, 'color': State.color(state) } payload[dag.safe_dag_id].append(d) return wwwutils.json_response(payload) @expose('/code') @login_required def code(self): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) title = dag_id try: with open(dag.fileloc, 'r') as f: code = f.read() html_code = highlight( code, lexers.PythonLexer(), HtmlFormatter(linenos=True)) except IOError as e: html_code = str(e) return self.render( 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title, root=request.args.get('root'), demo_mode=conf.getboolean('webserver', 'demo_mode')) @expose('/dag_details') @login_required @provide_session def dag_details(self, session=None): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) title = "DAG details" TI = models.TaskInstance states = ( session.query(TI.state, sqla.func.count(TI.dag_id)) .filter(TI.dag_id == dag_id) .group_by(TI.state) .all() ) return self.render( 'airflow/dag_details.html', dag=dag, title=title, states=states, State=State) @current_app.errorhandler(404) def circles(self): return render_template( 'airflow/circles.html', hostname=get_hostname()), 404 @current_app.errorhandler(500) def show_traceback(self): from airflow.utils import asciiart as ascii_ return render_template( 'airflow/traceback.html', hostname=get_hostname(), nukular=ascii_.nukular, info=traceback.format_exc()), 500 @expose('/noaccess') def noaccess(self): return self.render('airflow/noaccess.html') @expose('/pickle_info') @login_required def pickle_info(self): d = {} dag_id = request.args.get('dag_id') dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values() for dag in dags: if not dag.is_subdag: d[dag.dag_id] = dag.pickle_info() return wwwutils.json_response(d) @expose('/login', methods=['GET', 'POST']) def login(self): return airflow.login.login(self, request) @expose('/logout') def logout(self): logout_user() flash('You have been logged out.') return redirect(url_for('admin.index')) @expose('/rendered') @login_required @wwwutils.action_logging def rendered(self): dag_id = request.args.get('dag_id') task_id = 
request.args.get('task_id') execution_date = request.args.get('execution_date') dttm = pendulum.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) task = copy.copy(dag.get_task(task_id)) ti = models.TaskInstance(task=task, execution_date=dttm) try: ti.render_templates() except Exception as e: flash("Error rendering template: " + str(e), "error") title = "Rendered Template" html_dict = {} for template_field in task.__class__.template_fields: content = getattr(task, template_field) if template_field in attr_renderer: html_dict[template_field] = attr_renderer[template_field](content) else: html_dict[template_field] = ( "<pre><code>" + str(content) + "</pre></code>") return self.render( 'airflow/ti_code.html', html_dict=html_dict, dag=dag, task_id=task_id, execution_date=execution_date, form=form, title=title, ) @expose('/get_logs_with_metadata') @login_required @wwwutils.action_logging @provide_session def get_logs_with_metadata(self, session=None): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') dttm = pendulum.parse(execution_date) try_number = int(request.args.get('try_number')) metadata = request.args.get('metadata') metadata = json.loads(metadata) # metadata may be null if not metadata: metadata = {} # Convert string datetime into actual datetime try: execution_date = timezone.parse(execution_date) except ValueError: error_message = ( 'Given execution date, {}, could not be identified ' 'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format( execution_date)) response = jsonify({'error': error_message}) response.status_code = 400 return response logger = logging.getLogger('airflow.task') task_log_reader = conf.get('core', 'task_log_reader') handler = next((handler for handler in logger.handlers if handler.name == task_log_reader), None) ti = session.query(models.TaskInstance).filter( models.TaskInstance.dag_id == dag_id, models.TaskInstance.task_id == task_id, models.TaskInstance.execution_date == dttm).first() try: if ti is None: logs = ["*** Task instance did not exist in the DB\n"] metadata['end_of_log'] = True else: dag = dagbag.get_dag(dag_id) ti.task = dag.get_task(ti.task_id) logs, metadatas = handler.read(ti, try_number, metadata=metadata) metadata = metadatas[0] for i, log in enumerate(logs): if PY2 and not isinstance(log, unicode): logs[i] = log.decode('utf-8') message = logs[0] return jsonify(message=message, metadata=metadata) except AttributeError as e: error_message = ["Task log handler {} does not support read logs.\n{}\n" .format(task_log_reader, str(e))] metadata['end_of_log'] = True return jsonify(message=error_message, error=True, metadata=metadata) @expose('/log') @login_required @wwwutils.action_logging @provide_session def log(self, session=None): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') dttm = pendulum.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) ti = session.query(models.TaskInstance).filter( models.TaskInstance.dag_id == dag_id, models.TaskInstance.task_id == task_id, models.TaskInstance.execution_date == dttm).first() logs = [''] * (ti.next_try_number - 1 if ti is not None else 0) return self.render( 'airflow/ti_log.html', logs=logs, dag=dag, title="Log by attempts", dag_id=dag.dag_id, task_id=task_id, execution_date=execution_date, form=form) @expose('/task') @login_required 
@wwwutils.action_logging def task(self): TI = models.TaskInstance dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') # Carrying execution_date through, even though it's irrelevant for # this context execution_date = request.args.get('execution_date') dttm = pendulum.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: flash( "Task [{}.{}] doesn't seem to exist" " at the moment".format(dag_id, task_id), "error") return redirect('/admin/') task = copy.copy(dag.get_task(task_id)) task.resolve_template_files() ti = TI(task=task, execution_date=dttm) ti.refresh_from_db() ti_attrs = [] for attr_name in dir(ti): if not attr_name.startswith('_'): attr = getattr(ti, attr_name) if type(attr) != type(self.task): ti_attrs.append((attr_name, str(attr))) task_attrs = [] for attr_name in dir(task): if not attr_name.startswith('_'): attr = getattr(task, attr_name) if type(attr) != type(self.task) and \ attr_name not in attr_renderer: task_attrs.append((attr_name, str(attr))) # Color coding the special attributes that are code special_attrs_rendered = {} for attr_name in attr_renderer: if hasattr(task, attr_name): source = getattr(task, attr_name) special_attrs_rendered[attr_name] = attr_renderer[attr_name](source) no_failed_deps_result = [( "Unknown", dedent("""\ All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/> - The scheduler is down or under heavy load<br/> - The following configuration values may be limiting the number of queueable processes: <code>parallelism</code>, <code>dag_concurrency</code>, <code>max_active_dag_runs_per_dag</code>, <code>non_pooled_task_slot_count</code><br/> {} <br/> If this task instance does not start soon please contact your Airflow """ """administrator for assistance.""" .format( "- This task instance already ran and had its state changed " "manually (e.g. 
cleared in the UI)<br/>" if ti.state == State.NONE else "")))] # Use the scheduler's context to figure out which dependencies are not met dep_context = DepContext(SCHEDULER_DEPS) failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses( dep_context=dep_context)] title = "Task Instance Details" return self.render( 'airflow/task.html', task_attrs=task_attrs, ti_attrs=ti_attrs, failed_dep_reasons=failed_dep_reasons or no_failed_deps_result, task_id=task_id, execution_date=execution_date, special_attrs_rendered=special_attrs_rendered, form=form, dag=dag, title=title) @expose('/xcom') @login_required @wwwutils.action_logging @provide_session def xcom(self, session=None): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') # Carrying execution_date through, even though it's irrelevant for # this context execution_date = request.args.get('execution_date') dttm = pendulum.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: flash( "Task [{}.{}] doesn't seem to exist" " at the moment".format(dag_id, task_id), "error") return redirect('/admin/') xcomlist = session.query(XCom).filter( XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.execution_date == dttm).all() attributes = [] for xcom in xcomlist: if not xcom.key.startswith('_'): attributes.append((xcom.key, xcom.value)) title = "XCom" return self.render( 'airflow/xcom.html', attributes=attributes, task_id=task_id, execution_date=execution_date, form=form, dag=dag, title=title) @expose('/run') @login_required @wwwutils.action_logging @wwwutils.notify_owner def run(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') dag = dagbag.get_dag(dag_id) task = dag.get_task(task_id) execution_date = request.args.get('execution_date') execution_date = pendulum.parse(execution_date) ignore_all_deps = request.args.get('ignore_all_deps') == "true" ignore_task_deps = request.args.get('ignore_task_deps') == "true" ignore_ti_state = request.args.get('ignore_ti_state') == "true" try: from airflow.executors import GetDefaultExecutor from airflow.executors.celery_executor import CeleryExecutor executor = GetDefaultExecutor() if not isinstance(executor, CeleryExecutor): flash("Only works with the CeleryExecutor, sorry", "error") return redirect(origin) except ImportError: # in case CeleryExecutor cannot be imported it is not active either flash("Only works with the CeleryExecutor, sorry", "error") return redirect(origin) ti = models.TaskInstance(task=task, execution_date=execution_date) ti.refresh_from_db() # Make sure the task instance can be queued dep_context = DepContext( deps=QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context)) if failed_deps: failed_deps_str = ", ".join( ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps]) flash("Could not queue task instance for execution, dependencies not met: " "{}".format(failed_deps_str), "error") return redirect(origin) executor.start() executor.queue_task_instance( ti, ignore_all_deps=ignore_all_deps, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) executor.heartbeat() flash( "Sent {} to the message queue, " "it should start any moment now.".format(ti)) return redirect(origin) @expose('/delete') @login_required @wwwutils.action_logging 
@wwwutils.notify_owner def delete(self): from airflow.api.common.experimental import delete_dag from airflow.exceptions import DagNotFound, DagFileExists dag_id = request.args.get('dag_id') origin = request.args.get('origin') or "/admin/" try: delete_dag.delete_dag(dag_id) except DagNotFound: flash("DAG with id {} not found. Cannot delete".format(dag_id)) return redirect(request.referrer) except DagFileExists: flash("Dag id {} is still in DagBag. " "Remove the DAG file first.".format(dag_id)) return redirect(request.referrer) flash("Deleting DAG with id {}. May take a couple minutes to fully" " disappear.".format(dag_id)) # Upon successful delete return to origin return redirect(origin) @expose('/trigger') @login_required @wwwutils.action_logging @wwwutils.notify_owner def trigger(self): dag_id = request.args.get('dag_id') origin = request.args.get('origin') or "/admin/" dag = dagbag.get_dag(dag_id) if not dag: flash("Cannot find dag {}".format(dag_id)) return redirect(origin) execution_date = timezone.utcnow() run_id = "manual__{0}".format(execution_date.isoformat()) dr = DagRun.find(dag_id=dag_id, run_id=run_id) if dr: flash("This run_id {} already exists".format(run_id)) return redirect(origin) run_conf = {} dag.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.RUNNING, conf=run_conf, external_trigger=True ) flash( "Triggered {}, " "it should start any moment now.".format(dag_id)) return redirect(origin) def _clear_dag_tis(self, dag, start_date, end_date, origin, recursive=False, confirmed=False): if confirmed: count = dag.clear( start_date=start_date, end_date=end_date, include_subdags=recursive) flash("{0} task instances have been cleared".format(count)) return redirect(origin) tis = dag.clear( start_date=start_date, end_date=end_date, include_subdags=recursive, dry_run=True) if not tis: flash("No task instances to clear", 'error') response = redirect(origin) else: details = "\n".join([str(t) for t in tis]) response = self.render( 'airflow/confirm.html', message=("Here's the list of task instances you are about " "to clear:"), details=details) return response @expose('/clear') @login_required @wwwutils.action_logging @wwwutils.notify_owner def clear(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') dag = dagbag.get_dag(dag_id) execution_date = request.args.get('execution_date') execution_date = pendulum.parse(execution_date) confirmed = request.args.get('confirmed') == "true" upstream = request.args.get('upstream') == "true" downstream = request.args.get('downstream') == "true" future = request.args.get('future') == "true" past = request.args.get('past') == "true" recursive = request.args.get('recursive') == "true" dag = dag.sub_dag( task_regex=r"^{0}$".format(task_id), include_downstream=downstream, include_upstream=upstream) end_date = execution_date if not future else None start_date = execution_date if not past else None return self._clear_dag_tis(dag, start_date, end_date, origin, recursive=recursive, confirmed=confirmed) @expose('/dagrun_clear') @login_required @wwwutils.action_logging @wwwutils.notify_owner def dagrun_clear(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == "true" dag = dagbag.get_dag(dag_id) execution_date = pendulum.parse(execution_date) start_date = execution_date end_date = execution_date return 
self._clear_dag_tis(dag, start_date, end_date, origin, recursive=True, confirmed=confirmed) @expose('/blocked') @login_required @provide_session def blocked(self, session=None): DR = models.DagRun dags = ( session.query(DR.dag_id, sqla.func.count(DR.id)) .filter(DR.state == State.RUNNING) .group_by(DR.dag_id) .all() ) payload = [] for dag_id, active_dag_runs in dags: max_active_runs = 0 if dag_id in dagbag.dags: max_active_runs = dagbag.dags[dag_id].max_active_runs payload.append({ 'dag_id': dag_id, 'active_dag_run': active_dag_runs, 'max_active_runs': max_active_runs, }) return wwwutils.json_response(payload) def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin): if not execution_date: flash('Invalid execution date', 'error') return redirect(origin) execution_date = pendulum.parse(execution_date) dag = dagbag.get_dag(dag_id) if not dag: flash('Cannot find DAG: {}'.format(dag_id), 'error') return redirect(origin) new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed) if confirmed: flash('Marked failed on {} task instances'.format(len(new_dag_state))) return redirect(origin) else: details = '\n'.join([str(t) for t in new_dag_state]) response = self.render('airflow/confirm.html', message=("Here's the list of task instances you are " "about to mark as failed"), details=details) return response def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin): if not execution_date: flash('Invalid execution date', 'error') return redirect(origin) execution_date = pendulum.parse(execution_date) dag = dagbag.get_dag(dag_id) if not dag: flash('Cannot find DAG: {}'.format(dag_id), 'error') return redirect(origin) new_dag_state = set_dag_run_state_to_success(dag, execution_date, commit=confirmed) if confirmed: flash('Marked success on {} task instances'.format(len(new_dag_state))) return redirect(origin) else: details = '\n'.join([str(t) for t in new_dag_state]) response = self.render('airflow/confirm.html', message=("Here's the list of task instances you are " "about to mark as success"), details=details) return response @expose('/dagrun_failed') @login_required @wwwutils.action_logging @wwwutils.notify_owner def dagrun_failed(self): dag_id = request.args.get('dag_id') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == 'true' origin = request.args.get('origin') return self._mark_dagrun_state_as_failed(dag_id, execution_date, confirmed, origin) @expose('/dagrun_success') @login_required @wwwutils.action_logging @wwwutils.notify_owner def dagrun_success(self): dag_id = request.args.get('dag_id') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == 'true' origin = request.args.get('origin') return self._mark_dagrun_state_as_success(dag_id, execution_date, confirmed, origin) def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date, confirmed, upstream, downstream, future, past, state): dag = dagbag.get_dag(dag_id) task = dag.get_task(task_id) task.dag = dag execution_date = pendulum.parse(execution_date) if not dag: flash("Cannot find DAG: {}".format(dag_id)) return redirect(origin) if not task: flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id)) return redirect(origin) from airflow.api.common.experimental.mark_tasks import set_state if confirmed: altered = set_state(task=task, execution_date=execution_date, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=True) 
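# Illustrative sketch (hypothetical task id and flag values, not part of the
# original handler): the set_state() call above cascades the new state along
# the relatives selected by the boolean flags, e.g. marking one task and all
# of its downstream tasks as success for a single execution date:
#
#     altered = set_state(task=dag.get_task('load'),
#                         execution_date=execution_date,
#                         upstream=False, downstream=True,
#                         future=False, past=False,
#                         state=State.SUCCESS, commit=True)
#     # len(altered) == number of task instances actually changed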
flash("Marked {} on {} task instances".format(state, len(altered))) return redirect(origin) to_be_altered = set_state(task=task, execution_date=execution_date, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=False) details = "\n".join([str(t) for t in to_be_altered]) response = self.render("airflow/confirm.html", message=("Here's the list of task instances you are " "about to mark as {}:".format(state)), details=details) return response @expose('/failed') @login_required @wwwutils.action_logging @wwwutils.notify_owner def failed(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == "true" upstream = request.args.get('upstream') == "true" downstream = request.args.get('downstream') == "true" future = request.args.get('future') == "true" past = request.args.get('past') == "true" return self._mark_task_instance_state(dag_id, task_id, origin, execution_date, confirmed, upstream, downstream, future, past, State.FAILED) @expose('/success') @login_required @wwwutils.action_logging @wwwutils.notify_owner def success(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == "true" upstream = request.args.get('upstream') == "true" downstream = request.args.get('downstream') == "true" future = request.args.get('future') == "true" past = request.args.get('past') == "true" return self._mark_task_instance_state(dag_id, task_id, origin, execution_date, confirmed, upstream, downstream, future, past, State.SUCCESS) @expose('/tree') @login_required @wwwutils.gzipped @wwwutils.action_logging @provide_session def tree(self, session=None): default_dag_run = conf.getint('webserver', 'default_dag_run_display_number') dag_id = request.args.get('dag_id') blur = conf.getboolean('webserver', 'demo_mode') dag = dagbag.get_dag(dag_id) if dag_id not in dagbag.dags: flash('DAG "{0}" seems to be missing.'.format(dag_id), "error") return redirect('/admin/') root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_downstream=False, include_upstream=True) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else default_dag_run if base_date: base_date = timezone.parse(base_date) else: base_date = dag.latest_execution_date or timezone.utcnow() DR = models.DagRun dag_runs = ( session.query(DR) .filter( DR.dag_id == dag.dag_id, DR.execution_date <= base_date) .order_by(DR.execution_date.desc()) .limit(num_runs) .all() ) dag_runs = { dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs} dates = sorted(list(dag_runs.keys())) max_date = max(dates) if dates else None min_date = min(dates) if dates else None tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) task_instances = {} for ti in tis: tid = alchemy_to_dict(ti) dr = dag_runs.get(ti.execution_date) tid['external_trigger'] = dr['external_trigger'] if dr else False task_instances[(ti.task_id, ti.execution_date)] = tid expanded = [] # The default recursion traces every path so that tree view has full # expand/collapse functionality. After 5,000 nodes we stop and fall # back on a quick DFS search for performance. See PR #320. 
node_count = [0] node_limit = 5000 / max(1, len(dag.roots)) def recurse_nodes(task, visited): visited.add(task) node_count[0] += 1 children = [ recurse_nodes(t, visited) for t in task.upstream_list if node_count[0] < node_limit or t not in visited] # D3 tree uses children vs _children to define what is # expanded or not. The following block makes it such that # repeated nodes are collapsed by default. children_key = 'children' if task.task_id not in expanded: expanded.append(task.task_id) elif children: children_key = "_children" def set_duration(tid): if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and tid["start_date"] is not None): d = timezone.utcnow() - pendulum.parse(tid["start_date"]) tid["duration"] = d.total_seconds() return tid return { 'name': task.task_id, 'instances': [ set_duration(task_instances.get((task.task_id, d))) or { 'execution_date': d.isoformat(), 'task_id': task.task_id } for d in dates], children_key: children, 'num_dep': len(task.upstream_list), 'operator': task.task_type, 'retries': task.retries, 'owner': task.owner, 'start_date': task.start_date, 'end_date': task.end_date, 'depends_on_past': task.depends_on_past, 'ui_color': task.ui_color, } data = { 'name': '[DAG]', 'children': [recurse_nodes(t, set()) for t in dag.roots], 'instances': [ dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates], } data = json.dumps(data, indent=4, default=json_ser) session.commit() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) return self.render( 'airflow/tree.html', operators=sorted( list(set([op.__class__ for op in dag.tasks])), key=lambda x: x.__name__ ), root=root, form=form, dag=dag, data=data, blur=blur, num_runs=num_runs) @expose('/graph') @login_required @wwwutils.gzipped @wwwutils.action_logging @provide_session def graph(self, session=None): dag_id = request.args.get('dag_id') blur = conf.getboolean('webserver', 'demo_mode') dag = dagbag.get_dag(dag_id) if dag_id not in dagbag.dags: flash('DAG "{0}" seems to be missing.'.format(dag_id), "error") return redirect('/admin/') root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) arrange = request.args.get('arrange', dag.orientation) nodes = [] edges = [] for task in dag.tasks: nodes.append({ 'id': task.task_id, 'value': { 'label': task.task_id, 'labelStyle': "fill:{0};".format(task.ui_fgcolor), 'style': "fill:{0};".format(task.ui_color), } }) def get_upstream(task): for t in task.upstream_list: edge = { 'u': t.task_id, 'v': task.task_id, } if edge not in edges: edges.append(edge) get_upstream(t) for t in dag.roots: get_upstream(t) dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag) dt_nr_dr_data['arrange'] = arrange dttm = dt_nr_dr_data['dttm'] class GraphForm(DateTimeWithNumRunsWithDagRunsForm): arrange = SelectField("Layout", choices=( ('LR', "Left->Right"), ('RL', "Right->Left"), ('TB', "Top->Bottom"), ('BT', "Bottom->Top"), )) form = GraphForm(data=dt_nr_dr_data) form.execution_date.choices = dt_nr_dr_data['dr_choices'] task_instances = { ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(session, dttm, dttm)} tasks = { t.task_id: { 'dag_id': t.dag_id, 'task_type': t.task_type, } for t in dag.tasks} if not tasks: flash("No tasks found", "error") session.commit() doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else '' return self.render( 'airflow/graph.html', dag=dag, form=form, width=request.args.get('width', 
"100%"), height=request.args.get('height', "800"), execution_date=dttm.isoformat(), state_token=state_token(dt_nr_dr_data['dr_state']), doc_md=doc_md, arrange=arrange, operators=sorted( list(set([op.__class__ for op in dag.tasks])), key=lambda x: x.__name__ ), blur=blur, root=root or '', task_instances=json.dumps(task_instances, indent=2), tasks=json.dumps(tasks, indent=2), nodes=json.dumps(nodes, indent=2), edges=json.dumps(edges, indent=2), ) @expose('/duration') @login_required @wwwutils.action_logging @provide_session def duration(self, session=None): default_dag_run = conf.getint('webserver', 'default_dag_run_display_number') dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else default_dag_run if base_date: base_date = pendulum.parse(base_date) else: base_date = dag.latest_execution_date or timezone.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, height=chart_height, width="1200") cum_chart = nvd3.lineChart( name="cumLineChart", x_is_date=True, height=chart_height, width="1200") y = defaultdict(list) x = defaultdict(list) cum_y = defaultdict(list) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) TF = models.TaskFail ti_fails = ( session .query(TF) .filter( TF.dag_id == dag.dag_id, TF.execution_date >= min_date, TF.execution_date <= base_date, TF.task_id.in_([t.task_id for t in dag.tasks])) .all() ) fails_totals = defaultdict(int) for tf in ti_fails: dict_key = (tf.dag_id, tf.task_id, tf.execution_date) fails_totals[dict_key] += tf.duration for ti in tis: if ti.duration: dttm = wwwutils.epoch(ti.execution_date) x[ti.task_id].append(dttm) y[ti.task_id].append(float(ti.duration)) fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date) fails_total = fails_totals[fails_dict_key] cum_y[ti.task_id].append(float(ti.duration + fails_total)) # determine the most relevant time unit for the set of task instance # durations for the DAG y_unit = infer_time_unit([d for t in y.values() for d in t]) cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t]) # update the y Axis on both charts to have the correct time units chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Duration ({})'.format(y_unit)) chart.axislist['yAxis']['axisLabelDistance'] = '40' cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Duration ({})'.format(cum_y_unit)) cum_chart.axislist['yAxis']['axisLabelDistance'] = '40' for task in dag.tasks: if x[task.task_id]: chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(y[task.task_id], y_unit)) cum_chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(cum_y[task.task_id], cum_y_unit)) dates = sorted(list({ti.execution_date for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if dates else None session.commit() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() cum_chart.buildcontent() s_index = cum_chart.htmlcontent.rfind('});') cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] + "$(function() {$( document ).trigger('chartload') })" + 
cum_chart.htmlcontent[s_index:]) return self.render( 'airflow/duration_chart.html', dag=dag, demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, chart=chart.htmlcontent, cum_chart=cum_chart.htmlcontent ) @expose('/tries') @login_required @wwwutils.action_logging @provide_session def tries(self, session=None): default_dag_run = conf.getint('webserver', 'default_dag_run_display_number') dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else default_dag_run if base_date: base_date = pendulum.parse(base_date) else: base_date = dag.latest_execution_date or timezone.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height, width="1200") for task in dag.tasks: y = [] x = [] for ti in task.get_task_instances(session, start_date=min_date, end_date=base_date): dttm = wwwutils.epoch(ti.execution_date) x.append(dttm) y.append(ti.try_number) if x: chart.add_serie(name=task.task_id, x=x, y=y) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) tries = sorted(list({ti.try_number for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if tries else None session.commit() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() return self.render( 'airflow/chart.html', dag=dag, demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, chart=chart.htmlcontent ) @expose('/landing_times') @login_required @wwwutils.action_logging @provide_session def landing_times(self, session=None): default_dag_run = conf.getint('webserver', 'default_dag_run_display_number') dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else default_dag_run if base_date: base_date = pendulum.parse(base_date) else: base_date = dag.latest_execution_date or timezone.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, height=chart_height, width="1200") y = {} x = {} for task in dag.tasks: y[task.task_id] = [] x[task.task_id] = [] for ti in task.get_task_instances(session, start_date=min_date, end_date=base_date): if ti.end_date: ts = ti.execution_date following_schedule = dag.following_schedule(ts) if dag.schedule_interval and following_schedule: ts = following_schedule dttm = wwwutils.epoch(ti.execution_date) secs = (ti.end_date - ts).total_seconds() x[ti.task_id].append(dttm) y[ti.task_id].append(secs) # determine the most relevant time unit for the set of landing times # for the DAG y_unit = infer_time_unit([d for t in y.values() for d in t]) # update the y Axis to have the correct time units chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Landing Time ({})'.format(y_unit)) chart.axislist['yAxis']['axisLabelDistance'] = '40' for task 
in dag.tasks: if x[task.task_id]: chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(y[task.task_id], y_unit)) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) dates = sorted(list({ti.execution_date for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if dates else None form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() return self.render( 'airflow/chart.html', dag=dag, chart=chart.htmlcontent, height=str(chart_height + 100) + "px", demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, ) @expose('/paused', methods=['POST']) @login_required @wwwutils.action_logging @provide_session def paused(self, session=None): DagModel = models.DagModel dag_id = request.args.get('dag_id') orm_dag = session.query( DagModel).filter(DagModel.dag_id == dag_id).first() if request.args.get('is_paused') == 'false': orm_dag.is_paused = True else: orm_dag.is_paused = False session.merge(orm_dag) session.commit() dagbag.get_dag(dag_id) return "OK" @expose('/refresh') @login_required @wwwutils.action_logging @provide_session def refresh(self, session=None): DagModel = models.DagModel dag_id = request.args.get('dag_id') orm_dag = session.query( DagModel).filter(DagModel.dag_id == dag_id).first() if orm_dag: orm_dag.last_expired = timezone.utcnow() session.merge(orm_dag) session.commit() dagbag.get_dag(dag_id) flash("DAG [{}] is now fresh as a daisy".format(dag_id)) return redirect(request.referrer) @expose('/refresh_all') @login_required @wwwutils.action_logging def refresh_all(self): dagbag.collect_dags(only_if_updated=False) flash("All DAGs are now up to date") return redirect('/') @expose('/gantt') @login_required @wwwutils.action_logging @provide_session def gantt(self, session=None): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) demo_mode = conf.getboolean('webserver', 'demo_mode') root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag) dttm = dt_nr_dr_data['dttm'] form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data) form.execution_date.choices = dt_nr_dr_data['dr_choices'] tis = [ ti for ti in dag.get_task_instances(session, dttm, dttm) if ti.start_date] tis = sorted(tis, key=lambda ti: ti.start_date) TF = models.TaskFail ti_fails = list(itertools.chain(*[( session .query(TF) .filter(TF.dag_id == ti.dag_id, TF.task_id == ti.task_id, TF.execution_date == ti.execution_date) .all() ) for ti in tis])) tis_with_fails = sorted(tis + ti_fails, key=lambda ti: ti.start_date) tasks = [] for ti in tis_with_fails: end_date = ti.end_date if ti.end_date else timezone.utcnow() state = ti.state if type(ti) == models.TaskInstance else State.FAILED tasks.append({ 'startDate': wwwutils.epoch(ti.start_date), 'endDate': wwwutils.epoch(end_date), 'isoStart': ti.start_date.isoformat()[:-4], 'isoEnd': end_date.isoformat()[:-4], 'taskName': ti.task_id, 'duration': "{}".format(end_date - ti.start_date)[:-4], 'status': state, 'executionDate': ti.execution_date.isoformat(), }) states = {task['status']: task['status'] for task in tasks} data = { 'taskNames': [ti.task_id for ti in tis], 'tasks': tasks, 'taskStatus': states, 'height': len(tis) * 25 + 25, } session.commit() return self.render( 'airflow/gantt.html', dag=dag, execution_date=dttm.isoformat(), form=form, data=json.dumps(data, indent=2), base_date='', 
demo_mode=demo_mode, root=root, ) @expose('/object/task_instances') @login_required @wwwutils.action_logging @provide_session def task_instances(self, session=None): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) dttm = request.args.get('execution_date') if dttm: dttm = pendulum.parse(dttm) else: return ("Error: Invalid execution_date") task_instances = { ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(session, dttm, dttm)} return json.dumps(task_instances) @expose('/variables/<form>', methods=["GET", "POST"]) @login_required @wwwutils.action_logging def variables(self, form): try: if request.method == 'POST': data = request.json if data: with create_session() as session: var = models.Variable(key=form, val=json.dumps(data)) session.add(var) session.commit() return "" else: return self.render( 'airflow/variables/{}.html'.format(form) ) except: # prevent XSS form = escape(form) return ("Error: form airflow/variables/{}.html " "not found.").format(form), 404 @expose('/varimport', methods=["GET", "POST"]) @login_required @wwwutils.action_logging def varimport(self): try: d = json.load(UTF8_READER(request.files['file'])) except Exception as e: flash("Missing file or syntax error: {}.".format(e)) else: for k, v in d.items(): models.Variable.set(k, v, serialize_json=isinstance(v, dict)) flash("{} variable(s) successfully updated.".format(len(d))) return redirect('/admin/variable') class HomeView(AdminIndexView): @expose("/") @login_required @provide_session def index(self, session=None): DM = models.DagModel # restrict the dags shown if filter_by_owner and current user is not superuser do_filter = FILTER_BY_OWNER and (not current_user.is_superuser()) owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower() hide_paused_dags_by_default = conf.getboolean('webserver', 'hide_paused_dags_by_default') show_paused_arg = request.args.get('showPaused', 'None') def get_int_arg(value, default=0): try: return int(value) except ValueError: return default arg_current_page = request.args.get('page', '0') arg_search_query = request.args.get('search', None) dags_per_page = PAGE_SIZE current_page = get_int_arg(arg_current_page, default=0) if show_paused_arg.strip().lower() == 'false': hide_paused = True elif show_paused_arg.strip().lower() == 'true': hide_paused = False else: hide_paused = hide_paused_dags_by_default # read orm_dags from the db sql_query = session.query(DM) if do_filter and owner_mode == 'ldapgroup': sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active, DM.owners.in_(current_user.ldap_groups) ) elif do_filter and owner_mode == 'user': sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active, DM.owners == current_user.user.username ) else: sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active ) # optionally filter out "paused" dags if hide_paused: sql_query = sql_query.filter(~DM.is_paused) orm_dags = {dag.dag_id: dag for dag in sql_query .all()} import_errors = session.query(models.ImportError).all() for ie in import_errors: flash( "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie), "error") # get a list of all non-subdag dags visible to everyone # optionally filter out "paused" dags if hide_paused: unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and not dag.is_paused] else: unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag] # optionally filter to get only dags that the user should see if do_filter and owner_mode == 'ldapgroup': # only show dags owned by someone 
in @current_user.ldap_groups webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags if dag.owner in current_user.ldap_groups } elif do_filter and owner_mode == 'user': # only show dags owned by @current_user.user.username webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags if dag.owner == current_user.user.username } else: webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags } if arg_search_query: lower_search_query = arg_search_query.lower() # filter by dag_id webserver_dags_filtered = { dag_id: dag for dag_id, dag in webserver_dags.items() if (lower_search_query in dag_id.lower() or lower_search_query in dag.owner.lower()) } all_dag_ids = (set([dag.dag_id for dag in orm_dags.values() if lower_search_query in dag.dag_id.lower() or lower_search_query in dag.owners.lower()]) | set(webserver_dags_filtered.keys())) sorted_dag_ids = sorted(all_dag_ids) else: webserver_dags_filtered = webserver_dags sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys())) start = current_page * dags_per_page end = start + dags_per_page num_of_all_dags = len(sorted_dag_ids) page_dag_ids = sorted_dag_ids[start:end] num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page))) auto_complete_data = set() for dag in webserver_dags_filtered.values(): auto_complete_data.add(dag.dag_id) auto_complete_data.add(dag.owner) for dag in orm_dags.values(): auto_complete_data.add(dag.dag_id) auto_complete_data.add(dag.owners) return self.render( 'airflow/dags.html', webserver_dags=webserver_dags_filtered, orm_dags=orm_dags, hide_paused=hide_paused, current_page=current_page, search_query=arg_search_query if arg_search_query else '', page_size=dags_per_page, num_of_pages=num_of_pages, num_dag_from=start + 1, num_dag_to=min(end, num_of_all_dags), num_of_all_dags=num_of_all_dags, paging=wwwutils.generate_pages(current_page, num_of_pages, search=arg_search_query, showPaused=not hide_paused), dag_ids_in_page=page_dag_ids, auto_complete_data=auto_complete_data) class QueryView(wwwutils.DataProfilingMixin, BaseView): @expose('/', methods=['POST', 'GET']) @wwwutils.gzipped @provide_session def query(self, session=None): dbs = session.query(models.Connection).order_by( models.Connection.conn_id).all() session.expunge_all() db_choices = list( ((db.conn_id, db.conn_id) for db in dbs if db.get_hook())) conn_id_str = request.form.get('conn_id') csv = request.form.get('csv') == "true" sql = request.form.get('sql') class QueryForm(Form): conn_id = SelectField("Layout", choices=db_choices) sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget()) data = { 'conn_id': conn_id_str, 'sql': sql, } results = None has_data = False error = False if conn_id_str: db = [db for db in dbs if db.conn_id == conn_id_str][0] hook = db.get_hook() try: df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type)) # df = hook.get_pandas_df(sql) has_data = len(df) > 0 df = df.fillna('') results = df.to_html( classes=[ 'table', 'table-bordered', 'table-striped', 'no-wrap'], index=False, na_rep='', ) if has_data else '' except Exception as e: flash(str(e), 'error') error = True if has_data and len(df) == QUERY_LIMIT: flash( "Query output truncated at " + str(QUERY_LIMIT) + " rows", 'info') if not has_data and error: flash('No data', 'error') if csv: return Response( response=df.to_csv(index=False), status=200, mimetype="application/text") form = QueryForm(request.form, data=data) session.commit() return self.render( 'airflow/query.html', form=form, 
title="Ad Hoc Query", results=results or '', has_data=has_data) class AirflowModelView(ModelView): list_template = 'airflow/model_list.html' edit_template = 'airflow/model_edit.html' create_template = 'airflow/model_create.html' column_display_actions = True page_size = PAGE_SIZE class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView): """ Modifying the base ModelView class for non edit, browse only operations """ named_filter_urls = True can_create = False can_edit = False can_delete = False column_display_pk = True class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView): column_list = ('pool', 'slots', 'used_slots', 'queued_slots') column_formatters = dict( pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots) named_filter_urls = True form_args = { 'pool': { 'validators': [ validators.DataRequired(), ] } } class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly): verbose_name_plural = "SLA misses" verbose_name = "SLA miss" column_list = ( 'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp') column_formatters = dict( task_id=task_instance_link, execution_date=datetime_f, timestamp=datetime_f, dag_id=dag_link) named_filter_urls = True column_searchable_list = ('dag_id', 'task_id',) column_filters = ( 'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date') filter_converter = wwwutils.UtcFilterConverter() form_widget_args = { 'email_sent': {'disabled': True}, 'timestamp': {'disabled': True}, } @provide_session def _connection_ids(session=None): return [ (c.conn_id, c.conn_id) for c in ( session.query(models.Connection.conn_id) .group_by(models.Connection.conn_id) ) ] class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "chart" verbose_name_plural = "charts" form_columns = ( 'label', 'owner', 'conn_id', 'chart_type', 'show_datatable', 'x_is_date', 'y_log_scale', 'show_sql', 'height', 'sql_layout', 'sql', 'default_params', ) column_list = ( 'label', 'conn_id', 'chart_type', 'owner', 'last_modified', ) column_sortable_list = ( 'label', 'conn_id', 'chart_type', ('owner', 'owner.username'), 'last_modified', ) column_formatters = dict(label=label_link, last_modified=datetime_f) column_default_sort = ('last_modified', True) create_template = 'airflow/chart/create.html' edit_template = 'airflow/chart/edit.html' column_filters = ('label', 'owner.username', 'conn_id') column_searchable_list = ('owner.username', 'label', 'sql') column_descriptions = { 'label': "Can include {{ templated_fields }} and {{ macros }}", 'chart_type': "The type of chart to be displayed", 'sql': "Can include {{ templated_fields }} and {{ macros }}.", 'height': "Height of the chart, in pixels.", 'conn_id': "Source database to run the query against", 'x_is_date': ( "Whether the X axis should be casted as a date field. Expect most " "intelligible date formats to get casted properly." ), 'owner': ( "The chart's owner, mostly used for reference and filtering in " "the list view." ), 'show_datatable': "Whether to display an interactive data table under the chart.", 'default_params': ( 'A dictionary of {"key": "values",} that define what the ' 'templated fields (parameters) values should be by default. ' 'To be valid, it needs to "eval" as a Python dict. ' 'The key values will show up in the url\'s querystring ' 'and can be altered there.' 
), 'show_sql': "Whether to display the SQL statement as a collapsible " "section in the chart page.", 'y_log_scale': "Whether to use a log scale for the Y axis.", 'sql_layout': ( "Defines the layout of the SQL that the application should " "expect. Depending on the tables you are sourcing from, it may " "make more sense to pivot / unpivot the metrics." ), } column_labels = { 'sql': "SQL", 'height': "Chart Height", 'sql_layout': "SQL Layout", 'show_sql': "Display the SQL Statement", 'default_params': "Default Parameters", } form_choices = { 'chart_type': [ ('line', 'Line Chart'), ('spline', 'Spline Chart'), ('bar', 'Bar Chart'), ('column', 'Column Chart'), ('area', 'Overlapping Area Chart'), ('stacked_area', 'Stacked Area Chart'), ('percent_area', 'Percent Area Chart'), ('datatable', 'No chart, data table only'), ], 'sql_layout': [ ('series', 'SELECT series, x, y FROM ...'), ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'), ], 'conn_id': _connection_ids() } def on_model_change(self, form, model, is_created=True): if model.iteration_no is None: model.iteration_no = 0 else: model.iteration_no += 1 if not model.user_id and current_user and hasattr(current_user, 'id'): model.user_id = current_user.id model.last_modified = timezone.utcnow() chart_mapping = ( ('line', 'lineChart'), ('spline', 'lineChart'), ('bar', 'multiBarChart'), ('column', 'multiBarChart'), ('area', 'stackedAreaChart'), ('stacked_area', 'stackedAreaChart'), ('percent_area', 'stackedAreaChart'), ('datatable', 'datatable'), ) chart_mapping = dict(chart_mapping) class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "known event" verbose_name_plural = "known events" form_columns = ( 'label', 'event_type', 'start_date', 'end_date', 'reported_by', 'description', ) form_args = { 'label': { 'validators': [ validators.DataRequired(), ], }, 'event_type': { 'validators': [ validators.DataRequired(), ], }, 'start_date': { 'validators': [ validators.DataRequired(), ], 'filters': [ parse_datetime_f, ], }, 'end_date': { 'validators': [ validators.DataRequired(), GreaterEqualThan(fieldname='start_date'), ], 'filters': [ parse_datetime_f, ] }, 'reported_by': { 'validators': [ validators.DataRequired(), ], } } column_list = ( 'label', 'event_type', 'start_date', 'end_date', 'reported_by', ) column_default_sort = ("start_date", True) column_sortable_list = ( 'label', # todo: yes this has a spelling error ('event_type', 'event_type.know_event_type'), 'start_date', 'end_date', ('reported_by', 'reported_by.username'), ) filter_converter = wwwutils.UtcFilterConverter() form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField) class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView): pass # NOTE: For debugging / troubleshooting # mv = KnowEventTypeView( # models.KnownEventType, # Session, name="Known Event Types", category="Manage") # admin.add_view(mv) # class DagPickleView(SuperUserMixin, ModelView): # pass # mv = DagPickleView( # models.DagPickle, # Session, name="Pickles", category="Manage") # admin.add_view(mv) class VariableView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "Variable" verbose_name_plural = "Variables" list_template = 'airflow/variable_list.html' def hidden_field_formatter(view, context, model, name): if wwwutils.should_hide_value_for_key(model.key): return Markup('*' * 8) val = getattr(model, name) if val: return val else: return Markup('<span class="label label-danger">Invalid</span>') form_columns = ( 'key', 'val', ) column_list = 
('key', 'val', 'is_encrypted',) column_filters = ('key', 'val') column_searchable_list = ('key', 'val', 'is_encrypted',) column_default_sort = ('key', False) form_widget_args = { 'is_encrypted': {'disabled': True}, 'val': { 'rows': 20, } } form_args = { 'key': { 'validators': { validators.DataRequired(), }, }, } column_sortable_list = ( 'key', 'val', 'is_encrypted', ) column_formatters = { 'val': hidden_field_formatter, } # Default flask-admin export functionality doesn't handle serialized json @action('varexport', 'Export', None) @provide_session def action_varexport(self, ids, session=None): V = models.Variable qry = session.query(V).filter(V.id.in_(ids)).all() var_dict = {} d = json.JSONDecoder() for var in qry: val = None try: val = d.decode(var.val) except: val = var.val var_dict[var.key] = val response = make_response(json.dumps(var_dict, sort_keys=True, indent=4)) response.headers["Content-Disposition"] = "attachment; filename=variables.json" return response def on_form_prefill(self, form, id): if wwwutils.should_hide_value_for_key(form.key.data): form.val.data = '*' * 8 class XComView(wwwutils.SuperUserMixin, AirflowModelView): verbose_name = "XCom" verbose_name_plural = "XComs" form_columns = ( 'key', 'value', 'execution_date', 'task_id', 'dag_id', ) form_extra_fields = { 'value': StringField('Value'), } form_args = { 'execution_date': { 'filters': [ parse_datetime_f, ] } } column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id') column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id') filter_converter = wwwutils.UtcFilterConverter() form_overrides = dict(execution_date=DateTimeField) class JobModelView(ModelViewOnly): verbose_name_plural = "jobs" verbose_name = "job" column_display_actions = False column_default_sort = ('start_date', True) column_filters = ( 'job_type', 'dag_id', 'state', 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat') column_formatters = dict( start_date=datetime_f, end_date=datetime_f, hostname=nobr_f, state=state_f, latest_heartbeat=datetime_f) filter_converter = wwwutils.UtcFilterConverter() class DagRunModelView(ModelViewOnly): verbose_name_plural = "DAG Runs" can_edit = True can_create = True column_editable_list = ('state',) verbose_name = "dag run" column_default_sort = ('execution_date', True) form_choices = { 'state': [ ('success', 'success'), ('running', 'running'), ('failed', 'failed'), ], } form_args = dict( dag_id=dict(validators=[validators.DataRequired()]) ) column_list = ( 'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger') column_filters = column_list filter_converter = wwwutils.UtcFilterConverter() column_searchable_list = ('dag_id', 'state', 'run_id') column_formatters = dict( execution_date=datetime_f, state=state_f, start_date=datetime_f, dag_id=dag_link, run_id=dag_run_link ) @action('new_delete', "Delete", "Are you sure you want to delete selected records?") @provide_session def action_new_delete(self, ids, session=None): deleted = set(session.query(models.DagRun) .filter(models.DagRun.id.in_(ids)) .all()) session.query(models.DagRun) \ .filter(models.DagRun.id.in_(ids)) \ .delete(synchronize_session='fetch') session.commit() dirty_ids = [] for row in deleted: dirty_ids.append(row.dag_id) models.DagStat.update(dirty_ids, dirty_only=False, session=session) @action('set_running', "Set state to 'running'", None) @provide_session def action_set_running(self, ids, session=None): try: DR = models.DagRun count = 0 dirty_ids = [] for dr in 
session.query(DR).filter(DR.id.in_(ids)).all(): dirty_ids.append(dr.dag_id) count += 1 dr.state = State.RUNNING dr.start_date = timezone.utcnow() models.DagStat.update(dirty_ids, session=session) flash( "{count} dag runs were set to running".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') @action('set_failed', "Set state to 'failed'", "All running task instances would also be marked as failed, are you sure?") @provide_session def action_set_failed(self, ids, session=None): try: DR = models.DagRun count = 0 dirty_ids = [] altered_tis = [] for dr in session.query(DR).filter(DR.id.in_(ids)).all(): dirty_ids.append(dr.dag_id) count += 1 altered_tis += \ set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session) models.DagStat.update(dirty_ids, session=session) altered_ti_count = len(altered_tis) flash( "{count} dag runs and {altered_ti_count} task instances " "were set to failed".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') @action('set_success', "Set state to 'success'", "All task instances would also be marked as success, are you sure?") @provide_session def action_set_success(self, ids, session=None): try: DR = models.DagRun count = 0 dirty_ids = [] altered_tis = [] for dr in session.query(DR).filter(DR.id.in_(ids)).all(): dirty_ids.append(dr.dag_id) count += 1 altered_tis += \ set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session) models.DagStat.update(dirty_ids, session=session) altered_ti_count = len(altered_tis) flash( "{count} dag runs and {altered_ti_count} task instances " "were set to success".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') # Called after editing DagRun model in the UI. 
@provide_session def after_model_change(self, form, dagrun, is_created, session=None): altered_tis = [] if dagrun.state == State.SUCCESS: altered_tis = set_dag_run_state_to_success( dagbag.get_dag(dagrun.dag_id), dagrun.execution_date, commit=True) elif dagrun.state == State.FAILED: altered_tis = set_dag_run_state_to_failed( dagbag.get_dag(dagrun.dag_id), dagrun.execution_date, commit=True, session=session) elif dagrun.state == State.RUNNING: altered_tis = set_dag_run_state_to_running( dagbag.get_dag(dagrun.dag_id), dagrun.execution_date, commit=True, session=session) altered_ti_count = len(altered_tis) models.DagStat.update([dagrun.dag_id], session=session) flash( "1 dag run and {altered_ti_count} task instances " "were set to '{dagrun.state}'".format(**locals())) class LogModelView(ModelViewOnly): verbose_name_plural = "logs" verbose_name = "log" column_display_actions = False column_default_sort = ('dttm', True) column_filters = ('dag_id', 'task_id', 'execution_date', 'extra') filter_converter = wwwutils.UtcFilterConverter() column_formatters = dict( dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link) class TaskInstanceModelView(ModelViewOnly): verbose_name_plural = "task instances" verbose_name = "task instance" column_filters = ( 'state', 'dag_id', 'task_id', 'execution_date', 'hostname', 'queue', 'pool', 'operator', 'start_date', 'end_date') filter_converter = wwwutils.UtcFilterConverter() named_filter_urls = True column_formatters = dict( log_url=log_url_formatter, task_id=task_instance_link, hostname=nobr_f, state=state_f, execution_date=datetime_f, start_date=datetime_f, end_date=datetime_f, queued_dttm=datetime_f, dag_id=dag_link, run_id=dag_run_link, duration=duration_f) column_searchable_list = ('dag_id', 'task_id', 'state') column_default_sort = ('job_id', True) form_choices = { 'state': [ ('success', 'success'), ('running', 'running'), ('failed', 'failed'), ], } column_list = ( 'state', 'dag_id', 'task_id', 'execution_date', 'operator', 'start_date', 'end_date', 'duration', 'job_id', 'hostname', 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number', 'pool', 'log_url') page_size = PAGE_SIZE @action('set_running', "Set state to 'running'", None) def action_set_running(self, ids): self.set_task_instance_state(ids, State.RUNNING) @action('set_failed', "Set state to 'failed'", None) def action_set_failed(self, ids): self.set_task_instance_state(ids, State.FAILED) @action('set_success', "Set state to 'success'", None) def action_set_success(self, ids): self.set_task_instance_state(ids, State.SUCCESS) @action('set_retry', "Set state to 'up_for_retry'", None) def action_set_retry(self, ids): self.set_task_instance_state(ids, State.UP_FOR_RETRY) @provide_session @action('clear', lazy_gettext('Clear'), lazy_gettext( 'Are you sure you want to clear the state of the selected task instance(s)' ' and set their dagruns to the running state?')) def action_clear(self, ids, session=None): try: TI = models.TaskInstance dag_to_task_details = {} dag_to_tis = {} # Collect dags upfront as dagbag.get_dag() will reset the session for id_str in ids: task_id, dag_id, execution_date = iterdecode(id_str) dag = dagbag.get_dag(dag_id) task_details = dag_to_task_details.setdefault(dag, []) task_details.append((task_id, execution_date)) for dag, task_details in dag_to_task_details.items(): for task_id, execution_date in task_details: execution_date = parse_execution_date(execution_date) ti = session.query(TI).filter(TI.task_id == task_id, TI.dag_id == dag.dag_id, TI.execution_date == 
execution_date).one() tis = dag_to_tis.setdefault(dag, []) tis.append(ti) for dag, tis in dag_to_tis.items(): models.clear_task_instances(tis, session, dag=dag) session.commit() flash("{0} task instances have been cleared".format(len(ids))) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to clear task instances', 'error') @provide_session def set_task_instance_state(self, ids, target_state, session=None): try: TI = models.TaskInstance count = len(ids) for id in ids: task_id, dag_id, execution_date = iterdecode(id) execution_date = parse_execution_date(execution_date) ti = session.query(TI).filter(TI.task_id == task_id, TI.dag_id == dag_id, TI.execution_date == execution_date).one() ti.state = target_state session.commit() flash( "{count} task instances were set to '{target_state}'".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') def get_one(self, id): """ As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one(). TODO: this method should be removed once the below bug is fixed on Flask-Admin side. https://github.com/flask-admin/flask-admin/issues/1226 """ task_id, dag_id, execution_date = iterdecode(id) execution_date = pendulum.parse(execution_date) return self.session.query(self.model).get((task_id, dag_id, execution_date)) class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView): create_template = 'airflow/conn_create.html' edit_template = 'airflow/conn_edit.html' list_template = 'airflow/conn_list.html' form_columns = ( 'conn_id', 'conn_type', 'host', 'schema', 'login', 'password', 'port', 'extra', 'extra__jdbc__drv_path', 'extra__jdbc__drv_clsname', 'extra__google_cloud_platform__project', 'extra__google_cloud_platform__key_path', 'extra__google_cloud_platform__keyfile_dict', 'extra__google_cloud_platform__scope', ) verbose_name = "Connection" verbose_name_plural = "Connections" column_default_sort = ('conn_id', False) column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',) form_overrides = dict(_password=PasswordField, _extra=TextAreaField) form_widget_args = { 'is_extra_encrypted': {'disabled': True}, 'is_encrypted': {'disabled': True}, } # Used to customized the form, the forms elements get rendered # and results are stored in the extra field as json. All of these # need to be prefixed with extra__ and then the conn_type ___ as in # extra__{conn_type}__name. 
You can also hide form elements and rename # others from the connection_form.js file form_extra_fields = { 'extra__jdbc__drv_path': StringField('Driver Path'), 'extra__jdbc__drv_clsname': StringField('Driver Class'), 'extra__google_cloud_platform__project': StringField('Project Id'), 'extra__google_cloud_platform__key_path': StringField('Keyfile Path'), 'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'), 'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'), } form_choices = { 'conn_type': models.Connection._types } def on_model_change(self, form, model, is_created): formdata = form.data if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']: extra = { key: formdata[key] for key in self.form_extra_fields.keys() if key in formdata} model.extra = json.dumps(extra) @classmethod def alert_fernet_key(cls): fk = None try: fk = conf.get('core', 'fernet_key') except: pass return fk is None @classmethod def is_secure(cls): """ Used to display a message in the Connection list view making it clear that the passwords and `extra` field can't be encrypted. """ is_secure = False try: import cryptography conf.get('core', 'fernet_key') is_secure = True except: pass return is_secure def on_form_prefill(self, form, id): try: d = json.loads(form.data.get('extra', '{}')) except Exception: d = {} for field in list(self.form_extra_fields.keys()): value = d.get(field, '') if value: field = getattr(form, field) field.data = value class UserModelView(wwwutils.SuperUserMixin, AirflowModelView): verbose_name = "User" verbose_name_plural = "Users" column_default_sort = 'username' class VersionView(wwwutils.SuperUserMixin, BaseView): @expose('/') def version(self): # Look at the version from setup.py try: airflow_version = pkg_resources.require("apache-airflow")[0].version except Exception as e: airflow_version = None logging.error(e) # Get the Git repo and git hash git_version = None try: with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f: git_version = f.readline() except Exception as e: logging.error(e) # Render information title = "Version Info" return self.render('airflow/version.html', title=title, airflow_version=airflow_version, git_version=git_version) class ConfigurationView(wwwutils.SuperUserMixin, BaseView): @expose('/') def conf(self): raw = request.args.get('raw') == "true" title = "Airflow Configuration" subtitle = conf.AIRFLOW_CONFIG if conf.getboolean("webserver", "expose_config"): with open(conf.AIRFLOW_CONFIG, 'r') as f: config = f.read() table = [(section, key, value, source) for section, parameters in conf.as_dict(True, True).items() for key, (value, source) in parameters.items()] else: config = ( "# Your Airflow administrator chose not to expose the " "configuration, most likely for security reasons.") table = None if raw: return Response( response=config, status=200, mimetype="application/text") else: code_html = Markup(highlight( config, lexers.IniLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) return self.render( 'airflow/config.html', pre_subtitle=settings.HEADER + " v" + airflow.__version__, code_html=code_html, title=title, subtitle=subtitle, table=table) class DagModelView(wwwutils.SuperUserMixin, ModelView): column_list = ('dag_id', 'owners') column_editable_list = ('is_paused',) form_excluded_columns = ('is_subdag', 'is_active') column_searchable_list = ('dag_id',) column_filters = ( 'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag', 'last_scheduler_run', 'last_expired') filter_converter 
= wwwutils.UtcFilterConverter() form_widget_args = { 'last_scheduler_run': {'disabled': True}, 'fileloc': {'disabled': True}, 'is_paused': {'disabled': True}, 'last_pickled': {'disabled': True}, 'pickle_id': {'disabled': True}, 'last_loaded': {'disabled': True}, 'last_expired': {'disabled': True}, 'pickle_size': {'disabled': True}, 'scheduler_lock': {'disabled': True}, 'owners': {'disabled': True}, } column_formatters = dict( dag_id=dag_link, ) can_delete = False can_create = False page_size = PAGE_SIZE list_template = 'airflow/list_dags.html' named_filter_urls = True def get_query(self): """ Default filters for model """ return ( super(DagModelView, self) .get_query() .filter(or_(models.DagModel.is_active, models.DagModel.is_paused)) .filter(~models.DagModel.is_subdag) ) def get_count_query(self): """ Default filters for model """ return ( super(DagModelView, self) .get_count_query() .filter(models.DagModel.is_active) .filter(~models.DagModel.is_subdag) )
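# ------------------------------------------------------------------------------
# A minimal, standalone sketch of the paging arithmetic used in HomeView.index
# above (the start/end slicing plus the ceil-based page count). The names
# `items`, `page` and `page_size` are illustrative and not part of the Airflow
# API.
import math


def paginate(items, page, page_size):
    """Return the slice of ``items`` for ``page`` and the total page count."""
    num_of_pages = int(math.ceil(len(items) / float(page_size)))
    start = page * page_size
    end = start + page_size
    return items[start:end], num_of_pages


# Example: 7 dag ids with a page size of 3 -> pages of 3, 3 and 1 entries.
# paginate(['a', 'b', 'c', 'd', 'e', 'f', 'g'], page=1, page_size=3)
# returns (['d', 'e', 'f'], 3)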
license: apache-2.0

repo_name: hsaputra/tensorflow
path: tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
copies: 8
size: 31357
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementations of different data feeders to provide data for TF trainer.""" # TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues. from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging # pylint: disable=g-multiple-import,g-bad-import-order from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels # pylint: enable=g-multiple-import,g-bad-import-order def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None): """Returns shape for input and output of the data feeder.""" x_is_dict, y_is_dict = isinstance( x_shape, dict), y_shape is not None and isinstance(y_shape, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) if batch_size is None: batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0] elif batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) if x_is_dict: input_shape = {} for k, v in list(x_shape.items()): input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1]) else: x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1] input_shape = [batch_size] + x_shape if y_shape is None: return input_shape, None, batch_size def out_el_shape(out_shape, num_classes): out_shape = list(out_shape[1:]) if len(out_shape) > 1 else [] # Skip first dimension if it is 1. if out_shape and out_shape[0] == 1: out_shape = out_shape[1:] if num_classes is not None and num_classes > 1: return [batch_size] + out_shape + [num_classes] else: return [batch_size] + out_shape if not y_is_dict: output_shape = out_el_shape(y_shape, n_classes) else: output_shape = dict([ (k, out_el_shape(v, n_classes[k] if n_classes is not None and k in n_classes else None)) for k, v in list(y_shape.items()) ]) return input_shape, output_shape, batch_size def _data_type_filter(x, y): """Filter data types into acceptable format.""" if HAS_DASK: x = extract_dask_data(x) if y is not None: y = extract_dask_labels(y) if HAS_PANDAS: x = extract_pandas_data(x) if y is not None: y = extract_pandas_labels(y) return x, y def _is_iterable(x): return hasattr(x, 'next') or hasattr(x, '__next__') def setup_train_data_feeder(x, y, n_classes, batch_size=None, shuffle=True, epochs=None): """Create data feeder, to sample inputs from dataset. If `x` and `y` are iterators, use `StreamingDataFeeder`. Args: x: numpy, pandas or Dask matrix or dictionary of aforementioned. 
Also supports iterables. y: numpy, pandas or Dask array or dictionary of aforementioned. Also supports iterables. n_classes: number of classes. Must be None or same type as y. In case, `y` is `dict` (or iterable which returns dict) such that `n_classes[key] = n_classes for y[key]` batch_size: size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: DataFeeder object that returns training data. Raises: ValueError: if one of `x` and `y` is iterable and the other is not. """ x, y = _data_type_filter(x, y) if HAS_DASK: # pylint: disable=g-import-not-at-top import dask.dataframe as dd if (isinstance(x, (dd.Series, dd.DataFrame)) and (y is None or isinstance(y, (dd.Series, dd.DataFrame)))): data_feeder_cls = DaskDataFeeder else: data_feeder_cls = DataFeeder else: data_feeder_cls = DataFeeder if _is_iterable(x): if y is not None and not _is_iterable(y): raise ValueError('Both x and y should be iterators for ' 'streaming learning to work.') return StreamingDataFeeder(x, y, n_classes, batch_size) return data_feeder_cls( x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs) def _batch_data(x, batch_size=None): if (batch_size is not None) and (batch_size <= 0): raise ValueError('Invalid batch_size %d.' % batch_size) x_first_el = six.next(x) x = itertools.chain([x_first_el], x) chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False for data in x: if isinstance(data, dict): for k, v in list(data.items()): chunk[k].append(v) if (batch_size is not None) and (len(chunk[k]) >= batch_size): chunk[k] = np.matrix(chunk[k]) chunk_filled = True if chunk_filled: yield chunk chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False else: chunk.append(data) if (batch_size is not None) and (len(chunk) >= batch_size): yield np.matrix(chunk) chunk = [] if isinstance(x_first_el, dict): for k, v in list(data.items()): chunk[k] = np.matrix(chunk[k]) yield chunk else: yield np.matrix(chunk) def setup_predict_data_feeder(x, batch_size=None): """Returns an iterable for feeding into predict step. Args: x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports iterable. batch_size: Size of batches to split data into. If `None`, returns one batch of full size. Returns: List or iterator (or dictionary thereof) of parts of data to predict on. Raises: ValueError: if `batch_size` <= 0. """ if HAS_DASK: x = extract_dask_data(x) if HAS_PANDAS: x = extract_pandas_data(x) if _is_iterable(x): return _batch_data(x, batch_size) if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) if batch_size is not None: if batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) n_batches = int(math.ceil(float(len(x)) / batch_size)) return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)] return [x] def setup_processor_data_feeder(x): """Sets up processor iterable. Args: x: numpy, pandas or iterable. Returns: Iterable of data to process. """ if HAS_PANDAS: x = extract_pandas_matrix(x) return x def check_array(array, dtype): """Checks array on dtype and converts it if different. Args: array: Input array. dtype: Expected dtype. Returns: Original array or converted. """ # skip check if array is instance of other classes, e.g. 
h5py.Dataset # to avoid copying array and loading whole data into memory if isinstance(array, (np.ndarray, list)): array = np.array(array, dtype=dtype, order=None, copy=False) return array def _access(data, iloc): """Accesses an element from collection, using integer location based indexing. Args: data: array-like. The collection to access iloc: `int` or `list` of `int`s. Location(s) to access in `collection` Returns: The element of `a` found at location(s) `iloc`. """ if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame): return data.iloc[iloc] return data[iloc] def _check_dtype(dtype): if dtypes.as_dtype(dtype) == dtypes.float64: logging.warn( 'float64 is not supported by many models, consider casting to float32.') return dtype class DataFeeder(object): """Data feeder is an example class to sample data for TF trainer.""" def __init__(self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None, epochs=None): """Initializes a DataFeeder instance. Args: x: One feature sample which can either Nd numpy matrix of shape `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix. y: label vector, either floats for regression or class id for classification. If matrix, will consider as a sequence of labels. Can be `None` for unsupervised setting. Also supports dictionary of labels. n_classes: Number of classes, 0 and 1 are considered regression, `None` will pass through the input labels without one-hot conversion. Also, if `y` is `dict`, then `n_classes` must be `dict` such that `n_classes[key] = n_classes for label y[key]`, `None` otherwise. batch_size: Mini-batch size to accumulate samples in one mini batch. shuffle: Whether to shuffle `x`. random_state: Numpy `RandomState` object to reproduce sampling. epochs: Number of times to iterate over input data before raising `StopIteration` exception. Attributes: x: Input features (ndarray or dictionary of ndarrays). y: Input label (ndarray or dictionary of ndarrays). n_classes: Number of classes (if `None`, pass through indices without one-hot conversion). batch_size: Mini-batch size to accumulate. input_shape: Shape of the input (or dictionary of shapes). output_shape: Shape of the output (or dictionary of shapes). input_dtype: DType of input (or dictionary of shapes). output_dtype: DType of output (or dictionary of shapes. """ x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance( y, dict) if isinstance(y, list): y = np.array(y) self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items()) ]) if x_is_dict else check_array(x, x.dtype) self._y = None if y is None else ( dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)) # self.n_classes is not None means we're converting raw target indices # to one-hot. if n_classes is not None: if not y_is_dict: y_dtype = (np.int64 if n_classes is not None and n_classes > 1 else np.float32) self._y = (None if y is None else check_array(y, dtype=y_dtype)) self.n_classes = n_classes self.max_epochs = epochs x_shape = dict([(k, v.shape) for k, v in list(self._x.items()) ]) if x_is_dict else self._x.shape y_shape = dict([(k, v.shape) for k, v in list(self._y.items()) ]) if y_is_dict else None if y is None else self._y.shape self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) # Input dtype matches dtype of x. 
self._input_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict else _check_dtype(self._x.dtype)) # self._output_dtype == np.float32 when y is None self._output_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict else ( _check_dtype(self._y.dtype) if y is not None else np.float32)) # self.n_classes is None means we're passing in raw target indices if n_classes is not None and y_is_dict: for key in list(n_classes.keys()): if key in self._output_dtype: self._output_dtype[key] = np.float32 self._shuffle = shuffle self.random_state = np.random.RandomState( 42) if random_state is None else random_state if x_is_dict: num_samples = list(self._x.values())[0].shape[0] elif tensor_util.is_tensor(self._x): num_samples = self._x.shape[ 0].value # shape will be a Dimension, extract an int else: num_samples = self._x.shape[0] if self._shuffle: self.indices = self.random_state.permutation(num_samples) else: self.indices = np.array(range(num_samples)) self.offset = 0 self.epoch = 0 self._epoch_placeholder = None @property def x(self): return self._x @property def y(self): return self._y @property def shuffle(self): return self._shuffle @property def input_dtype(self): return self._input_dtype @property def output_dtype(self): return self._output_dtype @property def batch_size(self): return self._batch_size def make_epoch_variable(self): """Adds a placeholder variable for the epoch to the graph. Returns: The epoch placeholder. """ self._epoch_placeholder = array_ops.placeholder( dtypes.int32, [1], name='epoch') return self._epoch_placeholder def input_builder(self): """Builds inputs in the graph. Returns: Two placeholders for inputs and outputs. """ def get_placeholder(shape, dtype, name_prepend): if shape is None: return None if isinstance(shape, dict): placeholder = {} for key in list(shape.keys()): placeholder[key] = array_ops.placeholder( dtypes.as_dtype(dtype[key]), [None] + shape[key][1:], name=name_prepend + '_' + key) else: placeholder = array_ops.placeholder( dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend) return placeholder self._input_placeholder = get_placeholder(self.input_shape, self._input_dtype, 'input') self._output_placeholder = get_placeholder(self.output_shape, self._output_dtype, 'output') return self._input_placeholder, self._output_placeholder def set_placeholders(self, input_placeholder, output_placeholder): """Sets placeholders for this data feeder. Args: input_placeholder: Placeholder for `x` variable. Should match shape of the examples in the x dataset. output_placeholder: Placeholder for `y` variable. Should match shape of the examples in the y dataset. Can be `None`. """ self._input_placeholder = input_placeholder self._output_placeholder = output_placeholder def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return { 'epoch': self.epoch, 'offset': self.offset, 'batch_size': self._batch_size } def get_feed_dict_fn(self): """Returns a function that samples data into given placeholders. Returns: A function that when called samples a random subset of batch size from `x` and `y`. """ x_is_dict, y_is_dict = isinstance( self._x, dict), self._y is not None and isinstance(self._y, dict) # Assign input features from random indices. 
def extract(data, indices): return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if len(data.shape) == 1 else _access(data, indices)) # assign labels from random indices def assign_label(data, shape, dtype, n_classes, indices): shape[0] = indices.shape[0] out = np.zeros(shape, dtype=dtype) for i in xrange(out.shape[0]): sample = indices[i] # self.n_classes is None means we're passing in raw target indices if n_classes is None: out[i] = _access(data, sample) else: if n_classes > 1: if len(shape) == 2: out.itemset((i, int(_access(data, sample))), 1.0) else: for idx, value in enumerate(_access(data, sample)): out.itemset(tuple([i, idx, value]), 1.0) else: out[i] = _access(data, sample) return out def _feed_dict_fn(): """Function that samples data into given placeholders.""" if self.max_epochs is not None and self.epoch + 1 > self.max_epochs: raise StopIteration assert self._input_placeholder is not None feed_dict = {} if self._epoch_placeholder is not None: feed_dict[self._epoch_placeholder.name] = [self.epoch] # Take next batch of indices. x_len = list(self._x.values())[0].shape[ 0] if x_is_dict else self._x.shape[0] end = min(x_len, self.offset + self._batch_size) batch_indices = self.indices[self.offset:end] # adding input placeholder feed_dict.update( dict([(self._input_placeholder[k].name, extract(v, batch_indices)) for k, v in list(self._x.items())]) if x_is_dict else {self._input_placeholder.name: extract(self._x, batch_indices)}) # move offset and reset it if necessary self.offset += self._batch_size if self.offset >= x_len: self.indices = self.random_state.permutation( x_len) if self._shuffle else np.array(range(x_len)) self.offset = 0 self.epoch += 1 # return early if there are no labels if self._output_placeholder is None: return feed_dict # adding output placeholders if y_is_dict: for k, v in list(self._y.items()): n_classes = (self.n_classes[k] if k in self.n_classes else None) if self.n_classes is not None else None shape, dtype = self.output_shape[k], self._output_dtype[k] feed_dict.update({ self._output_placeholder[k].name: assign_label(v, shape, dtype, n_classes, batch_indices) }) else: shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes feed_dict.update({ self._output_placeholder.name: assign_label(self._y, shape, dtype, n_classes, batch_indices) }) return feed_dict return _feed_dict_fn class StreamingDataFeeder(DataFeeder): """Data feeder for TF trainer that reads data from iterator. Streaming data feeder allows to read data as it comes it from disk or somewhere else. It's custom to have this iterators rotate infinetly over the dataset, to allow control of how much to learn on the trainer side. """ def __init__(self, x, y, n_classes, batch_size): """Initializes a StreamingDataFeeder instance. Args: x: iterator each element of which returns one feature sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices. y: iterator each element of which returns one label sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many classes regression values. n_classes: indicator of how many classes the corresponding label sample has for the purposes of one-hot conversion of label. In case where `y` is a dictionary, `n_classes` must be dictionary (with same keys as `y`) of how many classes there are in each label in `y`. If key is present in `y` and missing in `n_classes`, the value is assumed `None` and no one-hot conversion will be applied to the label with that key. 
batch_size: Mini batch size to accumulate samples in one batch. If set `None`, then assumes that iterator to return already batched element. Attributes: x: input features (or dictionary of input features). y: input label (or dictionary of output features). n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input (can be dictionary depending on `x`). output_shape: shape of the output (can be dictionary depending on `y`). input_dtype: dtype of input (can be dictionary depending on `x`). output_dtype: dtype of output (can be dictionary depending on `y`). """ # pylint: disable=invalid-name,super-init-not-called x_first_el = six.next(x) self._x = itertools.chain([x_first_el], x) if y is not None: y_first_el = six.next(y) self._y = itertools.chain([y_first_el], y) else: y_first_el = None self._y = None self.n_classes = n_classes x_is_dict = isinstance(x_first_el, dict) y_is_dict = y is not None and isinstance(y_first_el, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) # extract shapes for first_elements if x_is_dict: x_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())]) else: x_first_el_shape = [1] + list(x_first_el.shape) if y_is_dict: y_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())]) elif y is None: y_first_el_shape = None else: y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance( y_first_el, list) else y_first_el.shape)) self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_first_el_shape, y_first_el_shape, n_classes, batch_size) # Input dtype of x_first_el. if x_is_dict: self._input_dtype = dict( [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())]) else: self._input_dtype = _check_dtype(x_first_el.dtype) # Output dtype of y_first_el. def check_y_dtype(el): if isinstance(el, np.ndarray): return el.dtype elif isinstance(el, list): return check_y_dtype(el[0]) else: return _check_dtype(np.dtype(type(el))) # Output types are floats, due to both softmaxes and regression req. if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0: self._output_dtype = np.float32 elif y_is_dict: self._output_dtype = dict( [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())]) elif y is None: self._output_dtype = None else: self._output_dtype = check_y_dtype(y_first_el) def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self): """Returns a function, that will sample data and provide it to placeholders. Returns: A function that when called samples a random subset of batch size from x and y. """ self.stopped = False def _feed_dict_fn(): """Samples data and provides it to placeholders. Returns: `dict` of input and output tensors. 
""" def init_array(shape, dtype): """Initialize array of given shape or dict of shapes and dtype.""" if shape is None: return None elif isinstance(shape, dict): return dict([(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())]) else: return np.zeros(shape, dtype=dtype) def put_data_array(dest, index, source=None, n_classes=None): """Puts data array into container.""" if source is None: dest = dest[:index] elif n_classes is not None and n_classes > 1: if len(self.output_shape) == 2: dest.itemset((index, source), 1.0) else: for idx, value in enumerate(source): dest.itemset(tuple([index, idx, value]), 1.0) else: if len(dest.shape) > 1: dest[index, :] = source else: dest[index] = source[0] if isinstance(source, list) else source return dest def put_data_array_or_dict(holder, index, data=None, n_classes=None): """Puts data array or data dictionary into container.""" if holder is None: return None if isinstance(holder, dict): if data is None: data = {k: None for k in holder.keys()} assert isinstance(data, dict) for k in holder.keys(): num_classes = n_classes[k] if (n_classes is not None and k in n_classes) else None holder[k] = put_data_array(holder[k], index, data[k], num_classes) else: holder = put_data_array(holder, index, data, n_classes) return holder if self.stopped: raise StopIteration inp = init_array(self.input_shape, self._input_dtype) out = init_array(self.output_shape, self._output_dtype) for i in xrange(self._batch_size): # Add handling when queue ends. try: next_inp = six.next(self._x) inp = put_data_array_or_dict(inp, i, next_inp, None) except StopIteration: self.stopped = True if i == 0: raise inp = put_data_array_or_dict(inp, i, None, None) out = put_data_array_or_dict(out, i, None, None) break if self._y is not None: next_out = six.next(self._y) out = put_data_array_or_dict(out, i, next_out, self.n_classes) # creating feed_dict if isinstance(inp, dict): feed_dict = dict([(self._input_placeholder[k].name, inp[k]) for k in list(self._input_placeholder.keys())]) else: feed_dict = {self._input_placeholder.name: inp} if self._y is not None: if isinstance(out, dict): feed_dict.update( dict([(self._output_placeholder[k].name, out[k]) for k in list(self._output_placeholder.keys())])) else: feed_dict.update({self._output_placeholder.name: out}) return feed_dict return _feed_dict_fn class DaskDataFeeder(object): """Data feeder for that reads data from dask.Series and dask.DataFrame. Numpy arrays can be serialized to disk and it's possible to do random seeks into them. DaskDataFeeder will remove requirement to have full dataset in the memory and still do random seeks for sampling of batches. """ def __init__(self, x, y, n_classes, batch_size, shuffle=True, random_state=None, epochs=None): """Initializes a DaskDataFeeder instance. Args: x: iterator that returns for each element, returns features. y: iterator that returns for each element, returns 1 or many classes / regression values. n_classes: indicator of how many classes the label has. batch_size: Mini batch size to accumulate. shuffle: Whether to shuffle the inputs. random_state: random state for RNG. Note that it will mutate so use a int value for this if you want consistent sized batches. epochs: Number of epochs to run. Attributes: x: input features. y: input label. n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input. output_shape: shape of the output. input_dtype: dtype of input. output_dtype: dtype of output. 
Raises: ValueError: if `x` or `y` are `dict`, as they are not supported currently. """ if isinstance(x, dict) or isinstance(y, dict): raise ValueError( 'DaskDataFeeder does not support dictionaries at the moment.') # pylint: disable=invalid-name,super-init-not-called import dask.dataframe as dd # pylint: disable=g-import-not-at-top # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas self._x = x self._y = y # save column names self._x_columns = list(x.columns) if isinstance(y.columns[0], str): self._y_columns = list(y.columns) else: # deal with cases where two DFs have overlapped default numeric colnames self._y_columns = len(self._x_columns) + 1 self._y = self._y.rename(columns={y.columns[0]: self._y_columns}) # TODO(terrytangyuan): deal with unsupervised cases # combine into a data frame self.df = dd.multi.concat([self._x, self._y], axis=1) self.n_classes = n_classes x_count = x.count().compute()[0] x_shape = (x_count, len(self._x.columns)) y_shape = (x_count, len(self._y.columns)) # TODO(terrytangyuan): Add support for shuffle and epochs. self._shuffle = shuffle self.epochs = epochs self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) self.sample_fraction = self._batch_size / float(x_count) self._input_dtype = _check_dtype(self._x.dtypes[0]) self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns]) if random_state is None: self.random_state = 66 else: self.random_state = random_state def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self, input_placeholder, output_placeholder): """Returns a function, that will sample data and provide it to placeholders. Args: input_placeholder: tf.placeholder for input features mini batch. output_placeholder: tf.placeholder for output labels. Returns: A function that when called samples a random subset of batch size from x and y. """ def _feed_dict_fn(): """Samples data and provides it to placeholders.""" # TODO(ipolosukhin): option for with/without replacement (dev version of # dask) sample = self.df.random_split( [self.sample_fraction, 1 - self.sample_fraction], random_state=self.random_state) inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist() out = extract_pandas_matrix(sample[0][self._y_columns].compute()) # convert to correct dtype inp = np.array(inp, dtype=self._input_dtype) # one-hot encode out for each class for cross entropy loss if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if not isinstance(out, pd.Series): out = out.flatten() out_max = self._y.max().compute().values[0] encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype) encoded_out[np.arange(out.size), out] = 1 return {input_placeholder.name: inp, output_placeholder.name: encoded_out} return _feed_dict_fn
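# ------------------------------------------------------------------------------
# A rough usage sketch (not part of the original module) showing how the pieces
# above fit together for in-memory numpy data, assuming a TF 1.x graph-mode
# environment. The array shapes, names and batch size below are illustrative
# only.
def _demo_data_feeder():
  """Illustrative-only sketch of driving DataFeeder with numpy inputs."""
  x_demo = np.random.rand(100, 4).astype(np.float32)   # 100 samples, 4 features
  y_demo = np.random.randint(0, 3, size=100)           # integer labels, 3 classes
  # With n_classes=3 the feeder one-hot encodes labels and shuffles by default.
  feeder = setup_train_data_feeder(x_demo, y_demo, n_classes=3, batch_size=32)
  input_ph, output_ph = feeder.input_builder()   # placeholders shaped from data
  feed_fn = feeder.get_feed_dict_fn()
  # One call samples one mini batch, e.g.
  # {'input:0': (32, 4) float32, 'output:0': (32, 3) one-hot}
  return feed_fn()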
license: apache-2.0

repo_name: patricksnape/menpo
path: menpo/image/base.py
copies: 2
size: 131364
from typing import Iterable, Optional from warnings import warn import PIL.Image as PILImage import numpy as np from menpo.base import MenpoDeprecationWarning, Vectorizable, copy_landmarks_and_path from menpo.landmark import Landmarkable from menpo.shape import PointCloud, bounding_box from menpo.transform import ( AlignmentUniformScale, Homogeneous, NonUniformScale, Rotation, Translation, scale_about_centre, transform_about_centre, ) from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable from .interpolation import scipy_interpolation try: from .interpolation import cv2_perspective_interpolation except ImportError: warn("Falling back to scipy interpolation for affine warps") cv2_perspective_interpolation = None # type: ignore from .patches import ( extract_patches_with_slice, set_patches, extract_patches_by_sampling, ) # Cache the greyscale luminosity coefficients as they are invariant. _greyscale_luminosity_coef: Optional[np.ndarray] = None class ImageBoundaryError(ValueError): r""" Exception that is thrown when an attempt is made to crop an image beyond the edge of it's boundary. Parameters ---------- requested_min : ``(d,)`` `ndarray` The per-dimension minimum index requested for the crop requested_max : ``(d,)`` `ndarray` The per-dimension maximum index requested for the crop snapped_min : ``(d,)`` `ndarray` The per-dimension minimum index that could be used if the crop was constrained to the image boundaries. requested_max : ``(d,)`` `ndarray` The per-dimension maximum index that could be used if the crop was constrained to the image boundaries. """ def __init__(self, requested_min, requested_max, snapped_min, snapped_max): super(ImageBoundaryError, self).__init__() self.requested_min = requested_min self.requested_max = requested_max self.snapped_min = snapped_min self.snapped_max = snapped_max def indices_for_image_of_shape(shape): r""" The indices of all pixels in an image with a given shape (without channel information). Parameters ---------- shape : ``(n_dims, n_pixels)`` `ndarray` The shape of the image. Returns ------- indices : `ndarray` The indices of all the pixels in the image. """ return np.indices(shape).reshape([len(shape), -1]).T def normalize_pixels_range(pixels, error_on_unknown_type=True): r""" Normalize the given pixels to the Menpo valid floating point range, [0, 1]. This is a single place to handle normalising pixels ranges. At the moment the supported types are uint8 and uint16. Parameters ---------- pixels : `ndarray` The pixels to normalize in the floating point range. error_on_unknown_type : `bool`, optional If ``True``, this method throws a ``ValueError`` if the given pixels array is an unknown type. If ``False``, this method performs no operation. Returns ------- normalized_pixels : `ndarray` The normalized pixels in the range [0, 1]. Raises ------ ValueError If ``pixels`` is an unknown type and ``error_on_unknown_type==True`` """ dtype = pixels.dtype if dtype == np.uint8: max_range = 255.0 elif dtype == np.uint16: max_range = 65535.0 else: if error_on_unknown_type: raise ValueError( "Unexpected dtype ({}) - normalisation range " "is unknown".format(dtype) ) else: # Do nothing return pixels # This multiplication is quite a bit faster than just dividing - will # automatically cast it up to float64 return pixels * (1.0 / max_range) def denormalize_pixels_range(pixels, out_dtype): """ Denormalize the given pixels array into the range of the given out dtype. 
If the given pixels are floating point or boolean then the values are scaled appropriately and cast to the output dtype. If the pixels are already the correct dtype they are immediately returned. Floating point pixels must be in the range [0, 1]. Currently uint8 and uint16 output dtypes are supported. Parameters ---------- pixels : `ndarray` The pixels to denormalize. out_dtype : `np.dtype` The numpy data type to output and scale the values into. Returns ------- out_pixels : `ndarray` Will be in the correct range and will have type ``out_dtype``. Raises ------ ValueError Pixels are floating point and range outside [0, 1] ValueError Input pixels dtype not in the set {float32, float64, bool}. ValueError Output dtype not in the set {uint8, uint16} """ in_dtype = pixels.dtype if in_dtype == out_dtype: return pixels if np.issubclass_(in_dtype.type, np.floating) or in_dtype == float: if np.issubclass_(out_dtype, np.floating) or out_dtype == float: return pixels.astype(out_dtype) else: p_min = pixels.min() p_max = pixels.max() if p_min < 0.0 or p_max > 1.0: raise ValueError( "Unexpected input range [{}, {}] - pixels " "must be in the range [0, 1]".format(p_min, p_max) ) elif in_dtype != bool: raise ValueError( "Unexpected input dtype ({}) - only float32, float64 " "and bool supported".format(in_dtype) ) if out_dtype == np.uint8: max_range = 255.0 elif out_dtype == np.uint16: max_range = 65535.0 else: raise ValueError( "Unexpected output dtype ({}) - normalisation range " "is unknown".format(out_dtype) ) return (pixels * max_range).astype(out_dtype) def channels_to_back(pixels): r""" Roll the channels from the front to the back for an image. If the image that is passed is already a numpy array, then that is also fine. Always returns a numpy array because our :map:`Image` containers do not support channels at the back. Parameters ---------- image : `ndarray` The pixels or image to roll the channel back for. Returns ------- rolled_pixels : `ndarray` The numpy array of pixels with the channels on the last axis. """ return np.require( np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype, requirements=["C"] ) def channels_to_front(pixels): r""" Convert the given pixels array (channels assumed to be at the last axis as is common in other imaging packages) into a numpy array. Parameters ---------- pixels : ``(H, W, C)`` `buffer` The pixels to convert to the Menpo channels at axis 0. Returns ------- pixels : ``(C, H, W)`` `ndarray` Numpy array, channels as axis 0. """ if not isinstance(pixels, np.ndarray): pixels = np.array(pixels) return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype, requirements=["C"]) class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable): r""" An n-dimensional image. Images are n-dimensional homogeneous regular arrays of data. Each spatially distinct location in the array is referred to as a `pixel`. At a pixel, ``k`` distinct pieces of information can be stored. Each datum at a pixel is refereed to as being in a `channel`. All pixels in the image have the same number of channels, and all channels have the same data-type (`float64`). Parameters ---------- image_data : ``(C, M, N ..., Q)`` `ndarray` Array representing the image pixels, with the first axis being channels. copy : `bool`, optional If ``False``, the ``image_data`` will not be copied on assignment. Note that this will miss out on additional checks. Further note that we still demand that the array is C-contiguous - if it isn't, a copy will be generated anyway. 
In general, this should only be used if you know what you are doing. Raises ------ Warning If ``copy=False`` cannot be honoured ValueError If the pixel array is malformed """ def __init__(self, image_data, copy=True): super(Image, self).__init__() if not copy: if not image_data.flags.c_contiguous: image_data = np.array(image_data, copy=True, order="C") warn( "The copy flag was NOT honoured. A copy HAS been made. " "Please ensure the data you pass is C-contiguous." ) else: image_data = np.array(image_data, copy=True, order="C") # Degenerate case whereby we can just put the extra axis # on ourselves if image_data.ndim == 2: # Ensures that the data STAYS C-contiguous image_data = image_data.reshape((1,) + image_data.shape) if image_data.ndim < 2: raise ValueError( "Pixel array has to be 2D (implicitly 1 channel, " "2D shape) or 3D+ (n_channels, 2D+ shape) " " - a {}D array " "was provided".format(image_data.ndim) ) self.pixels = image_data @classmethod def init_blank(cls, shape, n_channels=1, fill=0, dtype=float): r""" Returns a blank image. Parameters ---------- shape : `tuple` or `list` The shape of the image. Any floating point values are rounded up to the nearest integer. n_channels : `int`, optional The number of channels to create the image with. fill : `int`, optional The value to fill all pixels with. dtype : numpy data type, optional The data type of the image. Returns ------- blank_image : :map:`Image` A new image of the requested size. """ # Ensure that the '+' operator means concatenate tuples shape = tuple(np.ceil(shape).astype(int)) if fill == 0: pixels = np.zeros((n_channels,) + shape, dtype=dtype) else: pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill # We know there is no need to copy... return cls(pixels, copy=False) @classmethod def init_from_rolled_channels(cls, pixels): r""" Deprecated - please use the equivalent ``init_from_channels_at_back`` method. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .init_from_channels_at_back instead.", MenpoDeprecationWarning, ) return cls.init_from_channels_at_back(pixels) @classmethod def init_from_channels_at_back(cls, pixels): r""" Create an Image from a set of pixels where the channels axis is on the last axis (the back). This is common in other frameworks, and therefore this method provides a convenient means of creating a menpo Image from such data. Note that a copy is always created due to the need to rearrange the data. Parameters ---------- pixels : ``(M, N ..., Q, C)`` `ndarray` Array representing the image pixels, with the last axis being channels. Returns ------- image : :map:`Image` A new image from the given pixels, with the FIRST axis as the channels. Raises ------ ValueError If image is not at least 2D, i.e. has at least 2 dimensions plus the channels in the end. """ if pixels.ndim == 2: pixels = pixels[..., None] if pixels.ndim < 2: raise ValueError( "Pixel array has to be 2D " "(2D shape, implicitly 1 channel) " "or 3D+ (2D+ shape, n_channels) " " - a {}D array " "was provided".format(pixels.ndim) ) return cls(channels_to_front(pixels)) @classmethod def init_from_pointcloud( cls, pointcloud, group=None, boundary=0, n_channels=1, fill=0, dtype=float, return_transform=False, ): r""" Create an Image that is big enough to contain the given pointcloud. The pointcloud will be translated to the origin and then translated according to its bounds in order to fit inside the new image. 
An optional boundary can be provided in order to increase the space around the boundary of the pointcloud. The boundary will be added to *all sides of the image* and so a boundary of 5 provides 10 pixels of boundary total for each dimension. Parameters ---------- pointcloud : :map:`PointCloud` Pointcloud to place inside the newly created image. group : `str`, optional If ``None``, the pointcloud will only be used to create the image. If a `str` then the pointcloud will be attached as a landmark group to the image, with the given string as key. boundary : `float` A optional padding distance that is added to the pointcloud bounds. Default is ``0``, meaning the max/min of tightest possible containing image is returned. n_channels : `int`, optional The number of channels to create the image with. fill : `int`, optional The value to fill all pixels with. dtype : numpy data type, optional The data type of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to adjust the PointCloud in order to build the image, is returned. Returns ------- image : ``type(cls)`` Image or subclass A new image with the same size as the given pointcloud, optionally with the pointcloud attached as landmarks. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ # Translate pointcloud to the origin minimum = pointcloud.bounds(boundary=boundary)[0] tr = Translation(-minimum) origin_pc = tr.apply(pointcloud) image_shape = origin_pc.range(boundary=boundary) new_image = cls.init_blank( image_shape, n_channels=n_channels, fill=fill, dtype=dtype ) if group is not None: new_image.landmarks[group] = origin_pc if return_transform: return new_image, tr else: return new_image def as_masked(self, mask=None, copy=True): r""" Return a copy of this image with an attached mask behavior. A custom mask may be provided, or ``None``. See the :map:`MaskedImage` constructor for details of how the kwargs will be handled. Parameters ---------- mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage` A mask to attach to the newly generated masked image. copy : `bool`, optional If ``False``, the produced :map:`MaskedImage` will share pixels with ``self``. Only suggested to be used for performance. Returns ------- masked_image : :map:`MaskedImage` An image with the same pixels and landmarks as this one, but with a mask. """ from menpo.image import MaskedImage return copy_landmarks_and_path( self, MaskedImage(self.pixels, mask=mask, copy=copy) ) @property def n_dims(self): r""" The number of dimensions in the image. The minimum possible ``n_dims`` is 2. :type: `int` """ return len(self.shape) @property def n_pixels(self): r""" Total number of pixels in the image ``(prod(shape),)`` :type: `int` """ return self.pixels[0, ...].size @property def n_elements(self): r""" Total number of data points in the image ``(prod(shape), n_channels)`` :type: `int` """ return self.pixels.size @property def n_channels(self): """ The number of channels on each pixel in the image. :type: `int` """ return self.pixels.shape[0] @property def width(self): r""" The width of the image. This is the width according to image semantics, and is thus the size of the **last** dimension. :type: `int` """ return self.pixels.shape[-1] @property def height(self): r""" The height of the image. This is the height according to image semantics, and is thus the size of the **second to last** dimension. 
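For example (illustrative sizes only), pixels of shape ``(1, 120, 80)`` give a
height of ``120`` and a width of ``80``:

>>> import numpy as np
>>> im = Image(np.zeros((1, 120, 80)))
>>> im.height, im.width
(120, 80)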
:type: `int` """ return self.pixels.shape[-2] @property def shape(self): r""" The shape of the image (with ``n_channel`` values at each point). :type: `tuple` """ return self.pixels.shape[1:] def bounds(self): r""" The bounds of the image, minimum is always (0, 0). The maximum is the maximum **index** that can be used to index into the image for each dimension. Therefore, bounds will be of the form: ((0, 0), (self.height - 1, self.width - 1)) for a 2D image. Note that this is akin to supporting a nearest neighbour interpolation. Although the *actual* maximum subpixel value would be something like ``self.height - eps`` where ``eps`` is some value arbitrarily close to 0, this value at least allows sampling without worrying about floating point error. :type: `tuple` """ return (0,) * self.n_dims, tuple(s - 1 for s in self.shape) def diagonal(self): r""" The diagonal size of this image :type: `float` """ return np.sqrt(np.sum(np.array(self.shape) ** 2)) def centre(self): r""" The geometric centre of the Image - the subpixel that is in the middle. Useful for aligning shapes and images. :type: (``n_dims``,) `ndarray` """ return np.array(self.shape, dtype=np.double) / 2 def _str_shape(self): if self.n_dims > 2: return " x ".join(str(dim) for dim in self.shape) elif self.n_dims == 2: return "{}W x {}H".format(self.width, self.height) def indices(self): r""" Return the indices of all pixels in this image. :type: (``n_dims``, ``n_pixels``) ndarray """ return indices_for_image_of_shape(self.shape) def _as_vector(self, keep_channels=False): r""" The vectorized form of this image. Parameters ---------- keep_channels : `bool`, optional ========== ============================= Value Return shape ========== ============================= `False` ``(n_channels * n_pixels,)`` `True` ``(n_channels, n_pixels)`` ========== ============================= Returns ------- vec : (See ``keep_channels`` above) `ndarray` Flattened representation of this image, containing all pixel and channel information. """ if keep_channels: return self.pixels.reshape([self.n_channels, -1]) else: return self.pixels.ravel() def from_vector(self, vector, n_channels=None, copy=True): r""" Takes a flattened vector and returns a new image formed by reshaping the vector to the correct pixels and channels. The `n_channels` argument is useful for when we want to add an extra channel to an image but maintain the shape. For example, when calculating the gradient. Note that landmarks are transferred in the process. Parameters ---------- vector : ``(n_parameters,)`` `ndarray` A flattened vector of all pixels and channels of an image. n_channels : `int`, optional If given, will assume that vector is the same shape as this image, but with a possibly different number of channels. copy : `bool`, optional If ``False``, the vector will not be copied in creating the new image. Returns ------- image : :map:`Image` New image of same shape as this image and the number of specified channels. Raises ------ Warning If the ``copy=False`` flag cannot be honored """ # This is useful for when we want to add an extra channel to an image # but maintain the shape. 
For example, when calculating the gradient n_channels = self.n_channels if n_channels is None else n_channels image_data = vector.reshape((n_channels,) + self.shape) new_image = Image(image_data, copy=copy) new_image.landmarks = self.landmarks return new_image def _from_vector_inplace(self, vector, copy=True): r""" Takes a flattened vector and update this image by reshaping the vector to the correct dimensions. Parameters ---------- vector : ``(n_pixels,)`` `bool ndarray` A vector vector of all the pixels of a :map:`BooleanImage`. copy: `bool`, optional If ``False``, the vector will be set as the pixels. If ``True``, a copy of the vector is taken. Raises ------ Warning If ``copy=False`` flag cannot be honored Note ---- For :map:`BooleanImage` this is rebuilding a boolean image **itself** from boolean values. The mask is in no way interpreted in performing the operation, in contrast to :map:`MaskedImage`, where only the masked region is used in :meth:`from_vector_inplace` and :meth:`as_vector`. """ image_data = vector.reshape(self.pixels.shape) if not copy: if not image_data.flags.c_contiguous: warn( "The copy flag was NOT honoured. A copy HAS been made. " "Please ensure the data you pass is C-contiguous." ) image_data = np.array( image_data, copy=True, order="C", dtype=image_data.dtype ) else: image_data = np.array( image_data, copy=True, order="C", dtype=image_data.dtype ) self.pixels = image_data def extract_channels(self, channels): r""" A copy of this image with only the specified channels. Parameters ---------- channels : `int` or `[int]` The channel index or `list` of channel indices to retain. Returns ------- image : `type(self)` A copy of this image with only the channels requested. """ copy = self.copy() if not isinstance(channels, list): channels = [channels] # ensure we don't remove the channel axis copy.pixels = self.pixels[channels] return copy def as_histogram(self, keep_channels=True, bins="unique"): r""" Histogram binning of the values of this image. Parameters ---------- keep_channels : `bool`, optional If set to ``False``, it returns a single histogram for all the channels of the image. If set to ``True``, it returns a `list` of histograms, one for each channel. bins : ``{unique}``, positive `int` or sequence of scalars, optional If set equal to ``'unique'``, the bins of the histograms are centred on the unique values of each channel. If set equal to a positive `int`, then this is the number of bins. If set equal to a sequence of scalars, these will be used as bins centres. Returns ------- hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside The histogram(s). If ``keep_channels=False``, then hist is an `ndarray`. If ``keep_channels=True``, then hist is a `list` with ``len(hist)=n_channels``. bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside An array or a list of arrays corresponding to the above histograms that store the bins' edges. Raises ------ ValueError Bins can be either 'unique', positive int or a sequence of scalars. 
Examples -------- Visualizing the histogram when a list of array bin edges is provided: >>> hist, bin_edges = image.as_histogram() >>> for k in range(len(hist)): >>> plt.subplot(1,len(hist),k) >>> width = 0.7 * (bin_edges[k][1] - bin_edges[k][0]) >>> centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2 >>> plt.bar(centre, hist[k], align='center', width=width) """ # parse options if isinstance(bins, str): if bins == "unique": bins = 0 else: raise ValueError( "Bins can be either 'unique', positive int or" "a sequence of scalars." ) elif isinstance(bins, int) and bins < 1: raise ValueError( "Bins can be either 'unique', positive int or a " "sequence of scalars." ) # compute histogram vec = self.as_vector(keep_channels=keep_channels) if len(vec.shape) == 1 or vec.shape[0] == 1: if bins == 0: bins = np.unique(vec) hist, bin_edges = np.histogram(vec, bins=bins) else: hist = [] bin_edges = [] num_bins = bins for ch in range(vec.shape[0]): if bins == 0: num_bins = np.unique(vec[ch, :]) h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins) hist.append(h_tmp) bin_edges.append(c_tmp) return hist, bin_edges def _view_2d( self, figure_id=None, new_figure=False, channels=None, interpolation="bilinear", cmap_name=None, alpha=1.0, render_axes=False, axes_font_name="sans-serif", axes_font_size=10, axes_font_style="normal", axes_font_weight="normal", axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None, figure_size=(7, 7), ): r""" View the image using the default image viewer. This method will appear on the Image as ``view`` if the Image is 2D. Returns ------- figure_id : `object`, optional The id of the figure to be used. new_figure : `bool`, optional If ``True``, a new figure is created. channels : `int` or `list` of `int` or ``all`` or ``None`` If `int` or `list` of `int`, the specified channel(s) will be rendered. If ``all``, all the channels will be rendered in subplots. If ``None`` and the image is RGB, it will be rendered in RGB mode. If ``None`` and the image is not RGB, it is equivalent to ``all``. interpolation : See Below, optional The interpolation used to render the image. For example, if ``bilinear``, the image will be smooth and if ``nearest``, the image will be pixelated. Example options :: {none, nearest, bilinear, bicubic, spline16, spline36, hanning, hamming, hermite, kaiser, quadric, catrom, gaussian, bessel, mitchell, sinc, lanczos} cmap_name: `str`, optional, If ``None``, single channel and three channel images default to greyscale and rgb colormaps respectively. alpha : `float`, optional The alpha blending value, between 0 (transparent) and 1 (opaque). render_axes : `bool`, optional If ``True``, the axes will be rendered. axes_font_name : See Below, optional The font of the axes. Example options :: {serif, sans-serif, cursive, fantasy, monospace} axes_font_size : `int`, optional The font size of the axes. axes_font_style : {``normal``, ``italic``, ``oblique``}, optional The font style of the axes. axes_font_weight : See Below, optional The font weight of the axes. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} axes_x_limits : `float` or (`float`, `float`) or ``None``, optional The limits of the x axis. If `float`, then it sets padding on the right and left of the Image as a percentage of the Image's width. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. 
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional The limits of the y axis. If `float`, then it sets padding on the top and bottom of the Image as a percentage of the Image's height. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_x_ticks : `list` or `tuple` or ``None``, optional The ticks of the x axis. axes_y_ticks : `list` or `tuple` or ``None``, optional The ticks of the y axis. figure_size : (`float`, `float`) `tuple` or ``None``, optional The size of the figure in inches. Returns ------- viewer : `ImageViewer` The image viewing object. """ return ImageViewer( figure_id, new_figure, self.n_dims, self.pixels, channels=channels ).render( interpolation=interpolation, cmap_name=cmap_name, alpha=alpha, render_axes=render_axes, axes_font_name=axes_font_name, axes_font_size=axes_font_size, axes_font_style=axes_font_style, axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks, figure_size=figure_size, ) def _view_landmarks_2d( self, channels=None, group=None, with_labels=None, without_labels=None, figure_id=None, new_figure=False, interpolation="bilinear", cmap_name=None, alpha=1.0, render_lines=True, line_colour=None, line_style="-", line_width=1, render_markers=True, marker_style="o", marker_size=5, marker_face_colour=None, marker_edge_colour=None, marker_edge_width=1.0, render_numbering=False, numbers_horizontal_align="center", numbers_vertical_align="bottom", numbers_font_name="sans-serif", numbers_font_size=10, numbers_font_style="normal", numbers_font_weight="normal", numbers_font_colour="k", render_legend=False, legend_title="", legend_font_name="sans-serif", legend_font_style="normal", legend_font_size=10, legend_font_weight="normal", legend_marker_scale=None, legend_location=2, legend_bbox_to_anchor=(1.05, 1.0), legend_border_axes_pad=None, legend_n_columns=1, legend_horizontal_spacing=None, legend_vertical_spacing=None, legend_border=True, legend_border_padding=None, legend_shadow=False, legend_rounded_corners=False, render_axes=False, axes_font_name="sans-serif", axes_font_size=10, axes_font_style="normal", axes_font_weight="normal", axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None, figure_size=(7, 7), ): """ Visualize the landmarks. This method will appear on the Image as ``view_landmarks`` if the Image is 2D. Parameters ---------- channels : `int` or `list` of `int` or ``all`` or ``None`` If `int` or `list` of `int`, the specified channel(s) will be rendered. If ``all``, all the channels will be rendered in subplots. If ``None`` and the image is RGB, it will be rendered in RGB mode. If ``None`` and the image is not RGB, it is equivalent to ``all``. group : `str` or``None`` optional The landmark group to be visualized. If ``None`` and there are more than one landmark groups, an error is raised. with_labels : ``None`` or `str` or `list` of `str`, optional If not ``None``, only show the given label(s). Should **not** be used with the ``without_labels`` kwarg. without_labels : ``None`` or `str` or `list` of `str`, optional If not ``None``, show all except the given label(s). Should **not** be used with the ``with_labels`` kwarg. figure_id : `object`, optional The id of the figure to be used. new_figure : `bool`, optional If ``True``, a new figure is created. interpolation : See Below, optional The interpolation used to render the image. 
For example, if ``bilinear``, the image will be smooth and if ``nearest``, the image will be pixelated. Example options :: {none, nearest, bilinear, bicubic, spline16, spline36, hanning, hamming, hermite, kaiser, quadric, catrom, gaussian, bessel, mitchell, sinc, lanczos} cmap_name: `str`, optional, If ``None``, single channel and three channel images default to greyscale and rgb colormaps respectively. alpha : `float`, optional The alpha blending value, between 0 (transparent) and 1 (opaque). render_lines : `bool`, optional If ``True``, the edges will be rendered. line_colour : See Below, optional The colour of the lines. Example options:: {r, g, b, c, m, k, w} or (3, ) ndarray line_style : ``{-, --, -., :}``, optional The style of the lines. line_width : `float`, optional The width of the lines. render_markers : `bool`, optional If ``True``, the markers will be rendered. marker_style : See Below, optional The style of the markers. Example options :: {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8} marker_size : `int`, optional The size of the markers in points. marker_face_colour : See Below, optional The face (filling) colour of the markers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_colour : See Below, optional The edge colour of the markers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_width : `float`, optional The width of the markers' edge. render_numbering : `bool`, optional If ``True``, the landmarks will be numbered. numbers_horizontal_align : ``{center, right, left}``, optional The horizontal alignment of the numbers' texts. numbers_vertical_align : ``{center, top, bottom, baseline}``, optional The vertical alignment of the numbers' texts. numbers_font_name : See Below, optional The font of the numbers. Example options :: {serif, sans-serif, cursive, fantasy, monospace} numbers_font_size : `int`, optional The font size of the numbers. numbers_font_style : ``{normal, italic, oblique}``, optional The font style of the numbers. numbers_font_weight : See Below, optional The font weight of the numbers. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} numbers_font_colour : See Below, optional The font colour of the numbers. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray render_legend : `bool`, optional If ``True``, the legend will be rendered. legend_title : `str`, optional The title of the legend. legend_font_name : See below, optional The font of the legend. Example options :: {serif, sans-serif, cursive, fantasy, monospace} legend_font_style : ``{normal, italic, oblique}``, optional The font style of the legend. legend_font_size : `int`, optional The font size of the legend. legend_font_weight : See Below, optional The font weight of the legend. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold, demibold, demi, bold, heavy, extra bold, black} legend_marker_scale : `float`, optional The relative size of the legend markers with respect to the original legend_location : `int`, optional The location of the legend. The predefined values are: =============== == 'best' 0 'upper right' 1 'upper left' 2 'lower left' 3 'lower right' 4 'right' 5 'center left' 6 'center right' 7 'lower center' 8 'upper center' 9 'center' 10 =============== == legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional The bbox that the legend will be anchored. 
legend_border_axes_pad : `float`, optional The pad between the axes and legend border. legend_n_columns : `int`, optional The number of the legend's columns. legend_horizontal_spacing : `float`, optional The spacing between the columns. legend_vertical_spacing : `float`, optional The vertical space between the legend entries. legend_border : `bool`, optional If ``True``, a frame will be drawn around the legend. legend_border_padding : `float`, optional The fractional whitespace inside the legend border. legend_shadow : `bool`, optional If ``True``, a shadow will be drawn behind legend. legend_rounded_corners : `bool`, optional If ``True``, the frame's corners will be rounded (fancybox). render_axes : `bool`, optional If ``True``, the axes will be rendered. axes_font_name : See Below, optional The font of the axes. Example options :: {serif, sans-serif, cursive, fantasy, monospace} axes_font_size : `int`, optional The font size of the axes. axes_font_style : ``{normal, italic, oblique}``, optional The font style of the axes. axes_font_weight : See Below, optional The font weight of the axes. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold,demibold, demi, bold, heavy, extra bold, black} axes_x_limits : `float` or (`float`, `float`) or ``None``, optional The limits of the x axis. If `float`, then it sets padding on the right and left of the Image as a percentage of the Image's width. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional The limits of the y axis. If `float`, then it sets padding on the top and bottom of the Image as a percentage of the Image's height. If `tuple` or `list`, then it defines the axis limits. If ``None``, then the limits are set automatically. axes_x_ticks : `list` or `tuple` or ``None``, optional The ticks of the x axis. axes_y_ticks : `list` or `tuple` or ``None``, optional The ticks of the y axis. figure_size : (`float`, `float`) `tuple` or ``None`` optional The size of the figure in inches. Raises ------ ValueError If both ``with_labels`` and ``without_labels`` are passed. ValueError If the landmark manager doesn't contain the provided group label. """ from menpo.visualize import view_image_landmarks return view_image_landmarks( self, channels, False, group, with_labels, without_labels, figure_id, new_figure, interpolation, cmap_name, alpha, render_lines, line_colour, line_style, line_width, render_markers, marker_style, marker_size, marker_face_colour, marker_edge_colour, marker_edge_width, render_numbering, numbers_horizontal_align, numbers_vertical_align, numbers_font_name, numbers_font_size, numbers_font_style, numbers_font_weight, numbers_font_colour, render_legend, legend_title, legend_font_name, legend_font_style, legend_font_size, legend_font_weight, legend_marker_scale, legend_location, legend_bbox_to_anchor, legend_border_axes_pad, legend_n_columns, legend_horizontal_spacing, legend_vertical_spacing, legend_border, legend_border_padding, legend_shadow, legend_rounded_corners, render_axes, axes_font_name, axes_font_size, axes_font_style, axes_font_weight, axes_x_limits, axes_y_limits, axes_x_ticks, axes_y_ticks, figure_size, ) def crop( self, min_indices, max_indices, constrain_to_boundary=False, return_transform=False, ): r""" Return a cropped copy of this image using the given minimum and maximum indices. 
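For example (a minimal sketch; the ``100 x 100`` single-channel image and the
index values are illustrative assumptions):

>>> import numpy as np
>>> im = Image(np.zeros((1, 100, 100)))
>>> cropped = im.crop(np.array([10, 10]), np.array([60, 80]))
>>> cropped.shape
(50, 70)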
Landmarks are correctly adjusted so they maintain their position relative to the newly cropped image. Parameters ---------- min_indices : ``(n_dims,)`` `ndarray` The minimum index over each dimension. max_indices : ``(n_dims,)`` `ndarray` The maximum index over each dimension. constrain_to_boundary : `bool`, optional If ``True`` the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- cropped_image : `type(self)` A new instance of self, but cropped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError ``min_indices`` and ``max_indices`` both have to be of length ``n_dims``. All ``max_indices`` must be greater than ``min_indices``. ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. """ min_indices = np.floor(min_indices) max_indices = np.ceil(max_indices) if not (min_indices.size == max_indices.size == self.n_dims): raise ValueError( "Both min and max indices should be 1D numpy arrays of" " length n_dims ({})".format(self.n_dims) ) elif not np.all(max_indices > min_indices): raise ValueError("All max indices must be greater that the min " "indices") min_bounded = self.constrain_points_to_bounds(min_indices) max_bounded = self.constrain_points_to_bounds(max_indices) all_max_bounded = np.all(min_bounded == min_indices) all_min_bounded = np.all(max_bounded == max_indices) if not (constrain_to_boundary or all_max_bounded or all_min_bounded): # points have been constrained and the user didn't want this - raise ImageBoundaryError(min_indices, max_indices, min_bounded, max_bounded) new_shape = (max_bounded - min_bounded).astype(int) return self.warp_to_shape( new_shape, Translation(min_bounded), order=0, warp_landmarks=True, return_transform=return_transform, ) def crop_to_pointcloud( self, pointcloud, boundary=0, constrain_to_boundary=True, return_transform=False ): r""" Return a copy of this image cropped so that it is bounded around a pointcloud with an optional ``n_pixel`` boundary. Parameters ---------- pointcloud : :map:`PointCloud` The pointcloud to crop around. boundary : `int`, optional An extra padding to be added all around the landmarks bounds. constrain_to_boundary : `bool`, optional If ``True`` the crop will be snapped to not go beyond this images boundary. If ``False``, an :map`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` A copy of this image cropped to the bounds of the pointcloud. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. 
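Examples
--------
A minimal sketch (the image size and bounding box corners are illustrative
assumptions):

>>> import numpy as np
>>> from menpo.shape import bounding_box
>>> im = Image(np.zeros((1, 100, 100)))
>>> box = bounding_box((20, 30), (60, 80))
>>> cropped = im.crop_to_pointcloud(box, boundary=5)
>>> cropped.shape
(50, 60)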
""" min_indices, max_indices = pointcloud.bounds(boundary=boundary) return self.crop( min_indices, max_indices, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def crop_to_landmarks( self, group=None, boundary=0, constrain_to_boundary=True, return_transform=False ): r""" Return a copy of this image cropped so that it is bounded around a set of landmarks with an optional ``n_pixel`` boundary Parameters ---------- group : `str`, optional The key of the landmark set that should be used. If ``None`` and if there is only one set of landmarks, this set will be used. boundary : `int`, optional An extra padding to be added all around the landmarks bounds. constrain_to_boundary : `bool`, optional If ``True`` the crop will be snapped to not go beyond this images boundary. If ``False``, an :map`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` A copy of this image cropped to its landmarks. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. """ pc = self.landmarks[group] return self.crop_to_pointcloud( pc, boundary=boundary, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def crop_to_pointcloud_proportion( self, pointcloud, boundary_proportion, minimum=True, constrain_to_boundary=True, return_transform=False, ): r""" Return a copy of this image cropped so that it is bounded around a pointcloud with a border proportional to the pointcloud spread or range. Parameters ---------- pointcloud : :map:`PointCloud` The pointcloud to crop around. boundary_proportion : `float` Additional padding to be added all around the landmarks bounds defined as a proportion of the landmarks range. See the minimum parameter for a definition of how the range is calculated. minimum : `bool`, optional If ``True`` the specified proportion is relative to the minimum value of the pointclouds' per-dimension range; if ``False`` w.r.t. the maximum value of the pointclouds' per-dimension range. constrain_to_boundary : `bool`, optional If ``True``, the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` A copy of this image cropped to the border proportional to the pointcloud spread or range. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. 
""" if minimum: boundary = boundary_proportion * np.min(pointcloud.range()) else: boundary = boundary_proportion * np.max(pointcloud.range()) return self.crop_to_pointcloud( pointcloud, boundary=boundary, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def crop_to_landmarks_proportion( self, boundary_proportion, group=None, minimum=True, constrain_to_boundary=True, return_transform=False, ): r""" Crop this image to be bounded around a set of landmarks with a border proportional to the landmark spread or range. Parameters ---------- boundary_proportion : `float` Additional padding to be added all around the landmarks bounds defined as a proportion of the landmarks range. See the minimum parameter for a definition of how the range is calculated. group : `str`, optional The key of the landmark set that should be used. If ``None`` and if there is only one set of landmarks, this set will be used. minimum : `bool`, optional If ``True`` the specified proportion is relative to the minimum value of the landmarks' per-dimension range; if ``False`` w.r.t. the maximum value of the landmarks' per-dimension range. constrain_to_boundary : `bool`, optional If ``True``, the crop will be snapped to not go beyond this images boundary. If ``False``, an :map:`ImageBoundaryError` will be raised if an attempt is made to go beyond the edge of the image. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the cropping is also returned. Returns ------- image : :map:`Image` This image, cropped to its landmarks with a border proportional to the landmark spread or range. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ImageBoundaryError Raised if ``constrain_to_boundary=False``, and an attempt is made to crop the image in a way that violates the image bounds. """ pc = self.landmarks[group] return self.crop_to_pointcloud_proportion( pc, boundary_proportion, minimum=minimum, constrain_to_boundary=constrain_to_boundary, return_transform=return_transform, ) def constrain_points_to_bounds(self, points): r""" Constrains the points provided to be within the bounds of this image. Parameters ---------- points : ``(d,)`` `ndarray` Points to be snapped to the image boundaries. Returns ------- bounded_points : ``(d,)`` `ndarray` Points snapped to not stray outside the image edges. """ bounded_points = points.copy() # check we don't stray under any edges bounded_points[bounded_points < 0] = 0 # check we don't stray over any edges shape = np.array(self.shape) over_image = (shape - bounded_points) < 0 bounded_points[over_image] = shape[over_image] return bounded_points def extract_patches( self, patch_centers, patch_shape=(16, 16), sample_offsets=None, as_single_array=True, order=0, mode="constant", cval=0.0, ): r""" Extract a set of patches from an image. Given a set of patch centers and a patch size, patches are extracted from within the image, centred on the given coordinates. Sample offsets denote a set of offsets to extract from within a patch. This is very useful if you want to extract a dense set of features around a set of landmarks and simply sample the same grid of patches around the landmarks. If sample offsets are used, to access the offsets for each patch you need to slice the resulting `list`. So for 2 offsets, the first centers offset patches would be ``patches[:2]``. Currently only 2D images are supported. 
Note that the default is nearest neighbour sampling for the patches which is achieved via slicing and is much more efficient than using sampling/interpolation. Note that a significant performance decrease will be measured if the ``order`` or ``mode`` parameters are modified from ``order = 0`` and ``mode = 'constant'`` as internally sampling will be used rather than slicing. Parameters ---------- patch_centers : :map:`PointCloud` The centers to extract patches around. patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional The size of the patch to extract sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional The offsets to sample from within a patch. So ``(0, 0)`` is the centre of the patch (no offset) and ``(1, 0)`` would be sampling the patch from 1 pixel up the first axis away from the centre. If ``None``, then no offsets are applied. as_single_array : `bool`, optional If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)`` `ndarray`, thus a single numpy array is returned containing each patch. If ``False``, a `list` of ``n_center * n_offset`` :map:`Image` objects is returned representing each patch. order : `int`, optional The order of interpolation. The order has to be in the range [0,5]. See warp_to_shape for more information. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. Returns ------- patches : `list` or `ndarray` Returns the extracted patches. Returns a list if ``as_single_array=True`` and an `ndarray` if ``as_single_array=False``. Raises ------ ValueError If image is not 2D """ if self.n_dims != 2: raise ValueError( "Only two dimensional patch extraction is " "currently supported." ) if order == 0 and mode == "constant": # Fast path using slicing single_array = extract_patches_with_slice( self.pixels, patch_centers.points, patch_shape, offsets=sample_offsets, cval=cval, ) else: single_array = extract_patches_by_sampling( self.pixels, patch_centers.points, patch_shape, offsets=sample_offsets, order=order, mode=mode, cval=cval, ) if as_single_array: return single_array else: return [Image(o, copy=False) for p in single_array for o in p] def extract_patches_around_landmarks( self, group=None, patch_shape=(16, 16), sample_offsets=None, as_single_array=True, ): r""" Extract patches around landmarks existing on this image. Provided the group label and optionally the landmark label extract a set of patches. See `extract_patches` for more information. Currently only 2D images are supported. Parameters ---------- group : `str` or ``None``, optional The landmark group to use as patch centres. patch_shape : `tuple` or `ndarray`, optional The size of the patch to extract sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional The offsets to sample from within a patch. So ``(0, 0)`` is the centre of the patch (no offset) and ``(1, 0)`` would be sampling the patch from 1 pixel up the first axis away from the centre. If ``None``, then no offsets are applied. as_single_array : `bool`, optional If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)`` `ndarray`, thus a single numpy array is returned containing each patch. If ``False``, a `list` of ``n_center * n_offset`` :map:`Image` objects is returned representing each patch. Returns ------- patches : `list` or `ndarray` Returns the extracted patches. 
Returns a list if ``as_single_array=True`` and an `ndarray` if ``as_single_array=False``. Raises ------ ValueError If image is not 2D """ return self.extract_patches( self.landmarks[group], patch_shape=patch_shape, sample_offsets=sample_offsets, as_single_array=as_single_array, ) def set_patches(self, patches, patch_centers, offset=None, offset_index=None): r""" Set the values of a group of patches into the correct regions of a copy of this image. Given an array of patches and a set of patch centers, the patches' values are copied in the regions of the image that are centred on the coordinates of the given centers. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Currently only 2D images are supported. Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. patch_centers : :map:`PointCloud` The centers to set the patches around. offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional The offset to apply on the patch centers within the image. If ``None``, then ``(0, 0)`` is used. offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. Raises ------ ValueError If image is not 2D ValueError If offset does not have shape (1, 2) """ # parse arguments if self.n_dims != 2: raise ValueError( "Only two dimensional patch insertion is " "currently supported." ) if offset is None: offset = np.zeros([1, 2], dtype=np.intp) elif isinstance(offset, tuple) or isinstance(offset, list): offset = np.asarray([offset]) offset = np.require(offset, dtype=np.intp) if not offset.shape == (1, 2): raise ValueError( "The offset must be a tuple, a list or a " "numpy.array with shape (1, 2)." ) if offset_index is None: offset_index = 0 # if patches is a list, convert it to array if isinstance(patches, list): patches = _convert_patches_list_to_single_array( patches, patch_centers.n_points ) copy = self.copy() # set patches set_patches(patches, copy.pixels, patch_centers.points, offset, offset_index) return copy def set_patches_around_landmarks( self, patches, group=None, offset=None, offset_index=None ): r""" Set the values of a group of patches around the landmarks existing in a copy of this image. Given an array of patches, a group and a label, the patches' values are copied in the regions of the image that are centred on the coordinates of corresponding landmarks. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Currently only 2D images are supported. Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. 
Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. group : `str` or ``None`` optional The landmark group to use as patch centres. offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional The offset to apply on the patch centers within the image. If ``None``, then ``(0, 0)`` is used. offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. Raises ------ ValueError If image is not 2D ValueError If offset does not have shape (1, 2) """ return self.set_patches( patches, self.landmarks[group], offset=offset, offset_index=offset_index ) def warp_to_mask( self, template_mask, transform, warp_landmarks=True, order=1, mode="constant", cval=0.0, batch_size=None, return_transform=False, ): r""" Return a copy of this image warped into a different reference space. Note that warping into a mask is slower than warping into a full image. If you don't need a non-linear mask, consider :meth:``warp_to_shape`` instead. Parameters ---------- template_mask : :map:`BooleanImage` Defines the shape of the result, and what pixels should be sampled. transform : :map:`Transform` Transform **from the template space back to this image**. Defines, for each pixel location on the template, which pixel location should be sampled from on this image. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. batch_size : `int` or ``None``, optional This should only be considered for large images. Setting this value can cause warping to become much slower, particular for cached warps such as Piecewise Affine. This size indicates how many points in the image should be warped at a time, which keeps memory usage low. If ``None``, no batching is used and all points are warped at once. return_transform : `bool`, optional This argument is for internal use only. If ``True``, then the :map:`Transform` object is also returned. Returns ------- warped_image : :map:`MaskedImage` A copy of this image, warped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
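Examples
--------
A minimal sketch (the mask size, the translation and the random pixel values
are illustrative assumptions):

>>> import numpy as np
>>> from menpo.image import BooleanImage
>>> from menpo.transform import Translation
>>> im = Image(np.random.rand(1, 50, 50))
>>> mask = BooleanImage.init_blank((30, 30))
>>> warped = im.warp_to_mask(mask, Translation([5, 5]))
>>> warped.shape
(30, 30)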
""" if self.n_dims != transform.n_dims: raise ValueError( "Trying to warp a {}D image with a {}D transform " "(they must match)".format(self.n_dims, transform.n_dims) ) template_points = template_mask.true_indices() points_to_sample = transform.apply(template_points, batch_size=batch_size) sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval) # set any nan values to 0 sampled[np.isnan(sampled)] = 0 # build a warped version of the image warped_image = self._build_warp_to_mask(template_mask, sampled) if warp_landmarks and self.has_landmarks: warped_image.landmarks = self.landmarks transform.pseudoinverse()._apply_inplace(warped_image.landmarks) if hasattr(self, "path"): warped_image.path = self.path # optionally return the transform if return_transform: return warped_image, transform else: return warped_image def _build_warp_to_mask(self, template_mask, sampled_pixel_values): r""" Builds the warped image from the template mask and sampled pixel values. Overridden for :map:`BooleanImage` as we can't use the usual :meth:`from_vector_inplace` method. All other :map:`Image` classes share the :map:`Image` implementation. Parameters ---------- template_mask : :map:`BooleanImage` or 2D `bool ndarray` Mask for warping. sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray` Sampled value to rebuild the masked image from. """ from menpo.image import MaskedImage warped_image = MaskedImage.init_blank( template_mask.shape, n_channels=self.n_channels, mask=template_mask ) warped_image._from_vector_inplace(sampled_pixel_values.ravel()) return warped_image def sample(self, points_to_sample, order=1, mode="constant", cval=0.0): r""" Sample this image at the given sub-pixel accurate points. The input PointCloud should have the same number of dimensions as the image e.g. a 2D PointCloud for a 2D multi-channel image. A numpy array will be returned the has the values for every given point across each channel of the image. Parameters ---------- points_to_sample : :map:`PointCloud` Array of points to sample from the image. Should be `(n_points, n_dims)` order : `int`, optional The order of interpolation. The order has to be in the range [0,5]. See warp_to_shape for more information. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. Returns ------- sampled_pixels : (`n_points`, `n_channels`) `ndarray` The interpolated values taken across every channel of the image. """ # The public interface is a PointCloud, but when this is used internally # a numpy array is passed. So let's just treat the PointCloud as a # 'special case' and not document the ndarray ability. if isinstance(points_to_sample, PointCloud): points_to_sample = points_to_sample.points return scipy_interpolation( self.pixels, points_to_sample, order=order, mode=mode, cval=cval ) def warp_to_shape( self, template_shape, transform, warp_landmarks=True, order=1, mode="constant", cval=0.0, batch_size=None, return_transform=False, ): """ Return a copy of this image warped into a different reference space. Parameters ---------- template_shape : `tuple` or `ndarray` Defines the shape of the result, and what pixel indices should be sampled (all of them). transform : :map:`Transform` Transform **from the template_shape space back to this image**. 
Defines, for each index on template_shape, which pixel location should be sampled from on this image. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional Used in conjunction with mode ``constant``, the value outside the image boundaries. batch_size : `int` or ``None``, optional This should only be considered for large images. Setting this value can cause warping to become much slower, particular for cached warps such as Piecewise Affine. This size indicates how many points in the image should be warped at a time, which keeps memory usage low. If ``None``, no batching is used and all points are warped at once. return_transform : `bool`, optional This argument is for internal use only. If ``True``, then the :map:`Transform` object is also returned. Returns ------- warped_image : `type(self)` A copy of this image, warped. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ template_shape = np.array(template_shape, dtype=int) if ( isinstance(transform, Homogeneous) and order in range(2) and self.n_dims == 2 and cv2_perspective_interpolation is not None ): # we couldn't do the crop, but OpenCV has an optimised # interpolation for 2D perspective warps - let's use that warped_pixels = cv2_perspective_interpolation( self.pixels, template_shape, transform, order=order, mode=mode, cval=cval, ) else: template_points = indices_for_image_of_shape(template_shape) points_to_sample = transform.apply(template_points, batch_size=batch_size) sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval) # set any nan values to 0 # (seems that map_coordinates can produce nan values) sampled[np.isnan(sampled)] = 0 # build a warped version of the image warped_pixels = sampled.reshape((self.n_channels,) + tuple(template_shape)) return self._build_warp_to_shape( warped_pixels, transform, warp_landmarks, return_transform ) def _build_warp_to_shape( self, warped_pixels, transform, warp_landmarks, return_transform ): # factored out common logic from the different paths we can take in # warp_to_shape. Rebuilds an image post-warp, adjusting landmarks # as necessary. warped_image = Image(warped_pixels, copy=False) # warp landmarks if requested. if warp_landmarks and self.has_landmarks: warped_image.landmarks = self.landmarks transform.pseudoinverse()._apply_inplace(warped_image.landmarks) if hasattr(self, "path"): warped_image.path = self.path # optionally return the transform if return_transform: return warped_image, transform else: return warped_image def rescale( self, scale, round="ceil", order=1, warp_landmarks=True, return_transform=False ): r""" Return a copy of this image, rescaled by a given factor. Landmarks are rescaled appropriately. Parameters ---------- scale : `float` or `tuple` of `floats` The scale factor. If a tuple, the scale to apply to each dimension. If a single `float`, the scale will be applied uniformly across each dimension. 
round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError: If less scales than dimensions are provided. If any scale is less than or equal to 0. """ # Pythonic way of converting to list if we are passed a single float try: if len(scale) < self.n_dims: raise ValueError( "Must provide a scale per dimension." "{} scales were provided, {} were expected.".format( len(scale), self.n_dims ) ) except TypeError: # Thrown when len() is called on a float scale = [scale] * self.n_dims # Make sure we have a numpy array scale = np.asarray(scale) for s in scale: if s <= 0: raise ValueError("Scales must be positive floats.") transform = NonUniformScale(scale) # use the scale factor to make the template mask bigger # while respecting the users rounding preference. template_shape = round_image_shape(transform.apply(self.shape), round) # due to image indexing, we can't just apply the pseudoinverse # transform to achieve the scaling we want though! # Consider a 3x rescale on a 2x4 image. Looking at each dimension: # H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x # W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x # => need to make the correct scale per dimension! shape = np.array(self.shape, dtype=float) # scale factors = max_index_after / current_max_index # (note that max_index = length - 1, as 0 based) scale_factors = (scale * shape - 1) / (shape - 1) inverse_transform = NonUniformScale(scale_factors).pseudoinverse() # for rescaling we enforce that mode is nearest to avoid num. errors return self.warp_to_shape( template_shape, inverse_transform, warp_landmarks=warp_landmarks, order=order, mode="nearest", return_transform=return_transform, ) def rescale_to_diagonal( self, diagonal, round="ceil", warp_landmarks=True, return_transform=False ): r""" Return a copy of this image, rescaled so that the it's diagonal is a new size. Parameters ---------- diagonal: `int` The diagonal size of the new image. round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : type(self) A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
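Examples
--------
A minimal sketch (sizes chosen so the arithmetic is easy to follow): a
``60 x 80`` image has a diagonal of ``100``, so requesting a diagonal of
``200`` doubles each dimension:

>>> import numpy as np
>>> im = Image(np.zeros((1, 60, 80)))
>>> im.rescale_to_diagonal(200).shape
(120, 160)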
""" return self.rescale( diagonal / self.diagonal(), round=round, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rescale_to_pointcloud( self, pointcloud, group=None, round="ceil", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rescaled so that the scale of a particular group of landmarks matches the scale of the passed reference pointcloud. Parameters ---------- pointcloud: :map:`PointCloud` The reference pointcloud to which the landmarks specified by ``group`` will be scaled to match. group : `str`, optional The key of the landmark set that should be used. If ``None``, and if there is only one set of landmarks, this set will be used. round: ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. """ pc = self.landmarks[group] scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy() return self.rescale( scale, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rescale_landmarks_to_diagonal_range( self, diagonal_range, group=None, round="ceil", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rescaled so that the ``diagonal_range`` of the bounding box containing its landmarks matches the specified ``diagonal_range`` range. Parameters ---------- diagonal_range: ``(n_dims,)`` `ndarray` The diagonal_range range that we want the landmarks of the returned image to have. group : `str`, optional The key of the landmark set that should be used. If ``None`` and if there is only one set of landmarks, this set will be used. round : ``{ceil, floor, round}``, optional Rounding function to be applied to floating point shapes. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rescale is also returned. Returns ------- rescaled_image : ``type(self)`` A copy of this image, rescaled. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
""" x, y = self.landmarks[group].range() scale = diagonal_range / np.sqrt(x ** 2 + y ** 2) return self.rescale( scale, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def resize(self, shape, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, resized to a particular shape. All image information (landmarks, and mask in the case of :map:`MaskedImage`) is resized appropriately. Parameters ---------- shape : `tuple` The new shape to resize to. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the resize is also returned. Returns ------- resized_image : ``type(self)`` A copy of this image, resized. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError: If the number of dimensions of the new shape does not match the number of dimensions of the image. """ shape = np.asarray(shape, dtype=float) if len(shape) != self.n_dims: raise ValueError( "Dimensions must match." "{} dimensions provided, {} were expected.".format( shape.shape, self.n_dims ) ) scales = shape / self.shape # Have to round the shape when scaling to deal with floating point # errors. For example, if we want (250, 250), we need to ensure that # we get (250, 250) even if the number we obtain is 250 to some # floating point inaccuracy. return self.rescale( scales, round="round", order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def zoom(self, scale, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, zoomed about the centre point. ``scale`` values greater than 1.0 denote zooming **in** to the image and values less than 1.0 denote zooming **out** of the image. The size of the image will not change, if you wish to scale an image, please see :meth:`rescale`. Parameters ---------- scale : `float` ``scale > 1.0`` denotes zooming in. Thus the image will appear larger and areas at the edge of the zoom will be 'cropped' out. ``scale < 1.0`` denotes zooming out. The image will be padded by the value of ``cval``. order : `int`, optional The order of interpolation. The order has to be in the range [0,5] ========= ===================== Order Interpolation ========= ===================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ===================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the zooming is also returned. Returns ------- zoomed_image : ``type(self)`` A copy of this image, zoomed. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
""" t = scale_about_centre(self, 1.0 / scale) return self.warp_to_shape( self.shape, t, order=order, mode="nearest", warp_landmarks=warp_landmarks, return_transform=return_transform, ) def rotate_ccw_about_centre( self, theta, degrees=True, retain_shape=False, mode="constant", cval=0.0, round="round", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, rotated counter-clockwise about its centre. Note that the `retain_shape` argument defines the shape of the rotated image. If ``retain_shape=True``, then the shape of the rotated image will be the same as the one of current image, so some regions will probably be cropped. If ``retain_shape=False``, then the returned image has the correct size so that the whole area of the current image is included. Parameters ---------- theta : `float` The angle of rotation about the centre. degrees : `bool`, optional If ``True``, `theta` is interpreted in degrees. If ``False``, ``theta`` is interpreted as radians. retain_shape : `bool`, optional If ``True``, then the shape of the rotated image will be the same as the one of current image, so some regions will probably be cropped. If ``False``, then the returned image has the correct size so that the whole area of the current image is included. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional The value to be set outside the rotated image boundaries. round : ``{'ceil', 'floor', 'round'}``, optional Rounding function to be applied to floating point shapes. This is only used in case ``retain_shape=True``. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. This is only used in case ``retain_shape=True``. ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the rotation is also returned. Returns ------- rotated_image : ``type(self)`` The rotated image. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Raises ------ ValueError Image rotation is presently only supported on 2D images """ if self.n_dims != 2: raise ValueError( "Image rotation is presently only supported on " "2D images" ) rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees) return self.transform_about_centre( rotation, retain_shape=retain_shape, mode=mode, cval=cval, round=round, order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def transform_about_centre( self, transform, retain_shape=False, mode="constant", cval=0.0, round="round", order=1, warp_landmarks=True, return_transform=False, ): r""" Return a copy of this image, transformed about its centre. Note that the `retain_shape` argument defines the shape of the transformed image. If ``retain_shape=True``, then the shape of the transformed image will be the same as the one of current image, so some regions will probably be cropped. If ``retain_shape=False``, then the returned image has the correct size so that the whole area of the current image is included. .. 
note:: This method will not work for transforms that result in a transform chain as :map:`TransformChain` is not invertible. .. note:: Be careful when defining transforms for warping images. All pixel locations must fall within a valid range as expected by the transform. Therefore, your transformation must accept 'negative' pixel locations as the pixel locations provided to your transform will have the object centre subtracted from them. Parameters ---------- transform : :map:`ComposableTransform` and :map:`VInvertible` type A composable transform. ``pseudoinverse`` will be invoked on the resulting transform so it must implement a valid inverse. retain_shape : `bool`, optional If ``True``, then the shape of the transformed image will be the same as that of the current image, so some regions will probably be cropped. If ``False``, then the returned image has the correct size so that the whole area of the current image is included. mode : ``{constant, nearest, reflect, wrap}``, optional Points outside the boundaries of the input are filled according to the given mode. cval : `float`, optional The value to be set outside the transformed image boundaries. round : ``{'ceil', 'floor', 'round'}``, optional Rounding function to be applied to floating point shapes. This is only used in case ``retain_shape=True``. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. This is only used in case ``retain_shape=True``. ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as ``self``, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the transformation is also returned. Returns ------- transformed_image : ``type(self)`` The transformed image. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. Examples -------- This is an example of rotating an image about its centre.
Let's first load an image, create the rotation transform and then apply it :: import matplotlib.pyplot as plt import menpo.io as mio from menpo.transform import Rotation # Load image im = mio.import_builtin_asset.lenna_png() # Create shearing transform rot_tr = Rotation.init_from_2d_ccw_angle(45) # Render original image plt.subplot(131) im.view_landmarks() plt.title('Original') # Render rotated image plt.subplot(132) im.transform_about_centre(rot_tr).view_landmarks() plt.title('Rotated') # Render rotated image that has shape equal as original image plt.subplot(133) im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks() plt.title('Rotated (Retain original shape)') Similarly, in order to apply a shear transform :: import matplotlib.pyplot as plt import menpo.io as mio from menpo.transform import Affine # Load image im = mio.import_builtin_asset.lenna_png() # Create shearing transform shear_tr = Affine.init_from_2d_shear(25, 10) # Render original image plt.subplot(131) im.view_landmarks() plt.title('Original') # Render sheared image plt.subplot(132) im.transform_about_centre(shear_tr).view_landmarks() plt.title('Sheared') # Render sheared image that has shape equal as original image plt.subplot(133) im.transform_about_centre(shear_tr, retain_shape=True).view_landmarks() plt.title('Sheared (Retain original shape)') """ if retain_shape: shape = self.shape applied_transform = transform_about_centre(self, transform) else: # Get image's bounding box coordinates original_bbox = bounding_box((0, 0), np.array(self.shape) - 1) # Translate to origin and apply transform trans = Translation(-self.centre(), skip_checks=True).compose_before( transform ) transformed_bbox = trans.apply(original_bbox) # Create new translation so that min bbox values go to 0 t = Translation(-transformed_bbox.bounds()[0]) applied_transform = trans.compose_before(t) transformed_bbox = trans.apply(original_bbox) # Output image's shape is the range of the sheared bounding box # while respecting the users rounding preference. shape = round_image_shape(transformed_bbox.range() + 1, round) # Warp image return self.warp_to_shape( shape, applied_transform.pseudoinverse(), order=order, warp_landmarks=warp_landmarks, mode=mode, cval=cval, return_transform=return_transform, ) def mirror(self, axis=1, order=1, warp_landmarks=True, return_transform=False): r""" Return a copy of this image, mirrored/flipped about a certain axis. Parameters ---------- axis : `int`, optional The axis about which to mirror the image. order : `int`, optional The order of interpolation. The order has to be in the range ``[0,5]``. ========= ==================== Order Interpolation ========= ==================== 0 Nearest-neighbor 1 Bi-linear *(default)* 2 Bi-quadratic 3 Bi-cubic 4 Bi-quartic 5 Bi-quintic ========= ==================== warp_landmarks : `bool`, optional If ``True``, result will have the same landmark dictionary as self, but with each landmark updated to the warped position. return_transform : `bool`, optional If ``True``, then the :map:`Transform` object that was used to perform the mirroring is also returned. Returns ------- mirrored_image : ``type(self)`` The mirrored image. transform : :map:`Transform` The transform that was used. It only applies if `return_transform` is ``True``. 
Raises ------ ValueError axis cannot be negative ValueError axis={} but the image has {} dimensions """ # Check axis argument if axis < 0: raise ValueError("axis cannot be negative") elif axis >= self.n_dims: raise ValueError( "axis={} but the image has {} " "dimensions".format(axis, self.n_dims) ) # Create transform that includes ... # ... flipping about the selected axis ... rot_matrix = np.eye(self.n_dims) rot_matrix[axis, axis] = -1 # ... and translating back to the image's bbox tr_matrix = np.zeros(self.n_dims) tr_matrix[axis] = self.shape[axis] - 1 # Create transform object trans = Rotation(rot_matrix, skip_checks=True).compose_before( Translation(tr_matrix, skip_checks=True) ) # Warp image return self.warp_to_shape( self.shape, trans.pseudoinverse(), mode="nearest", order=order, warp_landmarks=warp_landmarks, return_transform=return_transform, ) def pyramid(self, n_levels=3, downscale=2): r""" Return a rescaled pyramid of this image. The first image of the pyramid will be a copy of the original, unmodified, image, and counts as level 1. Parameters ---------- n_levels : `int`, optional Total number of levels in the pyramid, including the original unmodified image downscale : `float`, optional Downscale factor. Yields ------ image_pyramid: `generator` Generator yielding pyramid layers as :map:`Image` objects. """ image = self.copy() yield image for _ in range(n_levels - 1): image = image.rescale(1.0 / downscale) yield image def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None): r""" Return the gaussian pyramid of this image. The first image of the pyramid will be a copy of the original, unmodified, image, and counts as level 1. Parameters ---------- n_levels : `int`, optional Total number of levels in the pyramid, including the original unmodified image downscale : `float`, optional Downscale factor. sigma : `float`, optional Sigma for gaussian filter. Default is ``downscale / 3.`` which corresponds to a filter mask twice the size of the scale factor that covers more than 99% of the gaussian distribution. Yields ------ image_pyramid: `generator` Generator yielding pyramid layers as :map:`Image` objects. """ from menpo.feature import gaussian_filter if sigma is None: sigma = downscale / 3.0 image = self.copy() yield image for level in range(n_levels - 1): image = gaussian_filter(image, sigma).rescale(1.0 / downscale) yield image def as_greyscale(self, mode="luminosity", channel=None): r""" Returns a greyscale version of the image. If the image does *not* represent a 2D RGB image, then the ``luminosity`` mode will fail. Parameters ---------- mode : ``{average, luminosity, channel}``, optional ============== ===================================================== mode Greyscale Algorithm ============== ===================================================== average Equal average of all channels luminosity Calculates the luminance using the CCIR 601 formula: | .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B' channel A specific channel is chosen as the intensity value. ============== ===================================================== channel: `int`, optional The channel to be taken. Only used if mode is ``channel``. Returns ------- greyscale_image : :map:`MaskedImage` A copy of this image in greyscale. """ greyscale = self.copy() if mode == "luminosity": if self.n_dims != 2: raise ValueError( "The 'luminosity' mode only works on 2D RGB" "images. 
{} dimensions found, " "2 expected.".format(self.n_dims) ) elif self.n_channels != 3: raise ValueError( "The 'luminosity' mode only works on RGB" "images. {} channels found, " "3 expected.".format(self.n_channels) ) # Only compute the coefficients once. global _greyscale_luminosity_coef if _greyscale_luminosity_coef is None: _greyscale_luminosity_coef = np.linalg.inv( np.array( [ [1.0, 0.956, 0.621], [1.0, -0.272, -0.647], [1.0, -1.106, 1.703], ] ) )[0, :] # Compute greyscale via dot product pixels = np.dot(_greyscale_luminosity_coef, greyscale.pixels.reshape(3, -1)) # Reshape image back to original shape (with 1 channel) pixels = pixels.reshape(greyscale.shape) elif mode == "average": pixels = np.mean(greyscale.pixels, axis=0) elif mode == "channel": if channel is None: raise ValueError( "For the 'channel' mode you have to provide" " a channel index" ) pixels = greyscale.pixels[channel] else: raise ValueError( "Unknown mode {} - expected 'luminosity', " "'average' or 'channel'.".format(mode) ) # Set new pixels - ensure channel axis and maintain greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype, copy=False) return greyscale def as_PILImage(self, out_dtype=np.uint8): r""" Return a PIL copy of the image scaled and cast to the correct values for the provided ``out_dtype``. Image must only have 1 or 3 channels and be 2 dimensional. Non `uint8` floating point images must be in the range ``[0, 1]`` to be converted. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- pil_image : `PILImage` PIL copy of image Raises ------ ValueError If image is not 2D and has 1 channel or 3 channels. ValueError If pixels data type is `float32` or `float64` and the pixel range is outside of ``[0, 1]`` ValueError If the output dtype is unsupported. Currently uint8 is supported. """ if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3): raise ValueError( "Can only convert greyscale or RGB 2D images. " "Received a {} channel {}D image.".format(self.n_channels, self.n_dims) ) # Slice off the channel for greyscale images if self.n_channels == 1: pixels = self.pixels[0] else: pixels = channels_to_back(self.pixels) pixels = denormalize_pixels_range(pixels, out_dtype) return PILImage.fromarray(pixels) def as_imageio(self, out_dtype=np.uint8): r""" Return an Imageio copy of the image scaled and cast to the correct values for the provided ``out_dtype``. Image must only have 1 or 3 channels and be 2 dimensional. Non `uint8` floating point images must be in the range ``[0, 1]`` to be converted. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- imageio_image : `ndarray` Imageio image (which is just a numpy ndarray with the channels as the last axis). Raises ------ ValueError If image is not 2D and has 1 channel or 3 channels. ValueError If pixels data type is `float32` or `float64` and the pixel range is outside of ``[0, 1]`` ValueError If the output dtype is unsupported. Currently uint8 and uint16 are supported. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .pixels_with_channels_at_back instead.", MenpoDeprecationWarning, ) if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3): raise ValueError( "Can only convert greyscale or RGB 2D images. 
" "Received a {} channel {}D image.".format(self.n_channels, self.n_dims) ) # Slice off the channel for greyscale images if self.n_channels == 1: pixels = self.pixels[0] else: pixels = channels_to_back(self.pixels) return denormalize_pixels_range(pixels, out_dtype) def pixels_range(self): r""" The range of the pixel values (min and max pixel values). Returns ------- min_max : ``(dtype, dtype)`` The minimum and maximum value of the pixels array. """ return self.pixels.min(), self.pixels.max() def rolled_channels(self): r""" Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .pixels_with_channels_at_back() instead.", MenpoDeprecationWarning, ) return self.pixels_with_channels_at_back() def pixels_with_channels_at_back(self, out_dtype=None): r""" Returns the pixels matrix, with the channels rolled to the back axis. This may be required for interacting with external code bases that require images to have channels as the last axis, rather than the Menpo convention of channels as the first axis. If this image is single channel, the final axis is dropped. Parameters ---------- out_dtype : `np.dtype`, optional The dtype the output array should be. Returns ------- rolled_channels : `ndarray` Pixels with channels as the back (last) axis. If single channel, the last axis will be dropped. """ p = channels_to_back(self.pixels) if out_dtype is not None: p = denormalize_pixels_range(p, out_dtype=out_dtype) return np.squeeze(p) def __str__(self): return "{} {}D Image with {} channel{}".format( self._str_shape(), self.n_dims, self.n_channels, "s" * (self.n_channels > 1) ) def has_landmarks_outside_bounds(self): """ Indicates whether there are landmarks located outside the image bounds. :type: `bool` """ if self.has_landmarks: for l_group in self.landmarks: pc = self.landmarks[l_group].points if np.any(np.logical_or(self.shape - pc < 1, pc < 0)): return True return False def constrain_landmarks_to_bounds(self): r""" Deprecated - please use the equivalent ``constrain_to_bounds`` method now on PointCloud, in conjunction with the new Image ``bounds()`` method. For example: >>> im.constrain_landmarks_to_bounds() # Equivalent to below >>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds()) """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .constrain_to_bounds() instead (on PointCloud).", MenpoDeprecationWarning, ) for l_group in self.landmarks: l = self.landmarks[l_group] for k in range(l.points.shape[1]): tmp = l.points[:, k] tmp[tmp < 0] = 0 tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1 l.points[:, k] = tmp self.landmarks[l_group] = l def normalize_std(self, mode="all", **kwargs): r""" Returns a copy of this image normalized such that its pixel values have zero mean and unit variance. Parameters ---------- mode : ``{all, per_channel}``, optional If ``all``, the normalization is over all channels. If ``per_channel``, each channel individually is mean centred and normalized in variance. Returns ------- image : ``type(self)`` A copy of this image, normalized. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. 
" "Use .normalize_std() instead (features package).", MenpoDeprecationWarning, ) return self._normalize(np.std, mode=mode) def normalize_norm(self, mode="all", **kwargs): r""" Returns a copy of this image normalized such that its pixel values have zero mean and its norm equals 1. Parameters ---------- mode : ``{all, per_channel}``, optional If ``all``, the normalization is over all channels. If ``per_channel``, each channel individually is mean centred and unit norm. Returns ------- image : ``type(self)`` A copy of this image, normalized. """ warn( "This method is no longer supported and will be removed in a " "future version of Menpo. " "Use .normalize_norm() instead (features package).", MenpoDeprecationWarning, ) def scale_func(pixels, axis=None): return np.linalg.norm(pixels, axis=axis, **kwargs) return self._normalize(scale_func, mode=mode) def _normalize(self, scale_func, mode="all"): from menpo.feature import normalize return normalize(self, scale_func=scale_func, mode=mode) def rescale_pixels(self, minimum, maximum, per_channel=True): r"""A copy of this image with pixels linearly rescaled to fit a range. Note that the only pixels that will be considered and rescaled are those that feature in the vectorized form of this image. If you want to use this routine on all the pixels in a :map:`MaskedImage`, consider using `as_unmasked()` prior to this call. Parameters ---------- minimum: `float` The minimal value of the rescaled pixels maximum: `float` The maximal value of the rescaled pixels per_channel: `boolean`, optional If ``True``, each channel will be rescaled independently. If ``False``, the scaling will be over all channels. Returns ------- rescaled_image: ``type(self)`` A copy of this image with pixels linearly rescaled to fit in the range provided. """ v = self.as_vector(keep_channels=True).T if per_channel: min_, max_ = v.min(axis=0), v.max(axis=0) else: min_, max_ = v.min(), v.max() sf = ((maximum - minimum) * 1.0) / (max_ - min_) v_new = ((v - min_) * sf) + minimum return self.from_vector(v_new.T.ravel()) def clip_pixels(self, minimum=None, maximum=None): r"""A copy of this image with pixels linearly clipped to fit a range. Parameters ---------- minimum: `float`, optional The minimal value of the clipped pixels. If None is provided, the default value will be 0. maximum: `float`, optional The maximal value of the clipped pixels. If None is provided, the default value will depend on the dtype. Returns ------- rescaled_image: ``type(self)`` A copy of this image with pixels linearly rescaled to fit in the range provided. """ if minimum is None: minimum = 0 if maximum is None: dtype = self.pixels.dtype if dtype == np.uint8: maximum = 255 elif dtype == np.uint16: maximum = 65535 elif dtype in [np.float32, np.float64]: maximum = 1.0 else: m1 = "Could not recognise the dtype ({}) to set the maximum." raise ValueError(m1.format(dtype)) copy = self.copy() copy.pixels = copy.pixels.clip(min=minimum, max=maximum) return copy def rasterize_landmarks( self, group=None, render_lines=True, line_style="-", line_colour="b", line_width=1, render_markers=True, marker_style="o", marker_size=1, marker_face_colour="b", marker_edge_colour="b", marker_edge_width=1, backend="matplotlib", ): r""" This method provides the ability to rasterize 2D landmarks onto the image. The returned image has the specified landmark groups rasterized onto the image - which is useful for things like creating result examples or rendering videos with annotations. 
Since multiple landmark groups can be specified, all arguments can take lists of parameters that map to the provided groups list. Therefore, the parameters must be lists of the correct length or a single parameter to apply to every landmark group. Multiple backends are provided, all with different strengths. The 'pillow' backend is very fast, but not very flexible. The `matplotlib` backend should be feature compatible with other Menpo rendering methods, but is much slower due to the overhead of creating a figure to render into. Parameters ---------- group : `str` or `list` of `str`, optional The landmark group key, or a list of keys. render_lines : `bool`, optional If ``True``, and the provided landmark group is a :map:`PointDirectedGraph`, the edges are rendered. line_style : `str`, optional The style of the edge line. Not all backends support this argument. line_colour : `str` or `tuple`, optional A Matplotlib style colour or a backend dependent colour. line_width : `int`, optional The width of the line to rasterize. render_markers : `bool`, optional If ``True``, render markers at the coordinates of each landmark. marker_style : `str`, optional A Matplotlib marker style. Not all backends support all marker styles. marker_size : `int`, optional The size of the marker - different backends use different scale spaces so consistent output may be difficult. marker_face_colour : `str`, optional A Matplotlib style colour or a backend dependent colour. marker_edge_colour : `str`, optional A Matplotlib style colour or a backend dependent colour. marker_edge_width : `int`, optional The width of the marker edge. Not all backends support this. backend : {'matplotlib', 'pillow'}, optional The backend to use. Returns ------- rasterized_image : :map:`Image` The image with the landmarks rasterized directly into the pixels. Raises ------ ValueError Only 2D images are supported. ValueError Only RGB (3-channel) or Greyscale (1-channel) images are supported. """ from .rasterize import rasterize_landmarks_2d return rasterize_landmarks_2d( self, group=group, render_lines=render_lines, line_style=line_style, line_colour=line_colour, line_width=line_width, render_markers=render_markers, marker_style=marker_style, marker_size=marker_size, marker_face_colour=marker_face_colour, marker_edge_colour=marker_edge_colour, marker_edge_width=marker_edge_width, backend=backend, ) def round_image_shape(shape, round): if round not in ["ceil", "round", "floor"]: raise ValueError("round must be either ceil, round or floor") # Ensure that the '+' operator means concatenate tuples return tuple(getattr(np, round)(shape).astype(int)) def _convert_patches_list_to_single_array(patches_list, n_center): r""" Converts patches from a `list` of :map:`Image` objects to a single `ndarray` with shape ``(n_center, n_offset, self.n_channels, patch_shape)``. Note that these two are the formats returned by the `extract_patches()` and `extract_patches_around_landmarks()` methods of :map:`Image` class. Parameters ---------- patches_list : `list` of `n_center * n_offset` :map:`Image` objects A `list` that contains all the patches as :map:`Image` objects. n_center : `int` The number of centers from which the patches are extracted. Returns ------- patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)`` The numpy array that contains all the patches.
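For example (illustrative shapes only), a `list` of ``68`` patch :map:`Image` objects, each with ``3`` channels and a ``(24, 24)`` patch shape, becomes a single ``(68, 1, 3, 24, 24)`` `ndarray` when ``n_center=68``.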
""" n_offsets = int(len(patches_list) / n_center) n_channels = patches_list[0].n_channels height = patches_list[0].height width = patches_list[0].width patches_array = np.empty( (n_center, n_offsets, n_channels, height, width), dtype=patches_list[0].pixels.dtype, ) total_index = 0 for p in range(n_center): for o in range(n_offsets): patches_array[p, o, ...] = patches_list[total_index].pixels total_index += 1 return patches_array def _create_patches_image( patches, patch_centers, patches_indices=None, offset_index=None, background="black" ): r""" Creates an :map:`Image` object in which the patches are located on the correct regions based on the centers. Thus, the image is a block-sparse matrix. It has also attached a `patch_Centers` :map:`PointCloud` object with the centers that correspond to the patches that the user selected to set. The patches argument can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods of the :map:`Image` class. Specifically it can be: 1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` 2. `list` of ``n_center * n_offset`` :map:`Image` objects Parameters ---------- patches : `ndarray` or `list` The values of the patches. It can have any of the two formats that are returned from the `extract_patches()` and `extract_patches_around_landmarks()` methods. Specifically, it can either be an ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects. patch_centers : :map:`PointCloud` The centers to set the patches around. patches_indices : `int` or `list` of `int` or ``None``, optional Defines the patches that will be set (copied) to the image. If ``None``, then all the patches are copied. offset_index : `int` or ``None``, optional The offset index within the provided `patches` argument, thus the index of the second dimension from which to sample. If ``None``, then ``0`` is used. background : ``{'black', 'white'}``, optional If ``'black'``, then the background is set equal to the minimum value of `patches`. If ``'white'``, then the background is set equal to the maximum value of `patches`. Returns ------- patches_image : :map:`Image` The output patches image object. Raises ------ ValueError Background must be either ''black'' or ''white''. 
""" # If patches is a list, convert it to array if isinstance(patches, list): patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points) # Parse inputs if offset_index is None: offset_index = 0 if patches_indices is None: patches_indices = np.arange(patches.shape[0]) elif not isinstance(patches_indices, Iterable): patches_indices = [patches_indices] # Compute patches image's shape n_channels = patches.shape[2] patch_shape0 = patches.shape[3] patch_shape1 = patches.shape[4] top, left = np.min(patch_centers.points, 0) bottom, right = np.max(patch_centers.points, 0) min_0 = np.floor(top - patch_shape0) min_1 = np.floor(left - patch_shape1) max_0 = np.ceil(bottom + patch_shape0) max_1 = np.ceil(right + patch_shape1) height = max_0 - min_0 + 1 width = max_1 - min_1 + 1 # Translate the patch centers to fit in the new image new_patch_centers = patch_centers.copy() new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]]) # Create new image with the correct background values if background == "black": patches_image = Image.init_blank( (height, width), n_channels, fill=np.min(patches[patches_indices]), dtype=patches.dtype, ) elif background == "white": patches_image = Image.init_blank( (height, width), n_channels, fill=np.max(patches[patches_indices]), dtype=patches.dtype, ) else: raise ValueError("Background must be either " "black" " or " "white" ".") # If there was no slicing on the patches, then attach the original patch # centers. Otherwise, attach the sliced ones. if set(patches_indices) == set(range(patches.shape[0])): patches_image.landmarks["patch_centers"] = new_patch_centers else: tmp_centers = PointCloud(new_patch_centers.points[patches_indices]) patches_image.landmarks["patch_centers"] = tmp_centers # Set the patches return patches_image.set_patches_around_landmarks( patches[patches_indices], group="patch_centers", offset_index=offset_index )
bsd-3-clause
Eric89GXL/mne-python
mne/externals/tqdm/_tqdm/std.py
14
55471
""" Customisable progressbar decorator for iterators. Includes a default (x)range iterator printing to stderr. Usage: >>> from tqdm import trange[, tqdm] >>> for i in trange(10): #same as: for i in tqdm(xrange(10)) ... ... """ from __future__ import absolute_import, division # compatibility functions and utilities from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \ _term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, _text_width, \ Comparable, RE_ANSI, _is_ascii, FormatReplace, \ SimpleTextIOWrapper, CallbackIOWrapper from ._monitor import TMonitor # native libraries from contextlib import contextmanager import sys from numbers import Number from time import time # For parallelism safety import threading as th from warnings import warn __author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim", "casperdcl", "lrq3000"]} __all__ = ['tqdm', 'trange', 'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning', 'TqdmExperimentalWarning', 'TqdmDeprecationWarning', 'TqdmMonitorWarning'] class TqdmTypeError(TypeError): pass class TqdmKeyError(KeyError): pass class TqdmWarning(Warning): """base class for all tqdm warnings. Used for non-external-code-breaking errors, such as garbled printing. """ def __init__(self, msg, fp_write=None, *a, **k): if fp_write is not None: fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n') else: super(TqdmWarning, self).__init__(msg, *a, **k) class TqdmExperimentalWarning(TqdmWarning, FutureWarning): """beta feature, unstable API and behaviour""" pass class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning): # not suppressed if raised pass class TqdmMonitorWarning(TqdmWarning, RuntimeWarning): """tqdm monitor errors which do not affect external functionality""" pass class TqdmDefaultWriteLock(object): """ Provide a default write lock for thread and multiprocessing safety. Works only on platforms supporting `fork` (so Windows is excluded). You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance before forking in order for the write lock to work. On Windows, you need to supply the lock from the parent to the children as an argument to joblib or the parallelism lib you use. """ def __init__(self): # Create global parallelism locks to avoid racing issues with parallel # bars works only if fork available (Linux/MacOSX, but not Windows) self.create_mp_lock() self.create_th_lock() cls = type(self) self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None] def acquire(self, *a, **k): for lock in self.locks: lock.acquire(*a, **k) def release(self): for lock in self.locks[::-1]: # Release in inverse order of acquisition lock.release() def __enter__(self): self.acquire() def __exit__(self, *exc): self.release() @classmethod def create_mp_lock(cls): if not hasattr(cls, 'mp_lock'): try: from multiprocessing import RLock cls.mp_lock = RLock() # multiprocessing lock except ImportError: # pragma: no cover cls.mp_lock = None except OSError: # pragma: no cover cls.mp_lock = None @classmethod def create_th_lock(cls): if not hasattr(cls, 'th_lock'): try: cls.th_lock = th.RLock() # thread lock except OSError: # pragma: no cover cls.th_lock = None # Create a thread lock before instantiation so that no setup needs to be done # before running in a multithreaded environment. # Do not create the multiprocessing lock because it sets the multiprocessing # context and does not allow the user to use 'spawn' or 'forkserver' methods. 
TqdmDefaultWriteLock.create_th_lock() class Bar(object): """ `str.format`-able bar with format specifiers: `[width][type]` - `width` + unspecified (default): use `self.default_len` + `int >= 0`: overrides `self.default_len` + `int < 0`: subtract from `self.default_len` - `type` + `a`: ascii (`charset=self.ASCII` override) + `u`: unicode (`charset=self.UTF` override) + `b`: blank (`charset=" "` override) """ ASCII = " 123456789#" UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1))) BLANK = " " def __init__(self, frac, default_len=10, charset=UTF): if not (0 <= frac <= 1): warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2) frac = max(0, min(1, frac)) assert default_len > 0 self.frac = frac self.default_len = default_len self.charset = charset def __format__(self, format_spec): if format_spec: _type = format_spec[-1].lower() try: charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type] except KeyError: charset = self.charset else: format_spec = format_spec[:-1] if format_spec: N_BARS = int(format_spec) if N_BARS < 0: N_BARS += self.default_len else: N_BARS = self.default_len else: charset = self.charset N_BARS = self.default_len nsyms = len(charset) - 1 bar_length, frac_bar_length = divmod( int(self.frac * N_BARS * nsyms), nsyms) bar = charset[-1] * bar_length frac_bar = charset[frac_bar_length] # whitespace padding if bar_length < N_BARS: return bar + frac_bar + \ charset[0] * (N_BARS - bar_length - 1) return bar class tqdm(Comparable): """ Decorate an iterable object, returning an iterator which acts exactly like the original iterable, but prints a dynamically updating progressbar every time a value is requested. """ monitor_interval = 10 # set to 0 to disable the thread monitor = None @staticmethod def format_sizeof(num, suffix='', divisor=1000): """ Formats a number (greater than unity) with SI Order of Magnitude prefixes. Parameters ---------- num : float Number ( >= 1) to format. suffix : str, optional Post-postfix [default: '']. divisor : float, optional Divisor between prefixes [default: 1000]. Returns ------- out : str Number with Order of Magnitude SI unit postfix. """ for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 999.5: if abs(num) < 99.95: if abs(num) < 9.995: return '{0:1.2f}'.format(num) + unit + suffix return '{0:2.1f}'.format(num) + unit + suffix return '{0:3.0f}'.format(num) + unit + suffix num /= divisor return '{0:3.1f}Y'.format(num) + suffix @staticmethod def format_interval(t): """ Formats a number of seconds as a clock time, [H:]MM:SS Parameters ---------- t : int Number of seconds. Returns ------- out : str [H:]MM:SS """ mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) if h: return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s) else: return '{0:02d}:{1:02d}'.format(m, s) @staticmethod def format_num(n): """ Intelligent scientific notation (.3g). Parameters ---------- n : int or float or Numeric A Number. Returns ------- out : str Formatted number. """ f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-') n = str(n) return f if len(f) < len(n) else n @staticmethod def ema(x, mu=None, alpha=0.3): """ Exponential moving average: smoothing to give progressively lower weights to older values. Parameters ---------- x : float New value to include in EMA. mu : float, optional Previous EMA value. alpha : float, optional Smoothing factor in range [0, 1], [default: 0.3]. Increase to give more weight to recent values. Ranges from 0 (yields mu) to 1 (yields x). 
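For example, ``ema(20, mu=10, alpha=0.3)`` returns ``0.3 * 20 + 0.7 * 10 = 13.0``.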
""" return x if mu is None else (alpha * x) + (1 - alpha) * mu @staticmethod def status_printer(file): """ Manage the printing and in-place updating of a line of characters. Note that if the string is longer than a line, then in-place updating may not work (it will print a new line at each refresh). """ fp = file fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover def fp_write(s): fp.write(_unicode(s)) fp_flush() last_len = [0] def print_status(s): len_s = len(s) fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0))) last_len[0] = len_s return print_status @staticmethod def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it', unit_scale=False, rate=None, bar_format=None, postfix=None, unit_divisor=1000, **extra_kwargs): """ Return a string-based progress bar given some parameters Parameters ---------- n : int or float Number of finished iterations. total : int or float The expected total number of iterations. If meaningless (None), only basic progress statistics are displayed (no ETA). elapsed : float Number of seconds passed since start. ncols : int, optional The width of the entire output message. If specified, dynamically resizes `{bar}` to stay within this bound [default: None]. If `0`, will not print any bar (only stats). The fallback is `{bar:10}`. prefix : str, optional Prefix message (included in total width) [default: '']. Use as {desc} in bar_format string. ascii : bool, optional or str, optional If not set, use unicode (smooth blocks) to fill the meter [default: False]. The fallback is to use ASCII characters " 123456789#". unit : str, optional The iteration unit [default: 'it']. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be printed with an appropriate SI metric prefix (k = 10^3, M = 10^6, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. rate : float, optional Manual override for iteration rate. If [default: None], uses n/elapsed. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. postfix : *, optional Similar to `prefix`, but placed at the end (e.g. for additional stats). Note: postfix is usually a string (not a dict) for this method, and will if possible be set to postfix = ', ' + postfix. However other types are supported (#382). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. Returns ------- out : Formatted meter and stats, ready to display. 
""" # sanity check: total if total and n >= (total + 0.5): # allow float imprecision (#849) total = None # apply custom scale if necessary if unit_scale and unit_scale not in (True, 1): if total: total *= unit_scale n *= unit_scale if rate: rate *= unit_scale # by default rate = 1 / self.avg_time unit_scale = False elapsed_str = tqdm.format_interval(elapsed) # if unspecified, attempt to use rate = average speed # (we allow manual override since predicting time is an arcane art) if rate is None and elapsed: rate = n / elapsed inv_rate = 1 / rate if rate else None format_sizeof = tqdm.format_sizeof rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s' rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate)) if inv_rate else '?') + 's/' + unit rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt if unit_scale: n_fmt = format_sizeof(n, divisor=unit_divisor) total_fmt = format_sizeof(total, divisor=unit_divisor) \ if total is not None else '?' else: n_fmt = str(n) total_fmt = str(total) if total is not None else '?' try: postfix = ', ' + postfix if postfix else '' except TypeError: pass remaining = (total - n) / rate if rate and total else 0 remaining_str = tqdm.format_interval(remaining) if rate else '?' # format the stats displayed to the left and right sides of the bar if prefix: # old prefix setup work around bool_prefix_colon_already = (prefix[-2:] == ": ") l_bar = prefix if bool_prefix_colon_already else prefix + ": " else: l_bar = '' r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format( n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix) # Custom bar formatting # Populate a dict with all available progress indicators format_dict = dict( # slight extension of self.format_dict n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt, elapsed=elapsed_str, elapsed_s=elapsed, ncols=ncols, desc=prefix or '', unit=unit, rate=inv_rate if inv_rate and inv_rate > 1 else rate, rate_fmt=rate_fmt, rate_noinv=rate, rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate, rate_inv_fmt=rate_inv_fmt, postfix=postfix, unit_divisor=unit_divisor, # plus more useful definitions remaining=remaining_str, remaining_s=remaining, l_bar=l_bar, r_bar=r_bar, **extra_kwargs) # total is known: we can predict some stats if total: # fractional and percentage progress frac = n / total percentage = frac * 100 l_bar += '{0:3.0f}%|'.format(percentage) if ncols == 0: return l_bar[:-1] + r_bar[1:] format_dict.update(l_bar=l_bar) if bar_format: format_dict.update(percentage=percentage) # auto-remove colon for empty `desc` if not prefix: bar_format = bar_format.replace("{desc}: ", '') else: bar_format = "{l_bar}{bar}{r_bar}" full_bar = FormatReplace() try: nobar = bar_format.format(bar=full_bar, **format_dict) except UnicodeEncodeError: bar_format = _unicode(bar_format) nobar = bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: # no {bar}, we can just format and return return nobar # Formatting progress bar space available for bar's display full_bar = Bar( frac, max(1, ncols - _text_width(RE_ANSI.sub('', nobar))) if ncols else 10, charset=Bar.ASCII if ascii is True else ascii or Bar.UTF) if not _is_ascii(full_bar.charset) and _is_ascii(bar_format): bar_format = _unicode(bar_format) return bar_format.format(bar=full_bar, **format_dict) elif bar_format: # user-specified bar_format but no total l_bar += '|' format_dict.update(l_bar=l_bar, percentage=0) full_bar = FormatReplace() nobar = 
bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: return nobar full_bar = Bar( 0, max(1, ncols - _text_width(RE_ANSI.sub('', nobar))) if ncols else 10, charset=Bar.BLANK) return bar_format.format(bar=full_bar, **format_dict) else: # no total: no progressbar, ETA, just progress stats return ((prefix + ": ") if prefix else '') + \ '{0}{1} [{2}, {3}{4}]'.format( n_fmt, unit, elapsed_str, rate_fmt, postfix) def __new__(cls, *args, **kwargs): # Create a new instance instance = object.__new__(cls) # Construct the lock if it does not exist with cls.get_lock(): # Add to the list of instances if not hasattr(cls, '_instances'): cls._instances = WeakSet() cls._instances.add(instance) # Create the monitoring thread if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()): try: cls.monitor = TMonitor(cls, cls.monitor_interval) except Exception as e: # pragma: nocover warn("tqdm:disabling monitor support" " (monitor_interval = 0) due to:\n" + str(e), TqdmMonitorWarning, stacklevel=2) cls.monitor_interval = 0 # Return the instance return instance @classmethod def _get_free_pos(cls, instance=None): """Skips specified instance.""" positions = set(abs(inst.pos) for inst in cls._instances if inst is not instance and hasattr(inst, "pos")) return min(set(range(len(positions) + 1)).difference(positions)) @classmethod def _decr_instances(cls, instance): """ Remove from list and reposition other bars so that newer bars won't overlap previous bars """ with cls._lock: try: cls._instances.remove(instance) except KeyError: # if not instance.gui: # pragma: no cover # raise pass # py2: maybe magically removed already # else: if not instance.gui: for inst in cls._instances: # negative `pos` means fixed if hasattr(inst, "pos") and inst.pos > abs(instance.pos): inst.clear(nolock=True) inst.pos -= 1 # TODO: check this doesn't overwrite another fixed bar # Kill monitor if no instances are left if not cls._instances and cls.monitor: try: cls.monitor.exit() del cls.monitor except AttributeError: # pragma: nocover pass else: cls.monitor = None @classmethod def write(cls, s, file=None, end="\n", nolock=False): """Print a message via tqdm (without overlap with bars).""" fp = file if file is not None else sys.stdout with cls.external_write_mode(file=file, nolock=nolock): # Write the message fp.write(s) fp.write(end) @classmethod @contextmanager def external_write_mode(cls, file=None, nolock=False): """ Disable tqdm within context and refresh tqdm when exits. Useful when writing to standard output stream """ fp = file if file is not None else sys.stdout if not nolock: cls.get_lock().acquire() # Clear all bars inst_cleared = [] for inst in getattr(cls, '_instances', []): # Clear instance if in the target output file # or if write output + tqdm output are both either # sys.stdout or sys.stderr (because both are mixed in terminal) if hasattr(inst, "start_t") and (inst.fp == fp or all( f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))): inst.clear(nolock=True) inst_cleared.append(inst) yield # Force refresh display of bars we cleared for inst in inst_cleared: inst.refresh(nolock=True) if not nolock: cls._lock.release() @classmethod def set_lock(cls, lock): """Set the global lock.""" cls._lock = lock @classmethod def get_lock(cls): """Get the global lock. Construct it if it does not exist.""" if not hasattr(cls, '_lock'): cls._lock = TqdmDefaultWriteLock() return cls._lock @classmethod def pandas(tclass, *targs, **tkwargs): """ Registers the given `tqdm` class with pandas.core. 
( frame.DataFrame | series.Series | groupby.(generic.)DataFrameGroupBy | groupby.(generic.)SeriesGroupBy ).progress_apply A new instance will be create every time `progress_apply` is called, and each instance will automatically close() upon completion. Parameters ---------- targs, tkwargs : arguments for the tqdm instance Examples -------- >>> import pandas as pd >>> import numpy as np >>> from tqdm import tqdm >>> from tqdm.gui import tqdm as tqdm_gui >>> >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6))) >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc >>> # Now you can use `progress_apply` instead of `apply` >>> df.groupby(0).progress_apply(lambda x: x**2) References ---------- https://stackoverflow.com/questions/18603270/ progress-indicator-during-pandas-operations-python """ from pandas.core.frame import DataFrame from pandas.core.series import Series try: from pandas import Panel except ImportError: # TODO: pandas>0.25.2 Panel = None try: # pandas>=0.18.0 from pandas.core.window import _Rolling_and_Expanding except ImportError: # pragma: no cover _Rolling_and_Expanding = None try: # pandas>=0.25.0 from pandas.core.groupby.generic import DataFrameGroupBy, \ SeriesGroupBy # , NDFrameGroupBy except ImportError: try: # pandas>=0.23.0 from pandas.core.groupby.groupby import DataFrameGroupBy, \ SeriesGroupBy except ImportError: from pandas.core.groupby import DataFrameGroupBy, \ SeriesGroupBy try: # pandas>=0.23.0 from pandas.core.groupby.groupby import GroupBy except ImportError: from pandas.core.groupby import GroupBy try: # pandas>=0.23.0 from pandas.core.groupby.groupby import PanelGroupBy except ImportError: try: from pandas.core.groupby import PanelGroupBy except ImportError: # pandas>=0.25.0 PanelGroupBy = None deprecated_t = [tkwargs.pop('deprecated_t', None)] def inner_generator(df_function='apply'): def inner(df, func, *args, **kwargs): """ Parameters ---------- df : (DataFrame|Series)[GroupBy] Data (may be grouped). func : function To be applied on the (grouped) data. **kwargs : optional Transmitted to `df.apply()`. """ # Precompute total iterations total = tkwargs.pop("total", getattr(df, 'ngroups', None)) if total is None: # not grouped if df_function == 'applymap': total = df.size elif isinstance(df, Series): total = len(df) elif _Rolling_and_Expanding is None or \ not isinstance(df, _Rolling_and_Expanding): # DataFrame or Panel axis = kwargs.get('axis', 0) if axis == 'index': axis = 0 elif axis == 'columns': axis = 1 # when axis=0, total is shape[axis1] total = df.size // df.shape[axis] # Init bar if deprecated_t[0] is not None: t = deprecated_t[0] deprecated_t[0] = None else: t = tclass(*targs, total=total, **tkwargs) if len(args) > 0: # *args intentionally not supported (see #244, #299) TqdmDeprecationWarning( "Except func, normal arguments are intentionally" + " not supported by" + " `(DataFrame|Series|GroupBy).progress_apply`." 
+ " Use keyword arguments instead.", fp_write=getattr(t.fp, 'write', sys.stderr.write)) try: func = df._is_builtin_func(func) except TypeError: pass # Define bar updating wrapper def wrapper(*args, **kwargs): # update tbar correctly # it seems `pandas apply` calls `func` twice # on the first column/row to decide whether it can # take a fast or slow code path; so stop when t.total==t.n t.update(n=1 if not t.total or t.n < t.total else 0) return func(*args, **kwargs) # Apply the provided function (in **kwargs) # on the df using our wrapper (which provides bar updating) result = getattr(df, df_function)(wrapper, **kwargs) # Close bar and return pandas calculation result t.close() return result return inner # Monkeypatch pandas to provide easy methods # Enable custom tqdm progress in pandas! Series.progress_apply = inner_generator() SeriesGroupBy.progress_apply = inner_generator() Series.progress_map = inner_generator('map') SeriesGroupBy.progress_map = inner_generator('map') DataFrame.progress_apply = inner_generator() DataFrameGroupBy.progress_apply = inner_generator() DataFrame.progress_applymap = inner_generator('applymap') if Panel is not None: Panel.progress_apply = inner_generator() if PanelGroupBy is not None: PanelGroupBy.progress_apply = inner_generator() GroupBy.progress_apply = inner_generator() GroupBy.progress_aggregate = inner_generator('aggregate') GroupBy.progress_transform = inner_generator('transform') if _Rolling_and_Expanding is not None: # pragma: no cover _Rolling_and_Expanding.progress_apply = inner_generator() def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=None, lock_args=None, gui=False, **kwargs): """ Parameters ---------- iterable : iterable, optional Iterable to decorate with a progressbar. Leave blank to manually manage the updates. desc : str, optional Prefix for the progressbar. total : int or float, optional The number of expected iterations. If unspecified, len(iterable) is used if possible. If float("inf") or as a last resort, only basic progress statistics are displayed (no ETA, no progressbar). If `gui` is True and this parameter needs subsequent updating, specify an initial arbitrary large positive number, e.g. 9e9. leave : bool, optional If [default: True], keeps all traces of the progressbar upon termination of iteration. If `None`, will leave only if `position` is `0`. file : `io.TextIOWrapper` or `io.StringIO`, optional Specifies where to output the progress messages (default: sys.stderr). Uses `file.write(str)` and `file.flush()` methods. For encoding, see `write_bytes`. ncols : int, optional The width of the entire output message. If specified, dynamically resizes the progressbar to stay within this bound. If unspecified, attempts to use environment width. The fallback is a meter width of 10 and no limit for the counter and statistics. If 0, will not print any meter (only stats). mininterval : float, optional Minimum progress display update interval [default: 0.1] seconds. maxinterval : float, optional Maximum progress display update interval [default: 10] seconds. Automatically adjusts `miniters` to correspond to `mininterval` after long display update lag. Only works if `dynamic_miniters` or monitor thread is enabled. 
miniters : int or float, optional Minimum progress display update interval, in iterations. If 0 and `dynamic_miniters`, will automatically adjust to equal `mininterval` (more CPU efficient, good for tight loops). If > 0, will skip display of specified number of iterations. Tweak this and `mininterval` to get very efficient loops. If your progress is erratic with both fast and slow iterations (network, skipping items, etc) you should set miniters=1. ascii : bool or str, optional If unspecified or False, use unicode (smooth blocks) to fill the meter. The fallback is to use ASCII characters " 123456789#". disable : bool, optional Whether to disable the entire progressbar wrapper [default: False]. If set to None, disable on non-TTY. unit : str, optional String that will be used to define the unit of each iteration [default: it]. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be reduced/scaled automatically and a metric prefix following the International System of Units standard will be added (kilo, mega, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. dynamic_ncols : bool, optional If set, constantly alters `ncols` to the environment (allowing for window resizes) [default: False]. smoothing : float, optional Exponential moving average smoothing factor for speed estimates (ignored in GUI mode). Ranges from 0 (average speed) to 1 (current/instantaneous speed) [default: 0.3]. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. initial : int or float, optional The initial counter value. Useful when restarting a progress bar [default: 0]. If using float, consider specifying `{n:.3f}` or similar in `bar_format`, or specifying `unit_scale`. position : int, optional Specify the line offset to print this bar (starting from 0) Automatic if unspecified. Useful to manage multiple bars at once (eg, from threads). postfix : dict or *, optional Specify additional stats to display at the end of the bar. Calls `set_postfix(**postfix)` if possible (dict). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. write_bytes : bool, optional If (default: None) and `file` is unspecified, bytes will be written in Python 2. If `True` will also write bytes. In all other cases will default to unicode. lock_args : tuple, optional Passed to `refresh` for intermediate output (initialisation, iterating, and updating). gui : bool, optional WARNING: internal parameter - do not use. Use tqdm.gui.tqdm(...) instead. If set, will attempt to use matplotlib animations for a graphical output [default: False]. Returns ------- out : decorated iterator. """ if write_bytes is None: write_bytes = file is None and sys.version_info < (3,) if file is None: file = sys.stderr if write_bytes: # Despite coercing unicode into bytes, py2 sys.std* streams # should have bytes written to them. 
file = SimpleTextIOWrapper( file, encoding=getattr(file, 'encoding', None) or 'utf-8') if disable is None and hasattr(file, "isatty") and not file.isatty(): disable = True if total is None and iterable is not None: try: total = len(iterable) except (TypeError, AttributeError): total = None if total == float("inf"): # Infinite iterations, behave same as unknown total = None if disable: self.iterable = iterable self.disable = disable with self._lock: self.pos = self._get_free_pos(self) self._instances.remove(self) self.n = initial self.total = total return if kwargs: self.disable = True with self._lock: self.pos = self._get_free_pos(self) self._instances.remove(self) raise ( TqdmDeprecationWarning( "`nested` is deprecated and automated.\n" "Use `position` instead for manual control.\n", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs else TqdmKeyError("Unknown argument(s): " + str(kwargs))) # Preprocess the arguments if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \ dynamic_ncols: # pragma: no cover if dynamic_ncols: dynamic_ncols = _environ_cols_wrapper() if dynamic_ncols: ncols = dynamic_ncols(file) else: _dynamic_ncols = _environ_cols_wrapper() if _dynamic_ncols: ncols = _dynamic_ncols(file) if miniters is None: miniters = 0 dynamic_miniters = True else: dynamic_miniters = False if mininterval is None: mininterval = 0 if maxinterval is None: maxinterval = 0 if ascii is None: ascii = not _supports_unicode(file) if bar_format and not ((ascii is True) or _is_ascii(ascii)): # Convert bar format into unicode since terminal uses unicode bar_format = _unicode(bar_format) if smoothing is None: smoothing = 0 # Store the arguments self.iterable = iterable self.desc = desc or '' self.total = total self.leave = leave self.fp = file self.ncols = ncols self.mininterval = mininterval self.maxinterval = maxinterval self.miniters = miniters self.dynamic_miniters = dynamic_miniters self.ascii = ascii self.disable = disable self.unit = unit self.unit_scale = unit_scale self.unit_divisor = unit_divisor self.lock_args = lock_args self.gui = gui self.dynamic_ncols = dynamic_ncols self.smoothing = smoothing self.avg_time = None self._time = time self.bar_format = bar_format self.postfix = None if postfix: try: self.set_postfix(refresh=False, **postfix) except TypeError: self.postfix = postfix # Init the iterations counters self.last_print_n = initial self.n = initial # if nested, at initial sp() call we replace '\r' by '\n' to # not overwrite the outer progress bar with self._lock: if position is None: self.pos = self._get_free_pos(self) else: # mark fixed positions as negative self.pos = -position if not gui: # Initialize the screen printer self.sp = self.status_printer(self.fp) self.refresh(lock_args=self.lock_args) # Init the time counter self.last_print_t = self._time() # NB: Avoid race conditions by setting start_t at the very end of init self.start_t = self.last_print_t def __bool__(self): if self.total is not None: return self.total > 0 if self.iterable is None: raise TypeError('bool() undefined when iterable == total == None') return bool(self.iterable) def __nonzero__(self): return self.__bool__() def __len__(self): return self.total if self.iterable is None else \ (self.iterable.shape[0] if hasattr(self.iterable, "shape") else len(self.iterable) if hasattr(self.iterable, "__len__") else getattr(self, "total", None)) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): try: self.close() except AttributeError: # maybe eager thread 
cleanup upon external error if (exc_type, exc_value, traceback) == (None, None, None): raise warn("AttributeError ignored", TqdmWarning, stacklevel=2) def __del__(self): self.close() def __repr__(self): return self.format_meter(**self.format_dict) @property def _comparable(self): return abs(getattr(self, "pos", 1 << 31)) def __hash__(self): return id(self) def __iter__(self): """Backward-compatibility to use: for x in tqdm(iterable)""" # Inlining instance variables as locals (speed optimisation) iterable = self.iterable # If the bar is disabled, then just walk the iterable # (note: keep this check outside the loop for performance) if self.disable: for obj in iterable: yield obj return mininterval = self.mininterval maxinterval = self.maxinterval miniters = self.miniters dynamic_miniters = self.dynamic_miniters last_print_t = self.last_print_t last_print_n = self.last_print_n n = self.n smoothing = self.smoothing avg_time = self.avg_time time = self._time if not hasattr(self, 'sp'): raise TqdmDeprecationWarning( "Please use `tqdm.gui.tqdm(...)` instead of" " `tqdm(..., gui=True)`\n", fp_write=getattr(self.fp, 'write', sys.stderr.write)) for obj in iterable: yield obj # Update and possibly print the progressbar. # Note: does not call self.update(1) for speed optimisation. n += 1 # check counter first to avoid calls to time() if n - last_print_n >= self.miniters: miniters = self.miniters # watch monitoring thread changes delta_t = time() - last_print_t if delta_t >= mininterval: cur_t = time() delta_it = n - last_print_n # EMA (not just overall average) if smoothing and delta_t and delta_it: rate = delta_t / delta_it avg_time = self.ema(rate, avg_time, smoothing) self.avg_time = avg_time self.n = n self.refresh(lock_args=self.lock_args) # If no `miniters` was specified, adjust automatically # to the max iteration rate seen so far between 2 prints if dynamic_miniters: if maxinterval and delta_t >= maxinterval: # Adjust miniters to time interval by rule of 3 if mininterval: # Set miniters to correspond to mininterval miniters = delta_it * mininterval / delta_t else: # Set miniters to correspond to maxinterval miniters = delta_it * maxinterval / delta_t elif smoothing: # EMA-weight miniters to converge # towards the timeframe of mininterval rate = delta_it if mininterval and delta_t: rate *= mininterval / delta_t miniters = self.ema(rate, miniters, smoothing) else: # Maximum nb of iterations between 2 prints miniters = max(miniters, delta_it) # Store old values for next call self.n = self.last_print_n = last_print_n = n self.last_print_t = last_print_t = cur_t self.miniters = miniters # Closing the progress bar. # Update some internal variables for close(). self.last_print_n = last_print_n self.n = n self.miniters = miniters self.close() def update(self, n=1): """ Manually update the progress bar, useful for streams such as reading files. E.g.: >>> t = tqdm(total=filesize) # Initialise >>> for current_buffer in stream: ... ... ... t.update(len(current_buffer)) >>> t.close() The last line is highly recommended, but possibly not necessary if `t.update()` will be called in such a way that `filesize` will be exactly reached and printed. Parameters ---------- n : int or float, optional Increment to add to the internal counter of iterations [default: 1]. If using float, consider specifying `{n:.3f}` or similar in `bar_format`, or specifying `unit_scale`. """ # N.B.: see __iter__() for more comments. 
if self.disable: return if n < 0: self.last_print_n += n # for auto-refresh logic to work self.n += n # check counter first to reduce calls to time() if self.n - self.last_print_n >= self.miniters: delta_t = self._time() - self.last_print_t if delta_t >= self.mininterval: cur_t = self._time() delta_it = self.n - self.last_print_n # >= n # elapsed = cur_t - self.start_t # EMA (not just overall average) if self.smoothing and delta_t and delta_it: rate = delta_t / delta_it self.avg_time = self.ema( rate, self.avg_time, self.smoothing) if not hasattr(self, "sp"): raise TqdmDeprecationWarning( "Please use `tqdm.gui.tqdm(...)`" " instead of `tqdm(..., gui=True)`\n", fp_write=getattr(self.fp, 'write', sys.stderr.write)) self.refresh(lock_args=self.lock_args) # If no `miniters` was specified, adjust automatically to the # maximum iteration rate seen so far between two prints. # e.g.: After running `tqdm.update(5)`, subsequent # calls to `tqdm.update()` will only cause an update after # at least 5 more iterations. if self.dynamic_miniters: if self.maxinterval and delta_t >= self.maxinterval: if self.mininterval: self.miniters = delta_it * self.mininterval \ / delta_t else: self.miniters = delta_it * self.maxinterval \ / delta_t elif self.smoothing: self.miniters = self.smoothing * delta_it * \ (self.mininterval / delta_t if self.mininterval and delta_t else 1) + \ (1 - self.smoothing) * self.miniters else: self.miniters = max(self.miniters, delta_it) # Store old values for next call self.last_print_n = self.n self.last_print_t = cur_t def close(self): """Cleanup and (if leave=False) close the progressbar.""" if self.disable: return # Prevent multiple closures self.disable = True # decrement instance pos and remove from internal set pos = abs(self.pos) self._decr_instances(self) # GUI mode if not hasattr(self, "sp"): return # annoyingly, _supports_unicode isn't good enough def fp_write(s): self.fp.write(_unicode(s)) try: fp_write('') except ValueError as e: if 'closed' in str(e): return raise # pragma: no cover leave = pos == 0 if self.leave is None else self.leave with self._lock: if leave: # stats for overall rate (no weighted average) self.avg_time = None self.display(pos=0) fp_write('\n') else: self.display(msg='', pos=pos) if not pos: fp_write('\r') def clear(self, nolock=False): """Clear current bar display.""" if self.disable: return if not nolock: self._lock.acquire() self.moveto(abs(self.pos)) self.sp('') self.fp.write('\r') # place cursor back at the beginning of line self.moveto(-abs(self.pos)) if not nolock: self._lock.release() def refresh(self, nolock=False, lock_args=None): """ Force refresh the display of this bar. Parameters ---------- nolock : bool, optional If `True`, does not lock. If [default: `False`]: calls `acquire()` on internal lock. lock_args : tuple, optional Passed to internal lock's `acquire()`. If specified, will only `display()` if `acquire()` returns `True`. """ if self.disable: return if not nolock: if lock_args: if not self._lock.acquire(*lock_args): return False else: self._lock.acquire() self.display() if not nolock: self._lock.release() return True def unpause(self): """Restart tqdm timer from last print time.""" cur_t = self._time() self.start_t += cur_t - self.last_print_t self.last_print_t = cur_t def reset(self, total=None): """ Resets to 0 iterations for repeated use. Consider combining with `leave=True`. Parameters ---------- total : int or float, optional. Total to use for the new bar. 
""" self.last_print_n = self.n = 0 self.last_print_t = self.start_t = self._time() if total is not None: self.total = total self.refresh() def set_description(self, desc=None, refresh=True): """ Set/modify description of the progress bar. Parameters ---------- desc : str, optional refresh : bool, optional Forces refresh [default: True]. """ self.desc = desc + ': ' if desc else '' if refresh: self.refresh() def set_description_str(self, desc=None, refresh=True): """Set/modify description without ': ' appended.""" self.desc = desc or '' if refresh: self.refresh() def set_postfix(self, ordered_dict=None, refresh=True, **kwargs): """ Set/modify postfix (additional stats) with automatic formatting based on datatype. Parameters ---------- ordered_dict : dict or OrderedDict, optional refresh : bool, optional Forces refresh [default: True]. kwargs : dict, optional """ # Sort in alphabetical order to be more deterministic postfix = _OrderedDict([] if ordered_dict is None else ordered_dict) for key in sorted(kwargs.keys()): postfix[key] = kwargs[key] # Preprocess stats according to datatype for key in postfix.keys(): # Number: limit the length of the string if isinstance(postfix[key], Number): postfix[key] = self.format_num(postfix[key]) # Else for any other type, try to get the string conversion elif not isinstance(postfix[key], _basestring): postfix[key] = str(postfix[key]) # Else if it's a string, don't need to preprocess anything # Stitch together to get the final postfix self.postfix = ', '.join(key + '=' + postfix[key].strip() for key in postfix.keys()) if refresh: self.refresh() def set_postfix_str(self, s='', refresh=True): """ Postfix without dictionary expansion, similar to prefix handling. """ self.postfix = str(s) if refresh: self.refresh() def moveto(self, n): # TODO: private method self.fp.write(_unicode('\n' * n + _term_move_up() * -n)) self.fp.flush() @property def format_dict(self): """Public API for read-only member access.""" return dict( n=self.n, total=self.total, elapsed=self._time() - self.start_t if hasattr(self, 'start_t') else 0, ncols=self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols, prefix=self.desc, ascii=self.ascii, unit=self.unit, unit_scale=self.unit_scale, rate=1 / self.avg_time if self.avg_time else None, bar_format=self.bar_format, postfix=self.postfix, unit_divisor=self.unit_divisor) def display(self, msg=None, pos=None): """ Use `self.sp` to display `msg` in the specified `pos`. Consider overloading this function when inheriting to use e.g.: `self.some_frontend(**self.format_dict)` instead of `self.sp`. Parameters ---------- msg : str, optional. What to display (default: `repr(self)`). pos : int, optional. Position to `moveto` (default: `abs(self.pos)`). """ if pos is None: pos = abs(self.pos) if pos: self.moveto(pos) self.sp(self.__repr__() if msg is None else msg) if pos: self.moveto(-pos) @classmethod @contextmanager def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs): """ stream : file-like object. method : str, "read" or "write". The result of `read()` and the first argument of `write()` should have a `len()`. >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj: ... while True: ... chunk = fobj.read(chunk_size) ... if not chunk: ... break """ with tclass(total=total, **tkwargs) as t: if bytes: t.unit = "B" t.unit_scale = True t.unit_divisor = 1024 yield CallbackIOWrapper(t.update, stream, method) def trange(*args, **kwargs): """ A shortcut for tqdm(xrange(*args), **kwargs). 
On Python3+ range is used instead of xrange. """ return tqdm(_range(*args), **kwargs)
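# --- Usage sketch (illustrative; not part of the upstream tqdm module) ---
# Shows the public API defined above: trange() for a ranged bar, tqdm.write()
# for printing without clobbering an active bar, and postfix/set_postfix for
# per-iteration stats. Assumes the class defined above is exposed as `tqdm`,
# as the trange() wrapper implies.
if __name__ == '__main__':
    from time import sleep

    for i in trange(100, desc='demo', unit='step'):
        sleep(0.01)
        if i == 50:
            # clears the bar, prints the message, then the bar is redrawn
            tqdm.write('halfway there')

    with tqdm(total=10, postfix={'loss': 0.0}) as pbar:
        for i in range(10):
            sleep(0.05)
            pbar.set_postfix(loss=1.0 / (i + 1), refresh=False)
            pbar.update(1)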
bsd-3-clause
exowanderer/Charge-Carrier-Trapping-Comparison
Charge Carrier Trapping Experiment.py
1
37127
# coding: utf-8

# # Charge Carrier Trapping Experiment
#
# CCT = Charge Carrier Trapping - This is a test of comparing the Zhou et al. 2017 results with a data-driven analysis using MultiNest

# In[ ]:

get_ipython().magic('matplotlib inline')
from pylab import *; ion()

from pandas import read_csv, DataFrame, concat
from time import time
from exoparams import PlanetParams
from astropy import units as u

# In[ ]:

test_data = read_csv('test_data.dat')
test_data

# **Check if the data makes sense as is**

# In[ ]:

fig = figure(figsize=(10, 10))
errorbar(test_data['DeltaPhase'], test_data['Flux'], test_data['Sigma'], fmt='o')

fig = figure(figsize=(10, 10))
errorbar(np.arange(test_data['DeltaPhase'].size), test_data['Flux'], test_data['Sigma'], fmt='o')

# errorbar(h38_v2_orbitPhased['DeltaPhase'], h38_v2_orbitPhased['Flux'], h38_v2_orbitPhased['Sigma'], fmt='o')
# errorbar(w67_v1_orbitPhased['DeltaPhase'], w67_v1_orbitPhased['Flux'], w67_v1_orbitPhased['Sigma'], fmt='o')

# In[ ]:

for k in test_data['OrbitNumber'].unique():
    orbitNNow = test_data['OrbitNumber'] == k
    errorbar(test_data['DeltaPhase'][orbitNNow], test_data['Flux'][orbitNNow], test_data['Sigma'][orbitNNow], fmt='o')

# In[ ]:

def zhou_model(params):
    # Zhou et al. 2017
    # The exponential ramp timescale is detector dependent, and therefore uniform across all observations
    # But the difference from orbit to orbit is predicted to be related
    # ONLY to the initial number of charge traps populated at the start of each ramp
    # BIG ASSUMPTION
    flux = ydata.copy()  # I assume that what Zhou means by `flux` is either the WLC or avg WLC value
    # flux = ydata.copy() / 128  # I assume that what Zhou means by `flux` is either the WLC or avg WLC value

    E0fast = params[0]  # Orbit 0; Start with per frame; may want per pixel
    E0slow = params[1]  # Orbit 0; Start with per frame; may want per pixel

    # Separate out the delta-E0 components per orbit
    # Keep dE0fast[0] and dE0slow[0] == 0.0 because they correspond to E0fast and E0slow (initial)
    dE0fast = np.zeros(nOrbits)
    dE0slow = np.zeros(nOrbits)
    for k in range(1, nOrbits):
        print(k, 2*k, 2*k+1, len(params))
        dE0fast[k] = params[2*k]
        dE0slow[k] = params[2*k+1]

    # From Table 3 of Zhou et al. 2017
    ETotFast = 270.6
    etaFast = 0.006863
    tauFast = 224.8

    ETotSlow = 1320.0
    etaSlow = 0.01311
    tauSlow = 2.45e4

    coeffFast0 = (etaFast * flux / ETotFast + tauFast**-1)
    coeffSlow0 = (etaSlow * flux / ETotSlow + tauSlow**-1)

    coeffFast1 = etaFast*flux / coeffFast0
    coeffSlow1 = etaSlow*flux / coeffSlow0

    Efast = zeros(orbit_phase.shape)
    Eslow = zeros(orbit_phase.shape)
    for k in range(nOrbits):
        orbitNow = where(orbitNumber == k)[0]
        Efast[orbitNow] = coeffFast1 + (E0fast + dE0fast[k] - coeffFast1)*exp(-coeffFast0 * tphase[orbitNow])
        Eslow[orbitNow] = coeffSlow1 + (E0slow + dE0slow[k] - coeffSlow1)*exp(-coeffSlow0 * tphase[orbitNow])

    dEFastDtP = etaFast * flux * (ETotFast - Efast) / ETotFast
    dEFastDtN = -Efast / tauFast
    dESlowDtP = etaSlow * flux * (ETotSlow - Eslow) / ETotSlow
    dESlowDtN = -Eslow / tauSlow

    # NB: assumed fix -- the original ended with an unreturned lambda that subtracted the two
    # ...DtP terms twice; the net (trapping + detrapping) rates are returned here instead
    return lambda phase: 1 - dEFastDtP - dESlowDtP - dEFastDtN - dESlowDtN

# # PyMultiNest Demo

# In[ ]:

from __future__ import absolute_import, unicode_literals, print_function
import pymultinest
import math
import os
import threading, subprocess
from sys import platform

if not os.path.exists("chains"):
    os.mkdir("chains")

# In[ ]:

get_ipython().magic('matplotlib inline')
from pylab import *; ion()
# from pymultinest.solve import Solver, solve
from numpy import pi, sin, cos, linspace

def single_exponential_model(cube):
    alpha = cube[0]
    beta = cube[1]
    gamma = cube[2]
    return lambda xdata: alpha - beta*exp(-gamma*xdata)

def double_exponential_model(cube):
    alpha = cube[0]
    beta = cube[1]
    gamma = cube[2]
    delta = cube[3]
    epsilon = cube[4]
    return lambda xdata: alpha - beta*exp(-gamma*xdata) - delta*exp(-epsilon*xdata)

def straight_line(cube):
    offset = cube[0]
    slope = cube[1]
    return lambda abscissa: offset + slope * abscissa

def sine_wave(cube):
    amp = cube[0]
    period = cube[1]
    return lambda abscissa: amp*sin(2*pi / period * abscissa)

# **Generate Fake Data for Algorithm Testing**

# In[ ]:

np.random.seed(0)

param0_test = 1    # 0.05
param1_test = .1   # 5*pi
param2_test = 10.0
yunc_test = 0.01

nPts_test = int(50)
nThPts_test = int(1e3)

xmin_test = -0.0  # *pi
xmax_test = 1.0   # *pi
dx_test = 0.01*(xmax_test - xmin_test)

model_test = single_exponential_model
# model_test = sine_wave
# model_test = straight_line

yuncs_test = np.random.normal(yunc_test, 1e-2 * yunc_test, nPts_test)
thdata_test = np.linspace(xmin_test-dx_test, xmax_test+dx_test, nThPts_test)

xdata_test = np.random.uniform(xmin_test, xmax_test, nPts_test)
xdata_test = sort(xdata_test)

ydata_test = model_test([param0_test, param1_test, param2_test])(xdata_test)
yerr_test = np.random.normal(0, yuncs_test, nPts_test)
zdata_test = ydata_test + yerr_test

figure(figsize=(10, 10))
plot(thdata_test, model_test([param0_test, param1_test, param2_test])(thdata_test))
errorbar(xdata_test, zdata_test, yunc_test*ones(zdata_test.size), fmt='o')

# # Single Exponential Model

# In[ ]:

nThPts = int(1e3)

model_SEM = single_exponential_model

xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']

xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100

thdata_SEM = np.linspace(xmin-dx, xmax+dx, nThPts)

param0_SEM_init = 1.0  # by definition
param1_SEM_init = (ydata.max() - ydata.min())  # /100
param2_SEM_init = round(5/(xdata.max() - xdata.min()))

print(param0_SEM_init, param1_SEM_init, param2_SEM_init)

figure(figsize=(10, 10))
plot(thdata_SEM, model_SEM([param0_SEM_init, param1_SEM_init, param2_SEM_init])(thdata_SEM))
errorbar(xdata, ydata, yuncs, fmt='o')

# In[ ]:

# our probability functions
# Taken from the eggbox problem.
# model = sine_wave # parameters = ["amp", "period"] # model = straight_line # parameters = ["offset", "slope"] model_SEM = single_exponential_model parameters_SEM = ['max', 'amp1', 'scale1'] def myprior_SEM(cube, ndim, nparams): cube0_width = 1e-3 cube[0] = cube[0] * cube0_width + (1 - 0.5*cube0_width)# - 10# U(0,2) cube[1] = cube[1] # - 10# U(0,1) -- default cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000) def myloglike_SEM(cube, ndim, nparams): chi = 1. # print "cube", [cube[i] for i in range(ndim)], cube # for i in range(ndim): # chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2#math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.) # print "returning", math.pow(2. + chi, 5) modelNow = model_SEM(cube)(xdata) return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum() # In[ ]: if not os.path.exists("chains"): os.mkdir("chains") # number of dimensions our problem has # parameters = ["x", "y"] n_params_SEM = len(parameters_SEM) planetName = 'HAT38' visitName = 'visit1' modelName = 'single_exponential_model' outputfiles_basename = 'chains/' + planetName + '-' + visitName + '-' + modelName + '-' start = time() plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM)) # we want to see some output while it is running progress = pymultinest.ProgressPlotter(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename); progress.start() # threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening # run MultiNest pymultinest.run(myloglike_SEM, myprior_SEM, n_params_SEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename); # ok, done. Stop our progress watcher progress.stop(); print('SEM took', time() - start, 'seconds') # lets analyse the results a_SEM = pymultinest.Analyzer(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename); s_SEM = a_SEM.get_stats(); # In[ ]: import json # store name of parameters, always useful with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f: json.dump(parameters_SEM, f, indent=2) # store derived stats with open('%sstats.json' % a_SEM.outputfiles_basename, mode='w') as f: json.dump(s_SEM, f, indent=2) print() print("-" * 30, 'ANALYSIS', "-" * 30) print("Global Evidence:\t%.15e +- %.15e" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] )) print("Global Evidence:\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] )) # In[ ]: import matplotlib.pyplot as plt plt.clf() # Here we will plot all the marginals and whatnot, just to show off # You may configure the format of the output here, or in matplotlibrc # All pymultinest does is filling in the data of the plot. # Copy and edit this file, and play with it. 
p_SEM = pymultinest.PlotMarginalModes(a_SEM) plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM)) #plt.subplots_adjust(wspace=0, hspace=0) for i in range(n_params_SEM): plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * i + i + 1) p_SEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50) plt.ylabel("Probability") plt.xlabel(parameters_SEM[i]) for j in range(i): plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * j + i + 1) #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0) p_SEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30) plt.xlabel(parameters_SEM[i]) plt.ylabel(parameters_SEM[j]) # plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight') # show("chains/marginals_multinest.pdf") # In[ ]: # plt.figure(figsize=(5*n_params, 5*n_params)) # plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0)) for i in range(n_params_SEM): # print(5*n_params, 1, i+1) plt.subplot(5*n_params_SEM, 1, i+1) p_SEM.plot_modes_marginal(i, with_ellipses = True, with_points = False) plt.ylabel("Probability") plt.xlabel(parameters_SEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i) p_SEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False) plt.ylabel("Cumulative probability") plt.xlabel(parameters_SEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # In[ ]: p_SEM.analyser.get_best_fit()['parameters'], [param0_SEM_init, param1_SEM_init, param2_SEM_init] # In[ ]: figure(figsize=(10,10)) plot(thdata_SEM, model_SEM([param0_SEM_init,param1_SEM_init, param2_SEM_init])(thdata_SEM), label='Initial Model') errorbar(xdata, ydata, yuncs, fmt='o', label='Data') plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN Model') legend(loc=0) # In[ ]: p_SEM.analyser.get_stats() # # Unrestricted Double Exponential Model # In[ ]: nThPts= int(1e3) model_UDEM = double_exponential_model xdata = test_data['DeltaPhase'] ydata = test_data['Flux'] yuncs = test_data['Sigma'] xmin, xmax = xdata.min(), xdata.max() dx = (xmax - xmin)/100 thdata_UDEM = np.linspace(xmin-dx, xmax+dx, nThPts) param0_UDEM_init = 1.0 # by defintion param1_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100 param2_UDEM_init = round(5/(xdata.max() - xdata.min())) param3_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100 param4_UDEM_init = round(5/(xdata.max() - xdata.min())) print(param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init) figure(figsize=(10,10)) plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init,param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM)) errorbar(xdata, ydata, yuncs, fmt='o') # In[ ]: # our probability functions # Taken from the eggbox problem. 
# model = sine_wave # parameters = ["amp", "period"] # model = straight_line # parameters = ["offset", "slope"] model_UDEM = double_exponential_model parameters_UDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2'] # def myprior_RDEM(cube, ndim, nparams): # cube[0] = cube[0] * 1e-3 + (1 - 1e-3/2)# - 10# U(0,2) # cube[1] = -cube[1] * 5e-3 + 5e-4 # - 10# U(0,1) -- default # cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000) # cube[3] = cube[3] * 5e-3 + 5e-4# - 10# U(0,1) -- default # cube[4] = cube[4] * 1e4 - 5e3# - 1000 # U(0,2000) def myprior_UDEM(cube, ndim, nparams): cube[0] = cube[0] * 1e-2 + (1 - 1e-2/2)# - 10# U(0,2) cube[1] = cube[1] * 2 - 2/2 # - 10# U(0,1) -- default cube[2] = cube[2] * 1e4 - 1e4/2# - 1000 # U(0,2000) cube[3] = cube[3] * 2 - 2/2# - 10# U(0,1) -- default cube[4] = cube[4] * 1e4 - 1e4/2# - 1000 # U(0,2000) def myloglike_UDEM(cube, ndim, nparams): modelNow = model_UDEM(cube)(xdata) return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum() # In[ ]: if not os.path.exists("chains"): os.mkdir("chains") start = time() # number of dimensions our problem has # parameters = ["x", "y"] n_params_UDEM = len(parameters_UDEM) savedir = 'chains' planetName = 'HAT38' visitName = 'visit1' modelName = 'unrestricted_double_exponential_model' outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-' plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM)) # we want to see some output while it is running progress = pymultinest.ProgressPlotter(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename) progress.start() # threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening # run MultiNest pymultinest.run(myloglike_UDEM, myprior_UDEM, n_params_UDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename) # ok, done. Stop our progress watcher progress.stop() # lets analyse the results a_UDEM = pymultinest.Analyzer(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename) s_UDEM = a_UDEM.get_stats() print('UDEM took', time() - start, 'seconds') # fig = plt.gcf() # axs = fig.get_axes() # for ax in axs: # ax.set_ylim() # # ax.set_xscale("log", nonposx='clip') # # ax.set_yscale("log", nonposy='clip') # In[ ]: import json # store name of parameters, always useful with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f: json.dump(parameters_UDEM, f, indent=2) # store derived stats with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f: json.dump(s_UDEM, f, indent=2) print() print("-" * 30, 'ANALYSIS', "-" * 30) print("Global Evidence:\t%.15e +- %.15e" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] )) # In[ ]: import matplotlib.pyplot as plt plt.clf() # Here we will plot all the marginals and whatnot, just to show off # You may configure the format of the output here, or in matplotlibrc # All pymultinest does is filling in the data of the plot. # Copy and edit this file, and play with it. 
p_UDEM = pymultinest.PlotMarginalModes(a_UDEM) plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM)) #plt.subplots_adjust(wspace=0, hspace=0) for i in range(n_params_UDEM): plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * i + i + 1) p_UDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50) plt.ylabel("Probability") plt.xlabel(parameters_UDEM[i]) for j in range(i): plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * j + i + 1) #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0) # p_UDEM.plot_conditional(i, j, with_ellipses=False, with_points=False, grid_points=30) p_UDEM.plot_conditional(i, j, with_ellipses=False , with_points=False , grid_points=30, only_interpolate=False, use_log_values=False, marginalization_type='sum') plt.xlabel(parameters_UDEM[i]) plt.ylabel(parameters_UDEM[j]) # plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight') # show("chains/marginals_multinest.pdf") # In[ ]: axes_colors = rcParams['axes.prop_cycle'].by_key()['color'] nColors = len(axes_colors) # In[ ]: minLogE, maxLogE = min(a_UDEM.get_equal_weighted_posterior().T[-1]), max(a_UDEM.get_equal_weighted_posterior().T[-1]) rangeLogE = maxLogE - minLogE minLogE, maxLogE, rangeLogE, nColors # In[ ]: from astroML.plotting import hist from statsmodels.robust import scale hist(a_UDEM.get_equal_weighted_posterior().T[-1], bins='blocks') # In[ ]: nSig = 10 mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1]) med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1]) madBins = [med_logE - nSig*mad_logE for nSig in range(nColors)] # In[ ]: fig = figure(figsize=(15,15)); logEchain = a_UDEM.get_equal_weighted_posterior().T[-1] mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1]) med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1]) madBins = [med_logE - nSig*mad_logE for nSig in range(nColors+1)] for k in range(5): ax = fig.add_subplot(5,1,k+1); for nSig in range(nColors): for klogE in range(logEchain.size): if logEchain[klogE] > madBins[nSig] or logEchain[klogE] < madBins[nSig+1]: ax.plot(a_UDEM.get_equal_weighted_posterior().T[k], logEchain,'o', color = axes_colors[nSig], alpha=0.1); fig.canvas.draw(); # In[ ]: # plt.figure(figsize=(5*n_params, 5*n_params)) # plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0)) for i in range(n_params_UDEM): # print(5*n_params, 1, i+1) plt.subplot(5*n_params_UDEM, 1, i+1) p_UDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False) plt.ylabel("Probability") plt.xlabel(parameters_UDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i) p_UDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False) plt.ylabel("Cumulative probability") plt.xlabel(parameters_UDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # In[ ]: p_UDEM.analyser.get_best_fit()['parameters'], [param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init] # In[ ]: figure(figsize=(10,10)) plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM), label='Initial Model') errorbar(xdata, ydata, yuncs, fmt='o', label='Data') plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model') legend(loc=0) # In[ ]: p_UDEM.analyser.get_stats() # # Restricted Double Exponential Model # In[ ]: nThPts= 
int(1e3) model_RDEM = double_exponential_model xdata = test_data['DeltaPhase'] ydata = test_data['Flux'] yuncs = test_data['Sigma'] xmin, xmax = xdata.min(), xdata.max() dx = (xmax - xmin)/100 thdata_RDEM = np.linspace(xmin-dx, xmax+dx, nThPts) param0_RDEM_init = 1.0 # by defintion param1_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100 param2_RDEM_init = round(5/(xdata.max() - xdata.min())) param3_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100 param4_RDEM_init = round(5/(xdata.max() - xdata.min())) print(param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init) figure(figsize=(10,10)) plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init,param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM)) errorbar(xdata, ydata, yuncs, fmt='o') def show(filepath): """ open the output (pdf) file for the user """ if os.name == 'mac' or platform == 'darwin': subprocess.call(('open', filepath)) elif os.name == 'nt' or platform == 'win32': os.startfile(filepath) elif platform.startswith('linux') : subprocess.call(('xdg-open', filepath)) # In[ ]: # our probability functions # Taken from the eggbox problem. # model = sine_wave # parameters = ["amp", "period"] # model = straight_line # parameters = ["offset", "slope"] model_RDEM = double_exponential_model parameters_RDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2'] def myprior_RDEM(cube, ndim, nparams): cube[0] = cube[0] * 1e-3 + (1 - 1e-3/2)# - 10# U(0,2) cube[1] = -cube[1] * 5e-3 + 5e-4 # - 10# U(0,1) -- default cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000) cube[3] = cube[3] * 5e-3 + 5e-4# - 10# U(0,1) -- default cube[4] = cube[4] * 1e4 - 5e3# - 1000 # U(0,2000) def myloglike_RDEM(cube, ndim, nparams): chi = 1. # print "cube", [cube[i] for i in range(ndim)], cube # for i in range(ndim): # chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2#math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.) # print "returning", math.pow(2. + chi, 5) modelNow = model_RDEM(cube)(xdata) return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum() # In[ ]: if not os.path.exists("chains"): os.mkdir("chains") start = time() # number of dimensions our problem has # parameters = ["x", "y"] n_params_RDEM = len(parameters_RDEM) savedir = 'chains' planetName = 'HAT38' visitName = 'visit1' modelName = 'restricted_double_exponential_model' outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-' plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM)) # we want to see some output while it is running progress = pymultinest.ProgressPlotter(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename); progress.start() # threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening # run MultiNest pymultinest.run(myloglike_RDEM, myprior_RDEM, n_params_RDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename) # ok, done. 
Stop our progress watcher progress.stop() # lets analyse the results a_RDEM = pymultinest.Analyzer(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename) s_RDEM = a_RDEM.get_stats() print('RDEM took', time() - start, 'seconds') # fig = plt.gcf() # axs = fig.get_axes() # for ax in axs: # ax.set_ylim() # # ax.set_xscale("log", nonposx='clip') # # ax.set_yscale("log", nonposy='clip') # In[ ]: import json # store name of parameters, always useful with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f: json.dump(parameters_RDEM, f, indent=2) # store derived stats with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f: json.dump(s_RDEM, f, indent=2) print() print("-" * 30, 'ANALYSIS', "-" * 30) print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] )) # In[ ]: import matplotlib.pyplot as plt plt.clf() # Here we will plot all the marginals and whatnot, just to show off # You may configure the format of the output here, or in matplotlibrc # All pymultinest does is filling in the data of the plot. # Copy and edit this file, and play with it. p_RDEM = pymultinest.PlotMarginalModes(a_RDEM) plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM)) #plt.subplots_adjust(wspace=0, hspace=0) for i in range(n_params_RDEM): plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1) p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50) plt.ylabel("Probability") plt.xlabel(parameters_RDEM[i]) for j in range(i): plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1) #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0) p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30) plt.xlabel(parameters_RDEM[i]) plt.ylabel(parameters_RDEM[j]) # plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight') # show("chains/marginals_multinest.pdf") # In[ ]: # plt.figure(figsize=(5*n_params, 5*n_params)) # plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0)) for i in range(n_params_RDEM): # print(5*n_params, 1, i+1) plt.subplot(5*n_params_RDEM, 1, i+1) p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False) plt.ylabel("Probability") plt.xlabel(parameters_RDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i) p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False) plt.ylabel("Cumulative probability") plt.xlabel(parameters_RDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # In[ ]: p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init] # In[ ]: figure(figsize=(10,10)) plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model') errorbar(xdata, ydata, yuncs, fmt='o', label='Data') plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model') legend(loc=0) # In[ ]: p_RDEM.analyser.get_stats() # # Compare Unrestricted Double, Restricted Double, and Single Exponential # In[ ]: import json # store name of parameters, always useful with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f: json.dump(parameters_SEM, f, indent=2) # store derived stats with open('%sstats.json' % 
a_SEM.outputfiles_basename, mode='w') as f: json.dump(s_SEM, f, indent=2) print() print("-" * 30, 'ANALYSIS', "-" * 30) print("SEM Global Evidence:\t\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] )) # store name of parameters, always useful with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f: json.dump(parameters_UDEM, f, indent=2) # store derived stats with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f: json.dump(s_UDEM, f, indent=2) # print() # print("-" * 30, 'ANALYSIS', "-" * 30) print("UDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] )) # store name of parameters, always useful with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f: json.dump(parameters_RDEM, f, indent=2) # store derived stats with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f: json.dump(s_RDEM, f, indent=2) # print() # print("-" * 30, 'ANALYSIS', "-" * 30) print("RDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] )) figure(figsize=(10,10)) plot(thdata_UDEM, model_SEM([param0_SEM_init,param1_SEM_init, param2_SEM_init])(thdata_SEM), '.', label='Initial SEM Model') plot(thdata_UDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), '--', label='Initial DEM Model') errorbar(xdata, ydata, yuncs, fmt='o', label='Data') plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN SEM Model') plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model') plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN RDEM Model') legend(loc=0) # # Polynomials # In[ ]: figure(figsize=(20,20)) from numpy.polynomial import polynomial def time_polynomial(params): # modelOut = np.zeros(tdata.size) # for kc, coeff in enumerate(params): # modelOut += coeff * tdata**kc if len(params): return lambda tdata: polynomial.polyval(tdata, params) else: return lambda tdata: zeros(tdata.size) def orbital_polynomial(params): # modelOut = np.zeros(xdata.size) # for kc, coeff in enumerate(params): # modelOut += coeff * xdata**kc # return modelOut if len(params): return lambda odata: polynomial.polyval(odata, params) else: return lambda odata: zeros(odata.size) def wavelength_polynomial(params): # modelOut = np.zeros(ldata.size) # for kc, coeff in enumerate(params): # modelOut += coeff * ldata**kc # return modelOut if len(params): return lambda ldata: polynomial.polyval(ldata, params) else: return lambda ldata: zeros(ldata.size) def polynomial_model(params): params_list= list(params.copy())[::-1] timeParams = array([params_list.pop() for _ in range(nTimeCoeffs)]) orbitParams = array([params_list.pop() for _ in range(nOrbitCoeffs)]) waveParams = array([params_list.pop() for _ in range(nWaveCoeffs)]) return lambda tdata, odata, ldata: time_polynomial(timeParams)(tdata) + orbital_polynomial(orbitParams)(odata) + wavelength_polynomial(waveParams)(ldata) tdata, xdata, ldata = np.random.uniform(-10,10,(3,100)) tdata.sort() xdata.sort() ldata.sort() # tdata, xdata, ldata = [np.linspace(-10,10,100) for _ in range(3)] for nTimeCoeffs in range(4): for nOrbitCoeffs in range(4): for nWaveCoeffs in range(4): params = 
np.random.uniform(-20, 20, nTimeCoeffs + nOrbitCoeffs + nWaveCoeffs)
            plot(tdata, polynomial_model(params)(tdata, xdata, ldata), '.', alpha=0.5, mew=0)
            plot(xdata, polynomial_model(params)(tdata, xdata, ldata), '.', alpha=0.5, mew=0)
            plot(ldata, polynomial_model(params)(tdata, xdata, ldata), '.', alpha=0.5, mew=0)

# In[ ]:

nThPts = int(1e3)

model_Poly = polynomial_model

xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']

nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0

h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']

xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100

thdata_Poly = np.linspace(xmin-dx, xmax+dx, nThPts)

param0_Poly_init = 1.0  # by definition
param1_Poly_init = 1.0
param2_Poly_init = 1.0
param3_Poly_init = 1.0
param4_Poly_init = 1.0

print(param0_Poly_init, param1_Poly_init, param2_Poly_init, param3_Poly_init, param4_Poly_init)

figure(figsize=(10, 10))
plot(thdata_Poly, model_Poly([param0_Poly_init, param1_Poly_init, param2_Poly_init, param3_Poly_init, param4_Poly_init])(thdata_Poly))
errorbar(xdata, ydata, yuncs, fmt='o')

# In[ ]:

# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]

nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0

h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']

model_Poly = polynomial_model
parameters_Poly = ['timeIntercept', 'timeSlope', 'orbitIntercept', 'orbitSlope', 'orbitQuadratic']

cubeKWith = 1e3

def myprior_Poly(cube, ndim, nparams):
    for k in range(ndim):
        cube[k] = cube[k] * cubeKWith - 0.5*cubeKWith

def myloglike_Poly(cube, ndim, nparams):
    chi = 1.
    # print "cube", [cube[i] for i in range(ndim)], cube
    # for i in range(ndim):
    #     chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2  # math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.)
    # print "returning", math.pow(2. + chi, 5)
    modelNow = model_Poly(cube)(times, HSTPhase, 0)
    return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()

# In[ ]:

if not os.path.exists("chains"):
    os.mkdir("chains")

start = time()

# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_Poly = len(parameters_Poly)

savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'polynomial_model'

outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'

plt.figure(figsize=(5*n_params_Poly, 5*n_params_Poly))

# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename)
progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start()  # delayed opening

# run MultiNest
pymultinest.run(myloglike_Poly, myprior_Poly, n_params_Poly, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)

# ok, done.
Stop our progress watcher progress.stop() # lets analyse the results a_Poly = pymultinest.Analyzer(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename) s_Poly = a_Poly.get_stats() print('Polynomial took', time() - start, 'seconds') # fig = plt.gcf() # axs = fig.get_axes() # for ax in axs: # ax.set_ylim() # # ax.set_xscale("log", nonposx='clip') # # ax.set_yscale("log", nonposy='clip') # In[ ]: import json # store name of parameters, always useful with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f: json.dump(parameters_RDEM, f, indent=2) # store derived stats with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f: json.dump(s_RDEM, f, indent=2) print() print("-" * 30, 'ANALYSIS', "-" * 30) print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] )) # In[ ]: import matplotlib.pyplot as plt plt.clf() # Here we will plot all the marginals and whatnot, just to show off # You may configure the format of the output here, or in matplotlibrc # All pymultinest does is filling in the data of the plot. # Copy and edit this file, and play with it. p_RDEM = pymultinest.PlotMarginalModes(a_RDEM) plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM)) #plt.subplots_adjust(wspace=0, hspace=0) for i in range(n_params_RDEM): plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1) p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50) plt.ylabel("Probability") plt.xlabel(parameters_RDEM[i]) for j in range(i): plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1) #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0) p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30) plt.xlabel(parameters_RDEM[i]) plt.ylabel(parameters_RDEM[j]) # plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight') # show("chains/marginals_multinest.pdf") # In[ ]: # plt.figure(figsize=(5*n_params, 5*n_params)) # plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0)) for i in range(n_params_RDEM): # print(5*n_params, 1, i+1) plt.subplot(5*n_params_RDEM, 1, i+1) p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False) plt.ylabel("Probability") plt.xlabel(parameters_RDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i) p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False) plt.ylabel("Cumulative probability") plt.xlabel(parameters_RDEM[i]) # plt.savefig(outfile, format='pdf', bbox_inches='tight') # plt.close() # In[ ]: p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init] # In[ ]: figure(figsize=(10,10)) plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model') errorbar(xdata, ydata, yuncs, fmt='o', label='Data') plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model') legend(loc=0) # In[ ]: p_RDEM.analyser.get_stats()
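# In[ ]:

# Illustrative cell (not in the original notebook): rank the fitted models by their
# MultiNest global log-evidence, which is what the comparison section above prints.
# A difference in ln Z of roughly 2.5 (or 5) is conventionally read as moderate (or
# strong) preference on the Jeffreys scale. Assumes the stats dicts s_SEM, s_UDEM and
# s_RDEM computed above are still in scope.
evidences = {
    'SEM': s_SEM['nested sampling global log-evidence'],
    'UDEM': s_UDEM['nested sampling global log-evidence'],
    'RDEM': s_RDEM['nested sampling global log-evidence'],
}

best_name = max(evidences, key=evidences.get)
for name, lnZ in sorted(evidences.items(), key=lambda kv: -kv[1]):
    # ln(Bayes factor) of the best model over this one
    print('%4s: lnZ = %10.3f (Delta lnZ vs %s = %6.3f)'
          % (name, lnZ, best_name, evidences[best_name] - lnZ))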
gpl-3.0
krisht/Krishna-Thesis
Research/src/BrainNet.py
1
43359
from __future__ import print_function import datetime import itertools import matplotlib import os import re os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]="0,1" matplotlib.use('Agg') import matplotlib.pyplot as plt plt.rcParams["font.family"] = "FreeSerif" import numpy as np import os import psutil import random import sys import tensorflow as tf import tensorflow.contrib.slim as slim from sklearn import neighbors from sklearn.metrics import confusion_matrix from sklearn.preprocessing import normalize from sklearn.manifold import TSNE curr_time = datetime.datetime.now() loss_mem = [] loss_mem_skip = [] def norm_op(vector, axisss): #return normalize(vector, axis=axisss, norm='l2') return vector * 10e4 def plot_embedding(X, y, epoch, accuracy, num_to_label, title): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) cmap = plt.get_cmap('gist_rainbow') color_map = [cmap(1.*i/6) for i in range(6)] legend_entry = [] for ii, c in enumerate(color_map): legend_entry.append(matplotlib.patches.Patch(color=c, label=num_to_label[ii])) plt.figure(figsize=(4.0, 4.0)) plt.scatter(X[:,0], X[:, 1], c=y, cmap=matplotlib.colors.ListedColormap(color_map), s=2) plt.legend(handles=legend_entry) plt.xticks([]), plt.yticks([]) #plt.title(title) plt.savefig('./%s Results/%s_tSNE_plot_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight') def compute_tSNE(X, y, epoch, accuracy, num_to_label, with_seizure=None, title="t-SNE Embedding of DCNN Clustering Network"): tsne = TSNE(n_components=2, init='random', random_state=0) X_tsne = tsne.fit_transform(X) plot_embedding(X_tsne, y, epoch=epoch, accuracy=accuracy, num_to_label=num_to_label, title=title) if with_seizure is None: np.savez('./%s Results/%s_tSNE_plot_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y) elif with_seizure == 1: np.savez('./%s Results/%s_tSNE_plot_with_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y) elif with_seizure == 0: np.savez('./%s Results/%s_tSNE_plot_without_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y) elif with_seizure == 2: np.savez('./%s Results/%s_tSNE_plot_with_only_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, accuracy), X_tsne, y) def get_loss(loss_mem, loss_mem_skip): plt.figure(figsize=(4.0, 4.0)) plt.plot(loss_mem_skip, 'ro-', markersize=2) plt.xlabel("1000 Iterations") plt.ylabel("Average Loss in 1000 Iterations") plt.title("Iterations vs. Average Loss") plt.savefig('./%s Results/%s_convergence_with_skip_plot.pdf' % (curr_time, curr_time), bbox_inches='tight') plt.figure(figsize=(4.0, 4.0)) plt.plot(loss_mem, 'ro-', markersize=2) plt.xlabel("1000 Iterations") plt.ylabel("Average Loss in 1000 Iterations") plt.title("Iterations vs. 
Average Loss") plt.savefig('./%s Results/%s_convergence_plot.pdf' % (curr_time, curr_time), bbox_inches='tight') def plot_confusion_matrix(cm, classes, normalize=True, cmap=plt.cm.Greys, accuracy = None, epoch=None, with_seizure=None, title = "Confusion Matrix on All Data"): plt.figure(figsize=(4, 4)) plt.imshow(cm, interpolation='nearest', cmap=cmap) ax = plt.gca() #plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) ax.yaxis.set_label_coords(-0.1,1.03) h = ax.set_ylabel('True label', rotation=0, horizontalalignment='left') if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] cm = np.nan_to_num(cm) print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, '{0:.2f}'.format(cm[i, j]), horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black") #plt.tight_layout() plt.xlabel('Predicted label') #plt.title(title) #plt.show() if with_seizure is None: plt.savefig('./%s Results/%s_confusion_matrix_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight') elif with_seizure == 1: plt.savefig('./%s Results/%s_confusion_matrix_with_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight') elif with_seizure == 0: plt.savefig('./%s Results/%s_confusion_matrix_without_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight') elif with_seizure == 2: plt.savefig('./%s Results/%s_confusion_matrix_with_only_seizure_epoch%s_%.3f%%.pdf' % (curr_time, curr_time, epoch, accuracy), bbox_inches='tight') class BrainNet: def __init__(self, input_shape=[None, 71, 125], path_to_files='/media/krishna/DATA', l2_weight=0.05, num_output=64, num_classes=6, alpha=.5, validation_size=500, learning_rate=1e-3, batch_size=100, train_epoch=5, keep_prob=None, debug=True, restore_dir=None): self.bckg_num = 0 self.artf_num = 1 self.eybl_num = 2 self.gped_num = 3 self.spsw_num = 4 self.pled_num = 5 self.path_to_files = path_to_files self.num_to_class = dict() self.num_to_class[0] = 'BCKG' self.num_to_class[1] = 'ARTF' self.num_to_class[2] = 'EYBL' self.num_to_class[3] = 'GPED' self.num_to_class[4] = 'SPSW' self.num_to_class[5] = 'PLED' self.count_of_triplets = dict() self.DEBUG = debug self.train_path = os.path.abspath(self.path_to_files + '/Train') self.val_path = os.path.abspath(self.path_to_files + '/Validation') path = os.path.abspath(self.path_to_files) self.artf = np.load(os.path.abspath(self.train_path + '/artf_files.npy')) self.bckg = np.load(os.path.abspath(self.train_path + '/bckg_files.npy')) self.spsw = np.load(os.path.abspath(self.train_path + '/spsw_files.npy')) self.pled = np.load(os.path.abspath(self.train_path + '/pled_files.npy')) self.gped = np.load(os.path.abspath(self.train_path + '/gped_files.npy')) self.eybl = np.load(os.path.abspath(self.train_path + '/eybl_files.npy')) self.artf_val = np.load(os.path.abspath(self.val_path + '/artf_files.npy')) self.bckg_val = np.load(os.path.abspath(self.val_path + '/bckg_files.npy')) self.spsw_val = np.load(os.path.abspath(self.val_path + '/spsw_files.npy')) self.pled_val = np.load(os.path.abspath(self.val_path + '/pled_files.npy')) self.gped_val = np.load(os.path.abspath(self.val_path + '/gped_files.npy')) self.eybl_val = np.load(os.path.abspath(self.val_path + '/eybl_files.npy')) if 
path_to_files != '/media/krishna/DATA': self.artf = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.artf]) self.bckg = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.bckg]) self.spsw = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.spsw]) self.pled = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.pled]) self.gped = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.gped]) self.eybl = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.eybl]) self.artf_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.artf_val]) self.bckg_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.bckg_val]) self.spsw_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.spsw_val]) self.pled_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.pled_val]) self.gped_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.gped_val]) self.eybl_val = np.asarray([s.replace('/media/krishna/DATA', self.path_to_files) for s in self.eybl_val]) files_with_spsw = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.spsw]) files_with_gped = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.gped]) files_with_pled = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.pled]) files_with_bckg = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.bckg]) files_with_artf = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.artf]) files_with_eybl = set(['session' + re.search('session(.+?)_', a).group(1) + '_' for a in self.eybl]) total_set = (files_with_spsw | files_with_gped | files_with_pled | files_with_bckg | files_with_artf | files_with_eybl) self.files_without_seizures = total_set - files_with_spsw - files_with_pled - files_with_gped self.files_with_seizures = total_set - self.files_without_seizures print(self.files_with_seizures) print(self.files_without_seizures) self.sess = tf.Session() self.num_classes = num_classes self.num_output = num_output self.input_shape = input_shape self.batch_size = batch_size self.alpha = alpha self.train_epoch = train_epoch self.learning_rate = learning_rate self.keep_prob = keep_prob self.validation_size = validation_size self.l2_weight = l2_weight self.inference_input = tf.placeholder(tf.float32, shape=input_shape) self.inference_model = self.get_model(self.inference_input, reuse=False) if restore_dir is not None: if self.DEBUG: print("Loading saved data...") dir = tf.train.Saver() dir.restore(self.sess, restore_dir) if self.DEBUG: print("Finished loading saved data...") if not os.path.exists('./%s Results' % curr_time): os.makedirs('./%s Results' % curr_time) self.metadata_file = './%s Results/METADATA.txt' % curr_time with open(self.metadata_file, 'w') as file: file.write('DCNN Clustering Network\n') #file.write('Normalization on\n') file.write('Time of training: %s\n' % curr_time) file.write('Input shape: %s\n' % input_shape) file.write('Path to files: %s\n' % path_to_files) file.write('L2 Regularization Weight: %s\n' % l2_weight) file.write('Number of outputs: %s\n' % num_output) file.write('Number of classes: %s\n' % num_classes) file.write('Alpha value: %s\n' % alpha) file.write('Validation Size: %s\n' % validation_size) 
file.write('Learning rate: %s\n' % learning_rate) file.write('Batch size: %s\n' % batch_size) file.write('Number of Epochs: %s\n' % train_epoch) file.write('Dropout probability: %s\n' % keep_prob) file.write('Debug mode: %s\n' % debug) file.write('Restore directory: %s\n' % restore_dir) file.close() def distance_metric(self, a, b, metric='cosine'): if metric == 'cosine': num = tf.reduce_sum(a*b, 1) denom = tf.sqrt(tf.reduce_sum(a*a,1))*tf.sqrt(tf.reduce_sum(b*b, 1)) result = 1 - (self.num/self.denom) return result elif metric=='euclidean': return tf.reduce_sum(tf.square(tf.subtract(a, b)), 1) def triplet_loss(self, alpha): self.anchor = tf.placeholder(tf.float32, shape=self.input_shape) self.positive = tf.placeholder(tf.float32, shape=self.input_shape) self.negative = tf.placeholder(tf.float32, shape=self.input_shape) self.anchor_out = self.get_model(self.anchor, reuse=True) self.positive_out = self.get_model(self.positive, reuse=True) self.negative_out = self.get_model(self.negative, reuse=True) with tf.variable_scope('triplet_loss'): pos_dist = self.distance_metric(self.anchor_out, self.positive_out, metric='euclidean') neg_dist = self.distance_metric(self.anchor_out, self.negative_out, metric='euclidean') basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha) loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0) return loss def get_triplets(self, size=10): A = [] P = [] N = [] for _ in range(size): choices = ['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf'] neg_choices = list(choices) choice = random.choice(choices) neg_choices.remove(choice) if choice == 'bckg': a = np.load(random.choice(self.bckg)) p = np.load(random.choice(self.bckg)) elif choice == 'eybl': a = np.load(random.choice(self.eybl)) p = np.load(random.choice(self.eybl)) elif choice == 'gped': a = np.load(random.choice(self.gped)) p = np.load(random.choice(self.gped)) elif choice == 'spsw': a = np.load(random.choice(self.spsw)) p = np.load(random.choice(self.spsw)) elif choice == 'pled': a = np.load(random.choice(self.pled)) p = np.load(random.choice(self.pled)) else: a = np.load(random.choice(self.artf)) p = np.load(random.choice(self.artf)) neg_choice = random.choice(neg_choices) if neg_choice == 'bckg': n = np.load(random.choice(self.bckg)) elif neg_choice == 'eybl': n = np.load(random.choice(self.eybl)) elif neg_choice == 'gped': n = np.load(random.choice(self.gped)) elif neg_choice == 'spsw': n = np.load(random.choice(self.spsw)) elif neg_choice == 'pled': n = np.load(random.choice(self.pled)) else: n = np.load(random.choice(self.artf)) key = choice + choice + neg_choice if key in self.count_of_triplets: self.count_of_triplets[key]+=1 else: self.count_of_triplets[key] = 1 a = norm_op(a, axisss=0) p = norm_op(p, axisss=0) n = norm_op(n, axisss=0) A.append(a) P.append(p) N.append(n) A = np.asarray(A) P = np.asarray(P) N = np.asarray(N) return A, P, N # End new stuff # def simple_model(self, inputs, reuse=False): with slim.arg_scope([slim.layers.conv2d, slim.layers.fully_connected], weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), weights_regularizer=slim.l2_regularizer(self.l2_weight), reuse=reuse): net = tf.expand_dims(inputs, dim=3) net = slim.layers.conv2d(net, num_outputs=32, kernel_size=5, scope='conv1', trainable=True) net = slim.layers.max_pool2d(net, kernel_size=5, scope='maxpool1') net = slim.layers.conv2d(net, num_outputs=64, kernel_size=3, scope='conv2', trainable=True) net = slim.layers.max_pool2d(net, kernel_size=3, scope='maxpool2') net = slim.layers.conv2d(net, num_outputs=128, 
kernel_size=2, scope='conv3', trainable=True) net = slim.layers.max_pool2d(net, kernel_size=2, scope='maxpool3') net = slim.layers.conv2d(net, num_outputs=256, kernel_size=1, scope='conv4', trainable=True) net = slim.layers.max_pool2d(net, kernel_size=2, scope='maxpool4') net = slim.layers.conv2d(net, num_outputs=1024, kernel_size=4, scope='conv5', trainable=True) net = slim.layers.max_pool2d(net, kernel_size=4, scope='maxpool5') net = slim.layers.flatten(net, scope='flatten') net = slim.layers.fully_connected(net, 1024, scope='fc1', trainable=True) net = slim.layers.fully_connected(net, 512, scope='fc2', trainable=True) net = slim.layers.fully_connected(net, 256, scope='fc3', trainable=True) net = slim.layers.fully_connected(net, self.num_output, weights_regularizer=None, scope='output') return net def inception_v3(self, inputs, dropout_keep_prob=0.8, reuse=False, scope=''): end_points = {} with tf.name_scope(scope, 'inception_v3', [inputs]): with slim.arg_scope([slim.layers.conv2d, slim.layers.fully_connected, slim.layers.batch_norm, slim.layers.dropout], weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), weights_regularizer=slim.l2_regularizer(self.l2_weight), reuse=reuse): with slim.arg_scope([slim.layers.conv2d], stride=1, padding='VALID', reuse=reuse): # 299 x 299 x 3 inputs = tf.expand_dims(inputs, dim=3) end_points['conv0'] = slim.layers.conv2d(inputs, 32, kernel_size=3, stride=2, scope='conv0') # 149 x 149 x 32 end_points['conv1'] = slim.layers.conv2d(end_points['conv0'], 32, kernel_size=3, scope='conv1') # 147 x 147 x 32 end_points['conv2'] = slim.layers.conv2d(end_points['conv1'], 64, kernel_size=3, padding='SAME', scope='conv2') # 147 x 147 x 64 #end_points['pool1'] = slim.layers.max_pool2d(end_points['conv2'], kernel_size=3, stride=2, scope='pool1') # 73 x 73 x 64 end_points['conv3'] = slim.layers.conv2d(end_points['conv2'], 80, kernel_size=1, scope='conv3') # 73 x 73 x 80. end_points['conv4'] = slim.layers.conv2d(end_points['conv3'], 192, kernel_size=3, scope='conv4') # 71 x 71 x 192. #end_points['pool2'] = slim.layers.max_pool2d(end_points['conv4'], kernel_size=3, stride=2, scope='pool2') # 35 x 35 x 192. net = end_points['conv4'] # Inception blocks with slim.arg_scope([slim.layers.conv2d], stride=1, padding='SAME', reuse=reuse): # mixed: 35 x 35 x 256. with tf.variable_scope('mixed_35x35x256a'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch5x5'): branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch1x1/conv2') branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch1x1/conv3') with tf.variable_scope('branch3x3dbl'): branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 32, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) end_points['mixed_35x35x256a'] = net # mixed_1: 35 x 35 x 288. 
with tf.variable_scope('mixed_35x35x288a'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch5x5'): branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch5x5/conv1') branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch5x5/conv2') with tf.variable_scope('branch3x3dbl'): branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 64, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) end_points['mixed_35x35x288a'] = net # mixed_2: 35 x 35 x 288. with tf.variable_scope('mixed_35x35x288b'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch5x5'): branch5x5 = slim.layers.conv2d(net, 48, kernel_size=1, scope='branch5x5/conv1') branch5x5 = slim.layers.conv2d(branch5x5, 64, kernel_size=5, scope='branch5x5/conv2') with tf.variable_scope('branch3x3dbl'): branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv3') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 64, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool]) end_points['mixed_35x35x288b'] = net # mixed_3: 17 x 17 x 768. with tf.variable_scope('mixed_17x17x768a'): with tf.variable_scope('branch3x3'): branch3x3 = slim.layers.conv2d(net, 384, kernel_size=3, stride=2, padding='VALID', scope='branch3x3/conv1') with tf.variable_scope('branch3x3dbl'): branch3x3dbl = slim.layers.conv2d(net, 64, kernel_size=1, scope='branch3x3dbl/conv1') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, scope='branch3x3dbl/conv2') branch3x3dbl = slim.layers.conv2d(branch3x3dbl, 96, kernel_size=3, stride=2, padding='VALID', scope='branch3x3dbl/conv3') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.max_pool2d(net, kernel_size=3, stride=2, padding='VALID', scope='branch_pool/max_pool1') net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool]) end_points['mixed_17x17x768a'] = net # mixed4: 17 x 17 x 768. 
with tf.variable_scope('mixed_17x17x768b'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch7x7'): branch7x7 = slim.layers.conv2d(net, 128, kernel_size=1, scope='branch7x7/conv1') branch7x7 = slim.layers.conv2d(branch7x7, 128, kernel_size=(1, 7), scope='branch7x7/conv2') branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3') with tf.variable_scope('branch7x7dbl'): branch7x7dbl = slim.layers.conv2d(net, 128, kernel_size=1, scope='branch7x7dbl/conv1') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(7, 1), scope='branch7x7dbl/conv2') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(1, 7), scope='branch7x7dbl/conv3') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 128, kernel_size=(7, 1), scope='branch7x7dbl/conv4') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) end_points['mixed_17x17x768b'] = net # mixed_5: 17 x 17 x 768. with tf.variable_scope('mixed_17x17x768c'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch7x7'): branch7x7 = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7/conv1') branch7x7 = slim.layers.conv2d(branch7x7, 160, kernel_size=(1, 7), scope='branch7x7/conv2') branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3') with tf.variable_scope('branch7x7dbl'): branch7x7dbl = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7dbl/conv1') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv2') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(1, 7), scope='branch7x7dbl/conv3') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv4') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv2') net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) end_points['mixed_17x17x768c'] = net # mixed_6: 17 x 17 x 768. 
with tf.variable_scope('mixed_17x17x768d'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch7x7'): branch7x7 = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7/conv1') branch7x7 = slim.layers.conv2d(branch7x7, 160, kernel_size=(1, 7), scope='branch7x7/conv2') branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3') with tf.variable_scope('branch7x7dbl'): branch7x7dbl = slim.layers.conv2d(net, 160, kernel_size=1, scope='branch7x7dbl/conv1') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv2') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(1, 7), scope='branch7x7dbl/conv3') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 160, kernel_size=(7, 1), scope='branch7x7dbl/conv4') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) end_points['mixed_17x17x768d'] = net # mixed_7: 17 x 17 x 768. with tf.variable_scope('mixed_17x17x768e'): with tf.variable_scope('branch1x1'): branch1x1 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch1x1/conv1') with tf.variable_scope('branch7x7'): branch7x7 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7/conv1') branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(1, 7), scope='branch7x7/conv2') branch7x7 = slim.layers.conv2d(branch7x7, 192, kernel_size=(7, 1), scope='branch7x7/conv3') with tf.variable_scope('branch7x7dbl'): branch7x7dbl = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7dbl/conv1') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(7, 1), scope='branch7x7dbl/conv2') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv3') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(7, 1), scope='branch7x7dbl/conv4') branch7x7dbl = slim.layers.conv2d(branch7x7dbl, 192, kernel_size=(1, 7), scope='branch7x7dbl/conv5') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.avg_pool2d(net, kernel_size=3, stride=1, padding='SAME', scope='branch_pool/avg_pool1') branch_pool = slim.layers.conv2d(branch_pool, 192, kernel_size=1, scope='branch_pool/conv1') net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool]) end_points['mixed_17x17x768e'] = net # Auxiliary Head logits aux_logits = tf.identity(end_points['mixed_17x17x768e']) with tf.variable_scope('aux_logits'): aux_logits = slim.layers.avg_pool2d(aux_logits, kernel_size=5, stride=3, padding='VALID', scope='aux_logits/avg_pool1') aux_logits = slim.layers.conv2d(aux_logits, 128, kernel_size=1, scope='aux_logits/proj') # Shape of feature map before the final layer. 
shape = aux_logits.get_shape() aux_logits = slim.layers.conv2d(aux_logits, 768, shape[1:3], padding='VALID', scope='aux_logits/conv2') aux_logits = slim.layers.flatten(aux_logits, scope='aux_logits/flatten') aux_logits = slim.layers.fully_connected(aux_logits, self.num_output, activation_fn=None, scope='aux_logits/fc1') end_points['aux_logits'] = aux_logits with tf.variable_scope('mixed_17x17x1280a'): with tf.variable_scope('branch3x3'): branch3x3 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch3x3/conv1') branch3x3 = slim.layers.conv2d(branch3x3, 320, kernel_size=3, stride=2, padding='VALID', scope='branch3x3/conv2') with tf.variable_scope('branch7x7x3'): branch7x7x3 = slim.layers.conv2d(net, 192, kernel_size=1, scope='branch7x7x3/conv1') branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=(1, 7), scope='branch7x7x3/conv2') branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=(7, 1), scope='branch7x7x3/conv3') branch7x7x3 = slim.layers.conv2d(branch7x7x3, 192, kernel_size=3, stride=2, padding='VALID', scope='branch7x7x3/conv4') with tf.variable_scope('branch_pool'): branch_pool = slim.layers.max_pool2d(net, kernel_size=3, stride=2, padding='VALID', scope='branch_pool/max_pool1') net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool]) end_points['mixed_17x17x1280a'] = net with tf.variable_scope('logits'): shape = net.get_shape() net = slim.layers.avg_pool2d(net, shape[1:3], stride=1, padding='VALID', scope='pool') end_points['prev_layer'] = net # 1 x 1 x 2048 #net = slim.layers.dropout(net, dropout_keep_prob, scope='dropout') net = slim.layers.flatten(net, scope='flatten') # 2048 logits = slim.layers.fully_connected(net, self.num_output, weights_regularizer=None, activation_fn=None, scope='logits') # 1000 end_points['logits'] = logits return end_points['logits'] def get_model(self, inputs, reuse=False, use_inception=True): if not use_inception: return self.simple_model(inputs, reuse=reuse) else: return self.inception_v3(inputs, reuse=reuse) def train_model(self, outdir=None): loss = self.triplet_loss(alpha=self.alpha) self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate) self.optim = self.optimizer.minimize(loss=loss) self.sess.run(tf.global_variables_initializer()) count = 0 ii = 0 val_percentage = 0 val_conf_matrix = 0 epoch = -1 while True: epoch += 1 ii = 0 count = 0 temp_count = 0 full_loss = 0 while ii <= self.batch_size: ii += 1 a, p, n = self.get_triplets() temploss = self.sess.run(loss, feed_dict={self.anchor: a, self.positive: p, self.negative: n}) if temploss == 0: ii -= 1 count += 1 temp_count += 1 continue full_loss += temploss if ((ii + epoch * self.batch_size) % 1000 == 0): loss_mem_skip.append(full_loss / (1000.0 + temp_count)) loss_mem.append(full_loss / (1000.0)) full_loss = 0 temp_count = 0 get_loss(loss_mem, loss_mem_skip) _, a, p, n = self.sess.run([self.optim, self.anchor_out, self.positive_out, self.negative_out], feed_dict={self.anchor: a, self.positive: p, self.negative: n}) d1 = np.linalg.norm(p - a) d2 = np.linalg.norm(n - a) if self.DEBUG: print("Epoch: %2d, Iter: %7d, IterSkip: %7d, Loss: %.4f, P_Diff: %.4f, N_diff: %.4f" % (epoch, ii, count, temploss, d1, d2)) val_percentage, val_conf_matrix = self.validate(epoch) self.sess.close() return epoch, val_percentage, val_conf_matrix def get_sample(self, size=1, validation=False, with_seizure=None): data_list = [] class_list = [] if not validation: for ii in range(0, size): if with_seizure is None: choice = random.choice(['bckg', 'eybl', 'gped', 
'spsw', 'pled', 'artf']) if choice == 'bckg': data_list.append(norm_op(np.load(random.choice(self.bckg)), axisss=0)) class_list.append(self.bckg_num) elif choice == 'eybl': data_list.append(norm_op(np.load(random.choice(self.eybl)), axisss=0)) class_list.append(self.eybl_num) elif choice == 'gped': data_list.append(norm_op(np.load(random.choice(self.gped)), axisss=0)) class_list.append(self.gped_num) elif choice == 'spsw': data_list.append(norm_op(np.load(random.choice(self.spsw)), axisss=0)) class_list.append(self.spsw_num) elif choice == 'pled': data_list.append(norm_op(np.load(random.choice(self.pled)), axisss=0)) class_list.append(self.pled_num) else: data_list.append(norm_op(np.load(random.choice(self.artf)), axisss=0)) class_list.append(self.artf_num) elif with_seizure == 2: choice = random.choice(['gped', 'spsw', 'pled']) success = False the_file = '' class_num = None while not success: if choice == 'bckg': the_file = random.choice(self.bckg) class_num = self.bckg_num elif choice == 'eybl': the_file = random.choice(self.eybl) class_num = self.eybl_num elif choice == 'gped': the_file = random.choice(self.gped) class_num = self.gped_num elif choice == 'spsw': the_file = random.choice(self.spsw) class_num = self.spsw_num elif choice == 'pled': the_file = random.choice(self.pled) class_num = self.pled_num else: the_file = random.choice(self.artf) class_num = self.artf_num the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_' if the_file_stripped in self.files_with_seizures: success = True #print(the_file, the_file_stripped, the_file_stripped in self.files_with_seizures) data_list.append(norm_op(np.load(str(the_file)), axisss=0)) class_list.append(class_num) elif with_seizure == 1: choice = random.choice(['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf']) success = False the_file = '' class_num = None while not success: if choice == 'bckg': the_file = random.choice(self.bckg) class_num = self.bckg_num elif choice == 'eybl': the_file = random.choice(self.eybl) class_num = self.eybl_num elif choice == 'gped': the_file = random.choice(self.gped) class_num = self.gped_num elif choice == 'spsw': the_file = random.choice(self.spsw) class_num = self.spsw_num elif choice == 'pled': the_file = random.choice(self.pled) class_num = self.pled_num else: the_file = random.choice(self.artf) class_num = self.artf_num the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_' if the_file_stripped in self.files_with_seizures: success = True #print(the_file, the_file_stripped, the_file_stripped in self.files_with_seizures) data_list.append(norm_op(np.load(str(the_file)), axisss=0)) class_list.append(class_num) elif with_seizure == 0: choice = random.choice(['bckg', 'eybl', 'artf']) success = False the_file = '' class_num = None while not success: if choice == 'bckg': the_file = random.choice(self.bckg) class_num = self.bckg_num elif choice == 'eybl': the_file = random.choice(self.eybl) class_num = self.eybl_num elif choice == 'gped': the_file = random.choice(self.gped) class_num = self.gped_num elif choice == 'spsw': the_file = random.choice(self.spsw) class_num = self.spsw_num elif choice == 'pled': the_file = random.choice(self.pled) class_num = self.pled_num else: the_file = random.choice(self.artf) class_num = self.artf_num #print(the_file) the_file_stripped = 'session' + re.search('session(.+?)_', str(the_file)).group(1) + '_' if the_file_stripped in self.files_without_seizures: success = True #print(the_file, the_file_stripped, 
the_file_stripped in self.files_without_seizures) data_list.append(norm_op(np.load(str(the_file)), axisss=0)) class_list.append(class_num) else: for ii in range(0, size): choice = random.choice(['bckg', 'eybl', 'gped', 'spsw', 'pled', 'artf']) if choice == 'bckg': data_list.append(norm_op(np.load(random.choice(self.bckg_val)), axisss=0)) class_list.append(self.bckg_num) elif choice == 'eybl': data_list.append(norm_op(np.load(random.choice(self.eybl_val)), axisss=0)) class_list.append(self.eybl_num) elif choice == 'gped': data_list.append(norm_op(np.load(random.choice(self.gped_val)), axisss=0)) class_list.append(self.gped_num) elif choice == 'spsw': data_list.append(norm_op(np.load(random.choice(self.spsw_val)), axisss=0)) class_list.append(self.spsw_num) elif choice == 'pled': data_list.append(norm_op(np.load(random.choice(self.pled_val)), axisss=0)) class_list.append(self.pled_num) else: data_list.append(norm_op(np.load(random.choice(self.artf_val)), axisss=0)) class_list.append(self.artf_num) return data_list, class_list def validate(self, epoch): inputs, classes = self.get_sample(size=self.validation_size, validation=True) vector_inputs = self.sess.run(self.inference_model, feed_dict={self.inference_input: inputs}) del inputs tempClassifier = neighbors.KNeighborsClassifier(31) tempClassifier.fit(vector_inputs, classes) # All data (Files with Seizures & Files without Seizures) val_inputs, val_classes = self.get_sample(size=self.validation_size) vector_val_inputs = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs}) del val_inputs pred_class = tempClassifier.predict(vector_val_inputs) percentage = len([i for i, j in zip(val_classes, pred_class) if i == j]) * 100.0 / self.validation_size if self.DEBUG: print("Validation Results: %.3f%% of of %d correct" % (percentage, self.validation_size)) val_classes = list(map(lambda x: self.num_to_class[x], val_classes)) pred_class = list(map(lambda x: self.num_to_class[x], pred_class)) class_labels = [0, 1, 2, 3, 4, 5] class_labels = list(map(lambda x: self.num_to_class[x], class_labels)) conf_matrix = confusion_matrix(val_classes, pred_class, labels=class_labels) np.set_printoptions(precision=2) np.save('./%s Results/%s_confusion_matrix_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage), conf_matrix) plot_confusion_matrix(conf_matrix, classes=class_labels, epoch=epoch, accuracy=percentage) print("All data: %s" % set(val_classes)) compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage, num_to_label=self.num_to_class) # Files with Seizures val_inputs_seizure, val_classes_seizure = self.get_sample(size=self.validation_size, with_seizure = 1) vector_val_inputs_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_seizure}) del val_inputs_seizure pred_class_seizure = tempClassifier.predict(vector_val_inputs_seizure) percentage_seizure = len([i for i, j in zip(val_classes_seizure, pred_class_seizure) if i == j]) * 100.0 / self.validation_size if self.DEBUG: print("Validation Results: %.3f%% of of %d correct" % (percentage_seizure, self.validation_size)) val_classes_seizure = list(map(lambda x: self.num_to_class[x], val_classes_seizure)) pred_class_seizure = list(map(lambda x: self.num_to_class[x], pred_class_seizure)) class_labels_seizure = [0, 1, 2, 3, 4, 5] class_labels_seizure = list(map(lambda x: self.num_to_class[x], class_labels_seizure)) conf_matrix_seizure = confusion_matrix(val_classes_seizure, pred_class_seizure, labels=class_labels_seizure) 
np.set_printoptions(precision=2) np.save('./%s Results/%s_confusion_matrix_with_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_seizure), conf_matrix_seizure) plot_confusion_matrix(conf_matrix_seizure, classes=class_labels_seizure, epoch=epoch, accuracy=percentage_seizure, with_seizure=1, title = "Confusion Matrix on Files with Seizure") print("With Seizure data: %s" % set(val_classes_seizure)) #compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_seizure, num_to_label=self.num_to_class) # ONLY Seizures val_inputs_only_seizure, val_classes_only_seizure = self.get_sample(size=self.validation_size, with_seizure = 2) vector_val_inputs_only_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_only_seizure}) del val_inputs_only_seizure pred_class_only_seizure = tempClassifier.predict(vector_val_inputs_only_seizure) percentage_only_seizure = len([i for i, j in zip(val_classes_only_seizure, pred_class_only_seizure) if i == j]) * 100.0 / self.validation_size if self.DEBUG: print("Validation Results: %.3f%% of of %d correct" % (percentage_only_seizure, self.validation_size)) val_classes_only_seizure = list(map(lambda x: self.num_to_class[x], val_classes_only_seizure)) pred_class_only_seizure = list(map(lambda x: self.num_to_class[x], pred_class_only_seizure)) class_labels_only_seizure = [0, 1, 2, 3, 4, 5] class_labels_only_seizure = list(map(lambda x: self.num_to_class[x], class_labels_only_seizure)) conf_matrix_only_seizure = confusion_matrix(val_classes_only_seizure, pred_class_only_seizure, labels=class_labels_seizure) np.set_printoptions(precision=2) np.save('./%s Results/%s_confusion_matrix_with_only_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_only_seizure), conf_matrix_only_seizure) plot_confusion_matrix(conf_matrix_only_seizure, classes=class_labels_only_seizure, epoch=epoch, accuracy=percentage_only_seizure, with_seizure=1, title = "Confusion Matrix on Files with Seizure") print("With only Seizure data: %s" % set(val_classes_only_seizure)) #compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_seizure, num_to_label=self.num_to_class) # Files without Seizures val_inputs_without_seizure, val_classes_without_seizure = self.get_sample(size=self.validation_size, with_seizure=0) vector_val_inputs_without_seizure = self.sess.run(self.inference_model, feed_dict={self.inference_input: val_inputs_without_seizure}) del val_inputs_without_seizure pred_class_without_seizure = tempClassifier.predict(vector_val_inputs_without_seizure) percentage_without_seizure = len([i for i, j in zip(val_classes_without_seizure, pred_class_without_seizure) if i == j]) * 100.0 / self.validation_size if self.DEBUG: print("Validation Results: %.3f%% of of %d correct" % (percentage_without_seizure, self.validation_size)) val_classes_without_seizure = list(map(lambda x: self.num_to_class[x], val_classes_without_seizure)) pred_class_without_seizure = list(map(lambda x: self.num_to_class[x], pred_class_without_seizure)) class_labels_without_seizure = [0, 1, 2, 3, 4, 5] class_labels_without_seizure = list(map(lambda x: self.num_to_class[x], class_labels_without_seizure)) conf_matrix_without_seizure = confusion_matrix(val_classes_without_seizure, pred_class_without_seizure, labels=class_labels_without_seizure) np.set_printoptions(precision=2) np.save('./%s Results/%s_confusion_matrix_without_seizure_epoch%s_%.3f%%' % (curr_time, curr_time, epoch, percentage_without_seizure), conf_matrix_without_seizure) 
        plot_confusion_matrix(conf_matrix_without_seizure, classes=class_labels_without_seizure, epoch=epoch, accuracy=percentage_without_seizure, with_seizure=0, title = "Confusion Matrix on Files without Seizure")
        print("Without Seizure data: %s" % set(val_classes_without_seizure))
        #compute_tSNE(vector_inputs, classes, epoch=epoch, accuracy=percentage_without_seizure, num_to_label=self.num_to_class)

        self.count_of_triplets = dict()

        return percentage, conf_matrix
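# --- Illustrative sketch (not part of BrainNet.py) ---------------------------
# triplet_loss() above implements the standard margin-based triplet objective:
# for an (anchor, positive, negative) embedding triple it penalises
# max(||a - p||^2 - ||a - n||^2 + alpha, 0), using the squared-Euclidean
# branch of distance_metric(). The NumPy restatement below only makes that
# math explicit. Note also that the cosine branch of distance_metric()
# references self.num / self.denom even though num and denom are computed as
# locals just above; that looks like a typo, so the cosine distance here uses
# the local values instead.
import numpy as np

def triplet_margin_loss(anchor, positive, negative, alpha=0.5):
    # squared Euclidean distances per row, as in the 'euclidean' branch
    pos_dist = np.sum((anchor - positive) ** 2, axis=1)
    neg_dist = np.sum((anchor - negative) ** 2, axis=1)
    # hinge on the margin alpha, averaged over the batch
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))

def cosine_distance(a, b):
    # corrected form of the 'cosine' branch: 1 - <a, b> / (||a|| * ||b||)
    num = np.sum(a * b, axis=1)
    denom = np.sqrt(np.sum(a * a, axis=1)) * np.sqrt(np.sum(b * b, axis=1))
    return 1.0 - num / denom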
mit
tkaitchuck/nupic
external/linux64/lib/python2.6/site-packages/matplotlib/backends/backend_wx.py
69
77038
from __future__ import division """ backend_wx.py A wxPython backend for matplotlib, based (very heavily) on backend_template.py and backend_gtk.py Author: Jeremy O'Donoghue (jeremy@o-donoghue.com) Derived from original copyright work by John Hunter (jdhunter@ace.bsd.uchicago.edu) Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4 License: This work is licensed under a PSF compatible license. A copy should be included with this source code. """ """ KNOWN BUGS - - Mousewheel (on Windows) only works after menu button has been pressed at least once - Mousewheel on Linux (wxGTK linked against GTK 1.2) does not work at all - Vertical text renders horizontally if you use a non TrueType font on Windows. This is a known wxPython issue. Work-around is to ensure that you use a TrueType font. - Pcolor demo puts chart slightly outside bounding box (approx 1-2 pixels to the bottom left) - Outputting to bitmap more than 300dpi results in some text being incorrectly scaled. Seems to be a wxPython bug on Windows or font point sizes > 60, as font size is correctly calculated. - Performance poorer than for previous direct rendering version - TIFF output not supported on wxGTK. This is a wxGTK issue - Text is not anti-aliased on wxGTK. This is probably a platform configuration issue. - If a second call is made to show(), no figure is generated (#866965) Not implemented: - Printing Fixed this release: - Bug #866967: Interactive operation issues fixed [JDH] - Bug #866969: Dynamic update does not function with backend_wx [JOD] Examples which work on this release: --------------------------------------------------------------- | Windows 2000 | Linux | | wxPython 2.3.3 | wxPython 2.4.2.4 | --------------------------------------------------------------| - alignment_test.py | TBE | OK | - arctest.py | TBE | (3) | - axes_demo.py | OK | OK | - axes_props.py | OK | OK | - bar_stacked.py | TBE | OK | - barchart_demo.py | OK | OK | - color_demo.py | OK | OK | - csd_demo.py | OK | OK | - dynamic_demo.py | N/A | N/A | - dynamic_demo_wx.py | TBE | OK | - embedding_in_gtk.py | N/A | N/A | - embedding_in_wx.py | OK | OK | - errorbar_demo.py | OK | OK | - figtext.py | OK | OK | - histogram_demo.py | OK | OK | - interactive.py | N/A (2) | N/A (2) | - interactive2.py | N/A (2) | N/A (2) | - legend_demo.py | OK | OK | - legend_demo2.py | OK | OK | - line_styles.py | OK | OK | - log_demo.py | OK | OK | - logo.py | OK | OK | - mpl_with_glade.py | N/A (2) | N/A (2) | - mri_demo.py | OK | OK | - mri_demo_with_eeg.py | OK | OK | - multiple_figs_demo.py | OK | OK | - pcolor_demo.py | OK | OK | - psd_demo.py | OK | OK | - scatter_demo.py | OK | OK | - scatter_demo2.py | OK | OK | - simple_plot.py | OK | OK | - stock_demo.py | OK | OK | - subplot_demo.py | OK | OK | - system_monitor.py | N/A (2) | N/A (2) | - text_handles.py | OK | OK | - text_themes.py | OK | OK | - vline_demo.py | OK | OK | --------------------------------------------------------------- (2) - Script uses GTK-specific features - cannot not run, but wxPython equivalent should be written. (3) - Clipping seems to be broken. """ cvs_id = '$Id: backend_wx.py 6484 2008-12-03 18:38:03Z jdh2358 $' import sys, os, os.path, math, StringIO, weakref, warnings import numpy as npy # Debugging settings here... # Debug level set here. If the debug level is less than 5, information # messages (progressively more info for lower value) are printed. 
In addition, # traceback is performed, and pdb activated, for all uncaught exceptions in # this case _DEBUG = 5 if _DEBUG < 5: import traceback, pdb _DEBUG_lvls = {1 : 'Low ', 2 : 'Med ', 3 : 'High', 4 : 'Error' } try: import wx backend_version = wx.VERSION_STRING except: raise ImportError("Matplotlib backend_wx requires wxPython be installed") #!!! this is the call that is causing the exception swallowing !!! #wx.InitAllImageHandlers() def DEBUG_MSG(string, lvl=3, o=None): if lvl >= _DEBUG: cls = o.__class__ # Jeremy, often times the commented line won't print but the # one below does. I think WX is redefining stderr, damned # beast #print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls) print "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls) def debug_on_error(type, value, tb): """Code due to Thomas Heller - published in Python Cookbook (O'Reilley)""" traceback.print_exc(type, value, tb) print pdb.pm() # jdh uncomment class fake_stderr: """Wx does strange things with stderr, as it makes the assumption that there is probably no console. This redirects stderr to the console, since we know that there is one!""" def write(self, msg): print "Stderr: %s\n\r" % msg #if _DEBUG < 5: # sys.excepthook = debug_on_error # WxLogger =wx.LogStderr() # sys.stderr = fake_stderr # Event binding code changed after version 2.5 if wx.VERSION_STRING >= '2.5': def bind(actor,event,action,**kw): actor.Bind(event,action,**kw) else: def bind(actor,event,action,id=None): if id is not None: event(actor, id, action) else: event(actor,action) import matplotlib from matplotlib import verbose from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\ FigureCanvasBase, FigureManagerBase, NavigationToolbar2, \ cursors from matplotlib._pylab_helpers import Gcf from matplotlib.artist import Artist from matplotlib.cbook import exception_to_str, is_string_like, is_writable_file_like from matplotlib.figure import Figure from matplotlib.path import Path from matplotlib.text import _process_text_args, Text from matplotlib.transforms import Affine2D from matplotlib.widgets import SubplotTool from matplotlib import rcParams # the True dots per inch on the screen; should be display dependent # see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi PIXELS_PER_INCH = 75 # Delay time for idle checks IDLE_DELAY = 5 def error_msg_wx(msg, parent=None): """ Signal an error condition -- in a GUI, popup a error dialog """ dialog =wx.MessageDialog(parent = parent, message = msg, caption = 'Matplotlib backend_wx error', style=wx.OK | wx.CENTRE) dialog.ShowModal() dialog.Destroy() return None def raise_msg_to_str(msg): """msg is a return arg from a raise. Join with new lines""" if not is_string_like(msg): msg = '\n'.join(map(str, msg)) return msg class RendererWx(RendererBase): """ The renderer handles all the drawing primitives using a graphics context instance that controls the colors/styles. It acts as the 'renderer' instance used by many classes in the hierarchy. """ #In wxPython, drawing is performed on a wxDC instance, which will #generally be mapped to the client aread of the window displaying #the plot. Under wxPython, the wxDC instance has a wx.Pen which #describes the colour and weight of any lines drawn, and a wxBrush #which describes the fill colour of any closed polygon. 
fontweights = { 100 : wx.LIGHT, 200 : wx.LIGHT, 300 : wx.LIGHT, 400 : wx.NORMAL, 500 : wx.NORMAL, 600 : wx.NORMAL, 700 : wx.BOLD, 800 : wx.BOLD, 900 : wx.BOLD, 'ultralight' : wx.LIGHT, 'light' : wx.LIGHT, 'normal' : wx.NORMAL, 'medium' : wx.NORMAL, 'semibold' : wx.NORMAL, 'bold' : wx.BOLD, 'heavy' : wx.BOLD, 'ultrabold' : wx.BOLD, 'black' : wx.BOLD } fontangles = { 'italic' : wx.ITALIC, 'normal' : wx.NORMAL, 'oblique' : wx.SLANT } # wxPython allows for portable font styles, choosing them appropriately # for the target platform. Map some standard font names to the portable # styles # QUESTION: Is it be wise to agree standard fontnames across all backends? fontnames = { 'Sans' : wx.SWISS, 'Roman' : wx.ROMAN, 'Script' : wx.SCRIPT, 'Decorative' : wx.DECORATIVE, 'Modern' : wx.MODERN, 'Courier' : wx.MODERN, 'courier' : wx.MODERN } def __init__(self, bitmap, dpi): """ Initialise a wxWindows renderer instance. """ DEBUG_MSG("__init__()", 1, self) if wx.VERSION_STRING < "2.8": raise RuntimeError("matplotlib no longer supports wxPython < 2.8 for the Wx backend.\nYou may, however, use the WxAgg backend.") self.width = bitmap.GetWidth() self.height = bitmap.GetHeight() self.bitmap = bitmap self.fontd = {} self.dpi = dpi self.gc = None def flipy(self): return True def offset_text_height(self): return True def get_text_width_height_descent(self, s, prop, ismath): """ get the width and height in display coords of the string s with FontPropertry prop """ #return 1, 1 if ismath: s = self.strip_math(s) if self.gc is None: gc = self.new_gc() else: gc = self.gc gfx_ctx = gc.gfx_ctx font = self.get_wx_font(s, prop) gfx_ctx.SetFont(font, wx.BLACK) w, h, descent, leading = gfx_ctx.GetFullTextExtent(s) return w, h, descent def get_canvas_width_height(self): 'return the canvas width and height in display coords' return self.width, self.height def handle_clip_rectangle(self, gc): new_bounds = gc.get_clip_rectangle() if new_bounds is not None: new_bounds = new_bounds.bounds gfx_ctx = gc.gfx_ctx if gfx_ctx._lastcliprect != new_bounds: gfx_ctx._lastcliprect = new_bounds if new_bounds is None: gfx_ctx.ResetClip() else: gfx_ctx.Clip(new_bounds[0], self.height - new_bounds[1] - new_bounds[3], new_bounds[2], new_bounds[3]) #@staticmethod def convert_path(gfx_ctx, tpath): wxpath = gfx_ctx.CreatePath() for points, code in tpath.iter_segments(): if code == Path.MOVETO: wxpath.MoveToPoint(*points) elif code == Path.LINETO: wxpath.AddLineToPoint(*points) elif code == Path.CURVE3: wxpath.AddQuadCurveToPoint(*points) elif code == Path.CURVE4: wxpath.AddCurveToPoint(*points) elif code == Path.CLOSEPOLY: wxpath.CloseSubpath() return wxpath convert_path = staticmethod(convert_path) def draw_path(self, gc, path, transform, rgbFace=None): gc.select() self.handle_clip_rectangle(gc) gfx_ctx = gc.gfx_ctx transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height) tpath = transform.transform_path(path) wxpath = self.convert_path(gfx_ctx, tpath) if rgbFace is not None: gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace))) gfx_ctx.DrawPath(wxpath) else: gfx_ctx.StrokePath(wxpath) gc.unselect() def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None): if bbox != None: l,b,w,h = bbox.bounds else: l=0 b=0, w=self.width h=self.height rows, cols, image_str = im.as_rgba_str() image_array = npy.fromstring(image_str, npy.uint8) image_array.shape = rows, cols, 4 bitmap = wx.BitmapFromBufferRGBA(cols,rows,image_array) gc = self.get_gc() gc.select() gc.gfx_ctx.DrawBitmap(bitmap,int(l),int(b),int(w),int(h)) 
gc.unselect() def draw_text(self, gc, x, y, s, prop, angle, ismath): """ Render the matplotlib.text.Text instance None) """ if ismath: s = self.strip_math(s) DEBUG_MSG("draw_text()", 1, self) gc.select() self.handle_clip_rectangle(gc) gfx_ctx = gc.gfx_ctx font = self.get_wx_font(s, prop) color = gc.get_wxcolour(gc.get_rgb()) gfx_ctx.SetFont(font, color) w, h, d = self.get_text_width_height_descent(s, prop, ismath) x = int(x) y = int(y-h) if angle == 0.0: gfx_ctx.DrawText(s, x, y) else: rads = angle / 180.0 * math.pi xo = h * math.sin(rads) yo = h * math.cos(rads) gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads) gc.unselect() def new_gc(self): """ Return an instance of a GraphicsContextWx, and sets the current gc copy """ DEBUG_MSG('new_gc()', 2, self) self.gc = GraphicsContextWx(self.bitmap, self) self.gc.select() self.gc.unselect() return self.gc def get_gc(self): """ Fetch the locally cached gc. """ # This is a dirty hack to allow anything with access to a renderer to # access the current graphics context assert self.gc != None, "gc must be defined" return self.gc def get_wx_font(self, s, prop): """ Return a wx font. Cache instances in a font dictionary for efficiency """ DEBUG_MSG("get_wx_font()", 1, self) key = hash(prop) fontprop = prop fontname = fontprop.get_name() font = self.fontd.get(key) if font is not None: return font # Allow use of platform independent and dependent font names wxFontname = self.fontnames.get(fontname, wx.ROMAN) wxFacename = '' # Empty => wxPython chooses based on wx_fontname # Font colour is determined by the active wx.Pen # TODO: It may be wise to cache font information size = self.points_to_pixels(fontprop.get_size_in_points()) font =wx.Font(int(size+0.5), # Size wxFontname, # 'Generic' name self.fontangles[fontprop.get_style()], # Angle self.fontweights[fontprop.get_weight()], # Weight False, # Underline wxFacename) # Platform font name # cache the font and gc and return it self.fontd[key] = font return font def points_to_pixels(self, points): """ convert point measures to pixes using dpi and the pixels per inch of the display """ return points*(PIXELS_PER_INCH/72.0*self.dpi/72.0) class GraphicsContextWx(GraphicsContextBase): """ The graphics context provides the color, line styles, etc... This class stores a reference to a wxMemoryDC, and a wxGraphicsContext that draws to it. Creating a wxGraphicsContext seems to be fairly heavy, so these objects are cached based on the bitmap object that is passed in. The base GraphicsContext stores colors as a RGB tuple on the unit interval, eg, (0.5, 0.0, 1.0). wxPython uses an int interval, but since wxPython colour management is rather simple, I have not chosen to implement a separate colour manager class. 
""" _capd = { 'butt': wx.CAP_BUTT, 'projecting': wx.CAP_PROJECTING, 'round': wx.CAP_ROUND } _joind = { 'bevel': wx.JOIN_BEVEL, 'miter': wx.JOIN_MITER, 'round': wx.JOIN_ROUND } _dashd_wx = { 'solid': wx.SOLID, 'dashed': wx.SHORT_DASH, 'dashdot': wx.DOT_DASH, 'dotted': wx.DOT } _cache = weakref.WeakKeyDictionary() def __init__(self, bitmap, renderer): GraphicsContextBase.__init__(self) #assert self.Ok(), "wxMemoryDC not OK to use" DEBUG_MSG("__init__()", 1, self) dc, gfx_ctx = self._cache.get(bitmap, (None, None)) if dc is None: dc = wx.MemoryDC() dc.SelectObject(bitmap) gfx_ctx = wx.GraphicsContext.Create(dc) gfx_ctx._lastcliprect = None self._cache[bitmap] = dc, gfx_ctx self.bitmap = bitmap self.dc = dc self.gfx_ctx = gfx_ctx self._pen = wx.Pen('BLACK', 1, wx.SOLID) gfx_ctx.SetPen(self._pen) self._style = wx.SOLID self.renderer = renderer def select(self): """ Select the current bitmap into this wxDC instance """ if sys.platform=='win32': self.dc.SelectObject(self.bitmap) self.IsSelected = True def unselect(self): """ Select a Null bitmasp into this wxDC instance """ if sys.platform=='win32': self.dc.SelectObject(wx.NullBitmap) self.IsSelected = False def set_foreground(self, fg, isRGB=None): """ Set the foreground color. fg can be a matlab format string, a html hex color string, an rgb unit tuple, or a float between 0 and 1. In the latter case, grayscale is used. """ # Implementation note: wxPython has a separate concept of pen and # brush - the brush fills any outline trace left by the pen. # Here we set both to the same colour - if a figure is not to be # filled, the renderer will set the brush to be transparent # Same goes for text foreground... DEBUG_MSG("set_foreground()", 1, self) self.select() GraphicsContextBase.set_foreground(self, fg, isRGB) self._pen.SetColour(self.get_wxcolour(self.get_rgb())) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_graylevel(self, frac): """ Set the foreground color. fg can be a matlab format string, a html hex color string, an rgb unit tuple, or a float between 0 and 1. In the latter case, grayscale is used. """ DEBUG_MSG("set_graylevel()", 1, self) self.select() GraphicsContextBase.set_graylevel(self, frac) self._pen.SetColour(self.get_wxcolour(self.get_rgb())) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_linewidth(self, w): """ Set the line width. """ DEBUG_MSG("set_linewidth()", 1, self) self.select() if w>0 and w<1: w = 1 GraphicsContextBase.set_linewidth(self, w) lw = int(self.renderer.points_to_pixels(self._linewidth)) if lw==0: lw = 1 self._pen.SetWidth(lw) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_capstyle(self, cs): """ Set the capstyle as a string in ('butt', 'round', 'projecting') """ DEBUG_MSG("set_capstyle()", 1, self) self.select() GraphicsContextBase.set_capstyle(self, cs) self._pen.SetCap(GraphicsContextWx._capd[self._capstyle]) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_joinstyle(self, js): """ Set the join style to be one of ('miter', 'round', 'bevel') """ DEBUG_MSG("set_joinstyle()", 1, self) self.select() GraphicsContextBase.set_joinstyle(self, js) self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle]) self.gfx_ctx.SetPen(self._pen) self.unselect() def set_linestyle(self, ls): """ Set the line style to be one of """ DEBUG_MSG("set_linestyle()", 1, self) self.select() GraphicsContextBase.set_linestyle(self, ls) try: self._style = GraphicsContextWx._dashd_wx[ls] except KeyError: self._style = wx.LONG_DASH# Style not used elsewhere... 
# On MS Windows platform, only line width of 1 allowed for dash lines if wx.Platform == '__WXMSW__': self.set_linewidth(1) self._pen.SetStyle(self._style) self.gfx_ctx.SetPen(self._pen) self.unselect() def get_wxcolour(self, color): """return a wx.Colour from RGB format""" DEBUG_MSG("get_wx_color()", 1, self) if len(color) == 3: r, g, b = color r *= 255 g *= 255 b *= 255 return wx.Colour(red=int(r), green=int(g), blue=int(b)) else: r, g, b, a = color r *= 255 g *= 255 b *= 255 a *= 255 return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a)) class FigureCanvasWx(FigureCanvasBase, wx.Panel): """ The FigureCanvas contains the figure and does event handling. In the wxPython backend, it is derived from wxPanel, and (usually) lives inside a frame instantiated by a FigureManagerWx. The parent window probably implements a wx.Sizer to control the displayed control size - but we give a hint as to our preferred minimum size. """ keyvald = { wx.WXK_CONTROL : 'control', wx.WXK_SHIFT : 'shift', wx.WXK_ALT : 'alt', wx.WXK_LEFT : 'left', wx.WXK_UP : 'up', wx.WXK_RIGHT : 'right', wx.WXK_DOWN : 'down', wx.WXK_ESCAPE : 'escape', wx.WXK_F1 : 'f1', wx.WXK_F2 : 'f2', wx.WXK_F3 : 'f3', wx.WXK_F4 : 'f4', wx.WXK_F5 : 'f5', wx.WXK_F6 : 'f6', wx.WXK_F7 : 'f7', wx.WXK_F8 : 'f8', wx.WXK_F9 : 'f9', wx.WXK_F10 : 'f10', wx.WXK_F11 : 'f11', wx.WXK_F12 : 'f12', wx.WXK_SCROLL : 'scroll_lock', wx.WXK_PAUSE : 'break', wx.WXK_BACK : 'backspace', wx.WXK_RETURN : 'enter', wx.WXK_INSERT : 'insert', wx.WXK_DELETE : 'delete', wx.WXK_HOME : 'home', wx.WXK_END : 'end', wx.WXK_PRIOR : 'pageup', wx.WXK_NEXT : 'pagedown', wx.WXK_PAGEUP : 'pageup', wx.WXK_PAGEDOWN : 'pagedown', wx.WXK_NUMPAD0 : '0', wx.WXK_NUMPAD1 : '1', wx.WXK_NUMPAD2 : '2', wx.WXK_NUMPAD3 : '3', wx.WXK_NUMPAD4 : '4', wx.WXK_NUMPAD5 : '5', wx.WXK_NUMPAD6 : '6', wx.WXK_NUMPAD7 : '7', wx.WXK_NUMPAD8 : '8', wx.WXK_NUMPAD9 : '9', wx.WXK_NUMPAD_ADD : '+', wx.WXK_NUMPAD_SUBTRACT : '-', wx.WXK_NUMPAD_MULTIPLY : '*', wx.WXK_NUMPAD_DIVIDE : '/', wx.WXK_NUMPAD_DECIMAL : 'dec', wx.WXK_NUMPAD_ENTER : 'enter', wx.WXK_NUMPAD_UP : 'up', wx.WXK_NUMPAD_RIGHT : 'right', wx.WXK_NUMPAD_DOWN : 'down', wx.WXK_NUMPAD_LEFT : 'left', wx.WXK_NUMPAD_PRIOR : 'pageup', wx.WXK_NUMPAD_NEXT : 'pagedown', wx.WXK_NUMPAD_PAGEUP : 'pageup', wx.WXK_NUMPAD_PAGEDOWN : 'pagedown', wx.WXK_NUMPAD_HOME : 'home', wx.WXK_NUMPAD_END : 'end', wx.WXK_NUMPAD_INSERT : 'insert', wx.WXK_NUMPAD_DELETE : 'delete', } def __init__(self, parent, id, figure): """ Initialise a FigureWx instance. - Initialise the FigureCanvasBase and wxPanel parents. 
- Set event handlers for: EVT_SIZE (Resize event) EVT_PAINT (Paint event) """ FigureCanvasBase.__init__(self, figure) # Set preferred window size hint - helps the sizer (if one is # connected) l,b,w,h = figure.bbox.bounds w = int(math.ceil(w)) h = int(math.ceil(h)) wx.Panel.__init__(self, parent, id, size=wx.Size(w, h)) def do_nothing(*args, **kwargs): warnings.warn('could not find a setinitialsize function for backend_wx; please report your wxpython version=%s to the matplotlib developers list'%backend_version) pass # try to find the set size func across wx versions try: getattr(self, 'SetInitialSize') except AttributeError: self.SetInitialSize = getattr(self, 'SetBestFittingSize', do_nothing) if not hasattr(self,'IsShownOnScreen'): self.IsShownOnScreen = getattr(self, 'IsVisible', lambda *args: True) # Create the drawing bitmap self.bitmap =wx.EmptyBitmap(w, h) DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w,h), 2, self) # TODO: Add support for 'point' inspection and plot navigation. self._isDrawn = False bind(self, wx.EVT_SIZE, self._onSize) bind(self, wx.EVT_PAINT, self._onPaint) bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground) bind(self, wx.EVT_KEY_DOWN, self._onKeyDown) bind(self, wx.EVT_KEY_UP, self._onKeyUp) bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown) bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown) bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp) bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel) bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown) bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown) bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp) bind(self, wx.EVT_MOTION, self._onMotion) bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave) bind(self, wx.EVT_ENTER_WINDOW, self._onEnter) bind(self, wx.EVT_IDLE, self._onIdle) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.macros = {} # dict from wx id to seq of macros self.Printer_Init() def Destroy(self, *args, **kwargs): wx.Panel.Destroy(self, *args, **kwargs) def Copy_to_Clipboard(self, event=None): "copy bitmap of canvas to system clipboard" bmp_obj = wx.BitmapDataObject() bmp_obj.SetBitmap(self.bitmap) wx.TheClipboard.Open() wx.TheClipboard.SetData(bmp_obj) wx.TheClipboard.Close() def Printer_Init(self): """initialize printer settings using wx methods""" self.printerData = wx.PrintData() self.printerData.SetPaperId(wx.PAPER_LETTER) self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER) self.printerPageData= wx.PageSetupDialogData() self.printerPageData.SetMarginBottomRight((25,25)) self.printerPageData.SetMarginTopLeft((25,25)) self.printerPageData.SetPrintData(self.printerData) self.printer_width = 5.5 self.printer_margin= 0.5 def Printer_Setup(self, event=None): """set up figure for printing. The standard wx Printer Setup Dialog seems to die easily. Therefore, this setup simply asks for image width and margin for printing. """ dmsg = """Width of output figure in inches. 
The current aspect ration will be kept.""" dlg = wx.Dialog(self, -1, 'Page Setup for Printing' , (-1,-1)) df = dlg.GetFont() df.SetWeight(wx.NORMAL) df.SetPointSize(11) dlg.SetFont(df) x_wid = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_width, size=(70,-1)) x_mrg = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_margin,size=(70,-1)) sizerAll = wx.BoxSizer(wx.VERTICAL) sizerAll.Add(wx.StaticText(dlg,-1,dmsg), 0, wx.ALL | wx.EXPAND, 5) sizer = wx.FlexGridSizer(0,3) sizerAll.Add(sizer, 0, wx.ALL | wx.EXPAND, 5) sizer.Add(wx.StaticText(dlg,-1,'Figure Width'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(x_wid, 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'in'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'Margin'), 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(x_mrg, 1, wx.ALIGN_LEFT|wx.ALL, 2) sizer.Add(wx.StaticText(dlg,-1,'in'), 1, wx.ALIGN_LEFT|wx.ALL, 2) btn = wx.Button(dlg,wx.ID_OK, " OK ") btn.SetDefault() sizer.Add(btn, 1, wx.ALIGN_LEFT, 5) btn = wx.Button(dlg,wx.ID_CANCEL, " CANCEL ") sizer.Add(btn, 1, wx.ALIGN_LEFT, 5) dlg.SetSizer(sizerAll) dlg.SetAutoLayout(True) sizerAll.Fit(dlg) if dlg.ShowModal() == wx.ID_OK: try: self.printer_width = float(x_wid.GetValue()) self.printer_margin = float(x_mrg.GetValue()) except: pass if ((self.printer_width + self.printer_margin) > 7.5): self.printerData.SetOrientation(wx.LANDSCAPE) else: self.printerData.SetOrientation(wx.PORTRAIT) dlg.Destroy() return def Printer_Setup2(self, event=None): """set up figure for printing. Using the standard wx Printer Setup Dialog. """ if hasattr(self, 'printerData'): data = wx.PageSetupDialogData() data.SetPrintData(self.printerData) else: data = wx.PageSetupDialogData() data.SetMarginTopLeft( (15, 15) ) data.SetMarginBottomRight( (15, 15) ) dlg = wx.PageSetupDialog(self, data) if dlg.ShowModal() == wx.ID_OK: data = dlg.GetPageSetupData() tl = data.GetMarginTopLeft() br = data.GetMarginBottomRight() self.printerData = wx.PrintData(data.GetPrintData()) dlg.Destroy() def Printer_Preview(self, event=None): """ generate Print Preview with wx Print mechanism""" po1 = PrintoutWx(self, width=self.printer_width, margin=self.printer_margin) po2 = PrintoutWx(self, width=self.printer_width, margin=self.printer_margin) self.preview = wx.PrintPreview(po1,po2,self.printerData) if not self.preview.Ok(): print "error with preview" self.preview.SetZoom(50) frameInst= self while not isinstance(frameInst, wx.Frame): frameInst= frameInst.GetParent() frame = wx.PreviewFrame(self.preview, frameInst, "Preview") frame.Initialize() frame.SetPosition(self.GetPosition()) frame.SetSize((850,650)) frame.Centre(wx.BOTH) frame.Show(True) self.gui_repaint() def Printer_Print(self, event=None): """ Print figure using wx Print mechanism""" pdd = wx.PrintDialogData() # SetPrintData for 2.4 combatibility pdd.SetPrintData(self.printerData) pdd.SetToPage(1) printer = wx.Printer(pdd) printout = PrintoutWx(self, width=int(self.printer_width), margin=int(self.printer_margin)) print_ok = printer.Print(self, printout, True) if wx.VERSION_STRING >= '2.5': if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED: wx.MessageBox("""There was a problem printing. Perhaps your current printer is not set correctly?""", "Printing", wx.OK) else: if not print_ok: wx.MessageBox("""There was a problem printing. Perhaps your current printer is not set correctly?""", "Printing", wx.OK) printout.Destroy() self.gui_repaint() def draw_idle(self): """ Delay rendering until the GUI is idle. 
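        Unlike an immediate draw(), this only marks the canvas as stale and
        (re)starts a one-shot timer, so bursts of calls are coalesced into a
        single redraw once pending events have been processed. Illustrative
        call site (any code holding a reference to the canvas may use it)::

            canvas.draw_idle()   # cheap to call repeatedly; one redraw happens later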
""" DEBUG_MSG("draw_idle()", 1, self) self._isDrawn = False # Force redraw # Create a timer for handling draw_idle requests # If there are events pending when the timer is # complete, reset the timer and continue. The # alternative approach, binding to wx.EVT_IDLE, # doesn't behave as nicely. if hasattr(self,'_idletimer'): self._idletimer.Restart(IDLE_DELAY) else: self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle) # FutureCall is a backwards-compatible alias; # CallLater became available in 2.7.1.1. def _onDrawIdle(self, *args, **kwargs): if wx.GetApp().Pending(): self._idletimer.Restart(IDLE_DELAY, *args, **kwargs) else: del self._idletimer # GUI event or explicit draw call may already # have caused the draw to take place if not self._isDrawn: self.draw(*args, **kwargs) def draw(self, drawDC=None): """ Render the figure using RendererWx instance renderer, or using a previously defined renderer if none is specified. """ DEBUG_MSG("draw()", 1, self) self.renderer = RendererWx(self.bitmap, self.figure.dpi) self.figure.draw(self.renderer) self._isDrawn = True self.gui_repaint(drawDC=drawDC) def flush_events(self): wx.Yield() def start_event_loop(self, timeout=0): """ Start an event loop. This is used to start a blocking event loop so that interactive functions, such as ginput and waitforbuttonpress, can wait for events. This should not be confused with the main GUI event loop, which is always running and has nothing to do with this. Call signature:: start_event_loop(self,timeout=0) This call blocks until a callback function triggers stop_event_loop() or *timeout* is reached. If *timeout* is <=0, never timeout. Raises RuntimeError if event loop is already running. """ if hasattr(self, '_event_loop'): raise RuntimeError("Event loop already running") id = wx.NewId() timer = wx.Timer(self, id=id) if timeout > 0: timer.Start(timeout*1000, oneShot=True) bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id) # Event loop handler for start/stop event loop self._event_loop = wx.EventLoop() self._event_loop.Run() timer.Stop() def stop_event_loop(self, event=None): """ Stop an event loop. This is used to stop a blocking event loop so that interactive functions, such as ginput and waitforbuttonpress, can wait for events. Call signature:: stop_event_loop_default(self) """ if hasattr(self,'_event_loop'): if self._event_loop.IsRunning(): self._event_loop.Exit() del self._event_loop def _get_imagesave_wildcards(self): 'return the wildcard string for the filesave dialog' default_filetype = self.get_default_filetype() filetypes = self.get_supported_filetypes_grouped() sorted_filetypes = filetypes.items() sorted_filetypes.sort() wildcards = [] extensions = [] filter_index = 0 for i, (name, exts) in enumerate(sorted_filetypes): ext_list = ';'.join(['*.%s' % ext for ext in exts]) extensions.append(exts[0]) wildcard = '%s (%s)|%s' % (name, ext_list, ext_list) if default_filetype in exts: filter_index = i wildcards.append(wildcard) wildcards = '|'.join(wildcards) return wildcards, extensions, filter_index def gui_repaint(self, drawDC=None): """ Performs update of the displayed image on the GUI canvas, using the supplied device context. If drawDC is None, a ClientDC will be used to redraw the image. 
""" DEBUG_MSG("gui_repaint()", 1, self) if self.IsShownOnScreen(): if drawDC is None: drawDC=wx.ClientDC(self) drawDC.BeginDrawing() drawDC.DrawBitmap(self.bitmap, 0, 0) drawDC.EndDrawing() #wx.GetApp().Yield() else: pass filetypes = FigureCanvasBase.filetypes.copy() filetypes['bmp'] = 'Windows bitmap' filetypes['jpeg'] = 'JPEG' filetypes['jpg'] = 'JPEG' filetypes['pcx'] = 'PCX' filetypes['png'] = 'Portable Network Graphics' filetypes['tif'] = 'Tagged Image Format File' filetypes['tiff'] = 'Tagged Image Format File' filetypes['xpm'] = 'X pixmap' def print_figure(self, filename, *args, **kwargs): # Use pure Agg renderer to draw FigureCanvasBase.print_figure(self, filename, *args, **kwargs) # Restore the current view; this is needed because the # artist contains methods rely on particular attributes # of the rendered figure for determining things like # bounding boxes. if self._isDrawn: self.draw() def print_bmp(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs) def print_jpeg(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_JPEG, *args, **kwargs) print_jpg = print_jpeg def print_pcx(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs) def print_png(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs) def print_tiff(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_TIF, *args, **kwargs) print_tif = print_tiff def print_xpm(self, filename, *args, **kwargs): return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs) def _print_image(self, filename, filetype, *args, **kwargs): origBitmap = self.bitmap l,b,width,height = self.figure.bbox.bounds width = int(math.ceil(width)) height = int(math.ceil(height)) self.bitmap = wx.EmptyBitmap(width, height) renderer = RendererWx(self.bitmap, self.figure.dpi) gc = renderer.new_gc() self.figure.draw(renderer) # Now that we have rendered into the bitmap, save it # to the appropriate file type and clean up if is_string_like(filename): if not self.bitmap.SaveFile(filename, filetype): DEBUG_MSG('print_figure() file save error', 4, self) raise RuntimeError('Could not save figure to %s\n' % (filename)) elif is_writable_file_like(filename): if not self.bitmap.ConvertToImage().SaveStream(filename, filetype): DEBUG_MSG('print_figure() file save error', 4, self) raise RuntimeError('Could not save figure to %s\n' % (filename)) # Restore everything to normal self.bitmap = origBitmap # Note: draw is required here since bits of state about the # last renderer are strewn about the artist draw methods. Do # not remove the draw without first verifying that these have # been cleaned up. The artist contains() methods will fail # otherwise. if self._isDrawn: self.draw() self.Refresh() def get_default_filetype(self): return 'png' def _onPaint(self, evt): """ Called when wxPaintEvt is generated """ DEBUG_MSG("_onPaint()", 1, self) drawDC = wx.PaintDC(self) if not self._isDrawn: self.draw(drawDC=drawDC) else: self.gui_repaint(drawDC=drawDC) evt.Skip() def _onEraseBackground(self, evt): """ Called when window is redrawn; since we are blitting the entire image, we can leave this blank to suppress flicker. """ pass def _onSize(self, evt): """ Called when wxEventSize is generated. In this application we attempt to resize to fit the window, so it is better to take the performance hit and redraw the whole window. 
""" DEBUG_MSG("_onSize()", 2, self) # Create a new, correctly sized bitmap self._width, self._height = self.GetClientSize() self.bitmap =wx.EmptyBitmap(self._width, self._height) self._isDrawn = False if self._width <= 1 or self._height <= 1: return # Empty figure dpival = self.figure.dpi winch = self._width/dpival hinch = self._height/dpival self.figure.set_size_inches(winch, hinch) # Rendering will happen on the associated paint event # so no need to do anything here except to make sure # the whole background is repainted. self.Refresh(eraseBackground=False) def _get_key(self, evt): keyval = evt.m_keyCode if keyval in self.keyvald: key = self.keyvald[keyval] elif keyval <256: key = chr(keyval) else: key = None # why is wx upcasing this? if key is not None: key = key.lower() return key def _onIdle(self, evt): 'a GUI idle event' evt.Skip() FigureCanvasBase.idle_event(self, guiEvent=evt) def _onKeyDown(self, evt): """Capture key press.""" key = self._get_key(evt) evt.Skip() FigureCanvasBase.key_press_event(self, key, guiEvent=evt) def _onKeyUp(self, evt): """Release key.""" key = self._get_key(evt) #print 'release key', key evt.Skip() FigureCanvasBase.key_release_event(self, key, guiEvent=evt) def _onRightButtonDown(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() self.CaptureMouse() FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt) def _onRightButtonUp(self, evt): """End measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() if self.HasCapture(): self.ReleaseMouse() FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt) def _onLeftButtonDown(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() self.CaptureMouse() FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt) def _onLeftButtonUp(self, evt): """End measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() #print 'release button', 1 evt.Skip() if self.HasCapture(): self.ReleaseMouse() FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt) def _onMouseWheel(self, evt): """Translate mouse wheel events into matplotlib events""" # Determine mouse location x = evt.GetX() y = self.figure.bbox.height - evt.GetY() # Convert delta/rotation/rate into a floating point step size delta = evt.GetWheelDelta() rotation = evt.GetWheelRotation() rate = evt.GetLinesPerAction() #print "delta,rotation,rate",delta,rotation,rate step = rate*float(rotation)/delta # Done handling event evt.Skip() # Mac is giving two events for every wheel event # Need to skip every second one if wx.Platform == '__WXMAC__': if not hasattr(self,'_skipwheelevent'): self._skipwheelevent = True elif self._skipwheelevent: self._skipwheelevent = False return # Return without processing event else: self._skipwheelevent = True # Convert to mpl event FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt) def _onMotion(self, evt): """Start measuring on an axis.""" x = evt.GetX() y = self.figure.bbox.height - evt.GetY() evt.Skip() FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt) def _onLeave(self, evt): """Mouse has left the window.""" evt.Skip() FigureCanvasBase.leave_notify_event(self, guiEvent = evt) def _onEnter(self, evt): """Mouse has entered the window.""" FigureCanvasBase.enter_notify_event(self, guiEvent = evt) ######################################################################## # # The following functions and classes are 
for pylab compatibility # mode (matplotlib.pylab) and implement figure managers, etc... # ######################################################################## def _create_wx_app(): """ Creates a wx.PySimpleApp instance if a wx.App has not been created. """ wxapp = wx.GetApp() if wxapp is None: wxapp = wx.PySimpleApp() wxapp.SetExitOnFrameDelete(True) # retain a reference to the app object so it does not get garbage # collected and cause segmentation faults _create_wx_app.theWxApp = wxapp def draw_if_interactive(): """ This should be overriden in a windowing environment if drawing should be done in interactive python mode """ DEBUG_MSG("draw_if_interactive()", 1, None) if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager is not None: figManager.canvas.draw() def show(): """ Current implementation assumes that matplotlib is executed in a PyCrust shell. It appears to be possible to execute wxPython applications from within a PyCrust without having to ensure that wxPython has been created in a secondary thread (e.g. SciPy gui_thread). Unfortunately, gui_thread seems to introduce a number of further dependencies on SciPy modules, which I do not wish to introduce into the backend at this point. If there is a need I will look into this in a later release. """ DEBUG_MSG("show()", 3, None) for figwin in Gcf.get_all_fig_managers(): figwin.frame.Show() if show._needmain and not matplotlib.is_interactive(): # start the wxPython gui event if there is not already one running wxapp = wx.GetApp() if wxapp is not None: # wxPython 2.4 has no wx.App.IsMainLoopRunning() method imlr = getattr(wxapp, 'IsMainLoopRunning', lambda: False) if not imlr(): wxapp.MainLoop() show._needmain = False show._needmain = True def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ # in order to expose the Figure constructor to the pylab # interface we need to create the figure here DEBUG_MSG("new_figure_manager()", 3, None) _create_wx_app() FigureClass = kwargs.pop('FigureClass', Figure) fig = FigureClass(*args, **kwargs) frame = FigureFrameWx(num, fig) figmgr = frame.get_figure_manager() if matplotlib.is_interactive(): figmgr.frame.Show() return figmgr class FigureFrameWx(wx.Frame): def __init__(self, num, fig): # On non-Windows platform, explicitly set the position - fix # positioning bug on some Linux platforms if wx.Platform == '__WXMSW__': pos = wx.DefaultPosition else: pos =wx.Point(20,20) l,b,w,h = fig.bbox.bounds wx.Frame.__init__(self, parent=None, id=-1, pos=pos, title="Figure %d" % num) # Frame will be sized later by the Fit method DEBUG_MSG("__init__()", 1, self) self.num = num statbar = StatusBarWx(self) self.SetStatusBar(statbar) self.canvas = self.get_canvas(fig) self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height)) self.sizer =wx.BoxSizer(wx.VERTICAL) self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND) # By adding toolbar in sizer, we are able to put it at the bottom # of the frame - so appearance is closer to GTK version self.toolbar = self._get_toolbar(statbar) if self.toolbar is not None: self.toolbar.Realize() if wx.Platform == '__WXMAC__': # Mac platform (OSX 10.3, MacPython) does not seem to cope with # having a toolbar in a sizer. This work-around gets the buttons # back, but at the expense of having the toolbar at the top self.SetToolBar(self.toolbar) else: # On Windows platform, default window size is incorrect, so set # toolbar width to figure width. 
tw, th = self.toolbar.GetSizeTuple() fw, fh = self.canvas.GetSizeTuple() # By adding toolbar in sizer, we are able to put it at the bottom # of the frame - so appearance is closer to GTK version. # As noted above, doesn't work for Mac. self.toolbar.SetSize(wx.Size(fw, th)) self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND) self.SetSizer(self.sizer) self.Fit() self.figmgr = FigureManagerWx(self.canvas, num, self) bind(self, wx.EVT_CLOSE, self._onClose) def _get_toolbar(self, statbar): if matplotlib.rcParams['toolbar']=='classic': toolbar = NavigationToolbarWx(self.canvas, True) elif matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2Wx(self.canvas) toolbar.set_status_bar(statbar) else: toolbar = None return toolbar def get_canvas(self, fig): return FigureCanvasWx(self, -1, fig) def get_figure_manager(self): DEBUG_MSG("get_figure_manager()", 1, self) return self.figmgr def _onClose(self, evt): DEBUG_MSG("onClose()", 1, self) self.canvas.stop_event_loop() Gcf.destroy(self.num) #self.Destroy() def GetToolBar(self): """Override wxFrame::GetToolBar as we don't have managed toolbar""" return self.toolbar def Destroy(self, *args, **kwargs): wx.Frame.Destroy(self, *args, **kwargs) if self.toolbar is not None: self.toolbar.Destroy() wxapp = wx.GetApp() if wxapp: wxapp.Yield() return True class FigureManagerWx(FigureManagerBase): """ This class contains the FigureCanvas and GUI frame It is instantiated by GcfWx whenever a new figure is created. GcfWx is responsible for managing multiple instances of FigureManagerWx. NB: FigureManagerBase is found in _pylab_helpers public attrs canvas - a FigureCanvasWx(wx.Panel) instance window - a wxFrame instance - http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin_wxframe.html#wxframe """ def __init__(self, canvas, num, frame): DEBUG_MSG("__init__()", 1, self) FigureManagerBase.__init__(self, canvas, num) self.frame = frame self.window = frame self.tb = frame.GetToolBar() self.toolbar = self.tb # consistent with other backends def notify_axes_change(fig): 'this will be called whenever the current axes is changed' if self.tb != None: self.tb.update() self.canvas.figure.add_axobserver(notify_axes_change) def showfig(*args): frame.Show() # attach a show method to the figure self.canvas.figure.show = showfig def destroy(self, *args): DEBUG_MSG("destroy()", 1, self) self.frame.Destroy() #if self.tb is not None: self.tb.Destroy() import wx #wx.GetApp().ProcessIdle() wx.WakeUpIdle() def set_window_title(self, title): self.window.SetTitle(title) def resize(self, width, height): 'Set the canvas size in pixels' self.canvas.SetInitialSize(wx.Size(width, height)) self.window.GetSizer().Fit(self.window) # Identifiers for toolbar controls - images_wx contains bitmaps for the images # used in the controls. wxWindows does not provide any stock images, so I've # 'stolen' those from GTK2, and transformed them into the appropriate format. #import images_wx _NTB_AXISMENU =wx.NewId() _NTB_AXISMENU_BUTTON =wx.NewId() _NTB_X_PAN_LEFT =wx.NewId() _NTB_X_PAN_RIGHT =wx.NewId() _NTB_X_ZOOMIN =wx.NewId() _NTB_X_ZOOMOUT =wx.NewId() _NTB_Y_PAN_UP =wx.NewId() _NTB_Y_PAN_DOWN =wx.NewId() _NTB_Y_ZOOMIN =wx.NewId() _NTB_Y_ZOOMOUT =wx.NewId() #_NTB_SUBPLOT =wx.NewId() _NTB_SAVE =wx.NewId() _NTB_CLOSE =wx.NewId() def _load_bitmap(filename): """ Load a bitmap file from the backends/images subdirectory in which the matplotlib library is installed. The filename parameter should not contain any path information as this is determined automatically. 
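    For example, _load_bitmap('home.png') resolves to 'home.png' under
    rcParams['datapath']/images; this is how NavigationToolbar2Wx._init_toolbar
    below obtains its toolbar icons.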
Returns a wx.Bitmap object """ basedir = os.path.join(rcParams['datapath'],'images') bmpFilename = os.path.normpath(os.path.join(basedir, filename)) if not os.path.exists(bmpFilename): raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename) bmp = wx.Bitmap(bmpFilename) return bmp class MenuButtonWx(wx.Button): """ wxPython does not permit a menu to be incorporated directly into a toolbar. This class simulates the effect by associating a pop-up menu with a button in the toolbar, and managing this as though it were a menu. """ def __init__(self, parent): wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ", style=wx.BU_EXACTFIT) self._toolbar = parent self._menu =wx.Menu() self._axisId = [] # First two menu items never change... self._allId =wx.NewId() self._invertId =wx.NewId() self._menu.Append(self._allId, "All", "Select all axes", False) self._menu.Append(self._invertId, "Invert", "Invert axes selected", False) self._menu.AppendSeparator() bind(self, wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON) bind(self, wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId) bind(self, wx.EVT_MENU, self._handleInvertAxesSelected, id=self._invertId) def Destroy(self): self._menu.Destroy() self.Destroy() def _onMenuButton(self, evt): """Handle menu button pressed.""" x, y = self.GetPositionTuple() w, h = self.GetSizeTuple() self.PopupMenuXY(self._menu, x, y+h-4) # When menu returned, indicate selection in button evt.Skip() def _handleSelectAllAxes(self, evt): """Called when the 'select all axes' menu item is selected.""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def _handleInvertAxesSelected(self, evt): """Called when the invert all menu item is selected""" if len(self._axisId) == 0: return for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): self._menu.Check(self._axisId[i], False) else: self._menu.Check(self._axisId[i], True) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def _onMenuItemSelected(self, evt): """Called whenever one of the specific axis menu items is selected""" current = self._menu.IsChecked(evt.GetId()) if current: new = False else: new = True self._menu.Check(evt.GetId(), new) self._toolbar.set_active(self.getActiveAxes()) evt.Skip() def updateAxes(self, maxAxis): """Ensures that there are entries for max_axis axes in the menu (selected by default).""" if maxAxis > len(self._axisId): for i in range(len(self._axisId) + 1, maxAxis + 1, 1): menuId =wx.NewId() self._axisId.append(menuId) self._menu.Append(menuId, "Axis %d" % i, "Select axis %d" % i, True) self._menu.Check(menuId, True) bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId) self._toolbar.set_active(range(len(self._axisId))) def getActiveAxes(self): """Return a list of the selected axes.""" active = [] for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): active.append(i) return active def updateButtonText(self, lst): """Update the list of selected axes in the menu button""" axis_txt = '' for e in lst: axis_txt += '%d,' % (e+1) # remove trailing ',' and add to button string self.SetLabel("Axes: %s" % axis_txt[:-1]) cursord = { cursors.MOVE : wx.CURSOR_HAND, cursors.HAND : wx.CURSOR_HAND, cursors.POINTER : wx.CURSOR_ARROW, cursors.SELECT_REGION : wx.CURSOR_CROSS, } class SubplotToolWX(wx.Frame): def __init__(self, targetfig): wx.Frame.__init__(self, None, -1, "Configure subplots") toolfig = 
Figure((6,3)) canvas = FigureCanvasWx(self, -1, toolfig) # Create a figure manager to manage things figmgr = FigureManager(canvas, 1, self) # Now put all into a sizer sizer = wx.BoxSizer(wx.VERTICAL) # This way of adding to sizer allows resizing sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW) self.SetSizer(sizer) self.Fit() tool = SubplotTool(targetfig, toolfig) class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar): def __init__(self, canvas): wx.ToolBar.__init__(self, canvas.GetParent(), -1) NavigationToolbar2.__init__(self, canvas) self.canvas = canvas self._idle = True self.statbar = None def get_canvas(self, frame, fig): return FigureCanvasWx(frame, -1, fig) def _init_toolbar(self): DEBUG_MSG("_init_toolbar", 1, self) self._parent = self.canvas.GetParent() _NTB2_HOME =wx.NewId() self._NTB2_BACK =wx.NewId() self._NTB2_FORWARD =wx.NewId() self._NTB2_PAN =wx.NewId() self._NTB2_ZOOM =wx.NewId() _NTB2_SAVE = wx.NewId() _NTB2_SUBPLOT =wx.NewId() self.SetToolBitmapSize(wx.Size(24,24)) self.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'), 'Home', 'Reset original view') self.AddSimpleTool(self._NTB2_BACK, _load_bitmap('back.png'), 'Back', 'Back navigation view') self.AddSimpleTool(self._NTB2_FORWARD, _load_bitmap('forward.png'), 'Forward', 'Forward navigation view') # todo: get new bitmap self.AddCheckTool(self._NTB2_PAN, _load_bitmap('move.png'), shortHelp='Pan', longHelp='Pan with left, zoom with right') self.AddCheckTool(self._NTB2_ZOOM, _load_bitmap('zoom_to_rect.png'), shortHelp='Zoom', longHelp='Zoom to rectangle') self.AddSeparator() self.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'), 'Configure subplots', 'Configure subplot parameters') self.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'), 'Save', 'Save plot contents to file') bind(self, wx.EVT_TOOL, self.home, id=_NTB2_HOME) bind(self, wx.EVT_TOOL, self.forward, id=self._NTB2_FORWARD) bind(self, wx.EVT_TOOL, self.back, id=self._NTB2_BACK) bind(self, wx.EVT_TOOL, self.zoom, id=self._NTB2_ZOOM) bind(self, wx.EVT_TOOL, self.pan, id=self._NTB2_PAN) bind(self, wx.EVT_TOOL, self.configure_subplot, id=_NTB2_SUBPLOT) bind(self, wx.EVT_TOOL, self.save, id=_NTB2_SAVE) self.Realize() def zoom(self, *args): self.ToggleTool(self._NTB2_PAN, False) NavigationToolbar2.zoom(self, *args) def pan(self, *args): self.ToggleTool(self._NTB2_ZOOM, False) NavigationToolbar2.pan(self, *args) def configure_subplot(self, evt): frame = wx.Frame(None, -1, "Configure subplots") toolfig = Figure((6,3)) canvas = self.get_canvas(frame, toolfig) # Create a figure manager to manage things figmgr = FigureManager(canvas, 1, frame) # Now put all into a sizer sizer = wx.BoxSizer(wx.VERTICAL) # This way of adding to sizer allows resizing sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW) frame.SetSizer(sizer) frame.Fit() tool = SubplotTool(self.canvas.figure, toolfig) frame.Show() def save(self, evt): # Fetch the required filename and file type. filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards() default_file = "image." 
+ self.canvas.get_default_filetype() dlg = wx.FileDialog(self._parent, "Save to file", "", default_file, filetypes, wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR) dlg.SetFilterIndex(filter_index) if dlg.ShowModal() == wx.ID_OK: dirname = dlg.GetDirectory() filename = dlg.GetFilename() DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self) format = exts[dlg.GetFilterIndex()] basename, ext = os.path.splitext(filename) if ext.startswith('.'): ext = ext[1:] if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format!=ext: #looks like they forgot to set the image type drop #down, going with the extension. warnings.warn('extension %s did not match the selected image type %s; going with %s'%(ext, format, ext), stacklevel=0) format = ext try: self.canvas.print_figure( os.path.join(dirname, filename), format=format) except Exception, e: error_msg_wx(str(e)) def set_cursor(self, cursor): cursor =wx.StockCursor(cursord[cursor]) self.canvas.SetCursor( cursor ) def release(self, event): try: del self.lastrect except AttributeError: pass def dynamic_update(self): d = self._idle self._idle = False if d: self.canvas.draw() self._idle = True def draw_rubberband(self, event, x0, y0, x1, y1): 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744' canvas = self.canvas dc =wx.ClientDC(canvas) # Set logical function to XOR for rubberbanding dc.SetLogicalFunction(wx.XOR) # Set dc brush and pen # Here I set brush and pen to white and grey respectively # You can set it to your own choices # The brush setting is not really needed since we # dont do any filling of the dc. It is set just for # the sake of completion. wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT) wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID) dc.SetBrush(wbrush) dc.SetPen(wpen) dc.ResetBoundingBox() dc.BeginDrawing() height = self.canvas.figure.bbox.height y1 = height - y1 y0 = height - y0 if y1<y0: y0, y1 = y1, y0 if x1<y0: x0, x1 = x1, x0 w = x1 - x0 h = y1 - y0 rect = int(x0), int(y0), int(w), int(h) try: lastrect = self.lastrect except AttributeError: pass else: dc.DrawRectangle(*lastrect) #erase last self.lastrect = rect dc.DrawRectangle(*rect) dc.EndDrawing() def set_status_bar(self, statbar): self.statbar = statbar def set_message(self, s): if self.statbar is not None: self.statbar.set_function(s) def set_history_buttons(self): can_backward = (self._views._pos > 0) can_forward = (self._views._pos < len(self._views._elements) - 1) self.EnableTool(self._NTB2_BACK, can_backward) self.EnableTool(self._NTB2_FORWARD, can_forward) class NavigationToolbarWx(wx.ToolBar): def __init__(self, canvas, can_kill=False): """ figure is the Figure instance that the toolboar controls win, if not None, is the wxWindow the Figure is embedded in """ wx.ToolBar.__init__(self, canvas.GetParent(), -1) DEBUG_MSG("__init__()", 1, self) self.canvas = canvas self._lastControl = None self._mouseOnButton = None self._parent = canvas.GetParent() self._NTB_BUTTON_HANDLER = { _NTB_X_PAN_LEFT : self.panx, _NTB_X_PAN_RIGHT : self.panx, _NTB_X_ZOOMIN : self.zoomx, _NTB_X_ZOOMOUT : self.zoomy, _NTB_Y_PAN_UP : self.pany, _NTB_Y_PAN_DOWN : self.pany, _NTB_Y_ZOOMIN : self.zoomy, _NTB_Y_ZOOMOUT : self.zoomy } self._create_menu() self._create_controls(can_kill) self.Realize() def _create_menu(self): """ Creates the 'menu' - implemented as a button which opens a pop-up menu since wxPython does not allow a menu as a control """ DEBUG_MSG("_create_menu()", 1, self) self._menu = MenuButtonWx(self) self.AddControl(self._menu) self.AddSeparator() 
def _create_controls(self, can_kill): """ Creates the button controls, and links them to event handlers """ DEBUG_MSG("_create_controls()", 1, self) # Need the following line as Windows toolbars default to 15x16 self.SetToolBitmapSize(wx.Size(16,16)) self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'), 'Left', 'Scroll left') self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'), 'Right', 'Scroll right') self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase X axis magnification') self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease X axis magnification') self.AddSeparator() self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap('stock_up.xpm'), 'Up', 'Scroll up') self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'), 'Down', 'Scroll down') self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase Y axis magnification') self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease Y axis magnification') self.AddSeparator() self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'), 'Save', 'Save plot contents as images') self.AddSeparator() bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT) bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT) bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN) bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT) bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP) bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN) bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN) bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT) bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE) bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId()) if can_kill: bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE) bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel) def set_active(self, ind): """ ind is a list of index numbers for the axes which are to be made active """ DEBUG_MSG("set_active()", 1, self) self._ind = ind if ind != None: self._active = [ self._axes[i] for i in self._ind ] else: self._active = [] # Now update button text wit active axes self._menu.updateButtonText(ind) def get_last_control(self): """Returns the identity of the last toolbar button pressed.""" return self._lastControl def panx(self, direction): DEBUG_MSG("panx()", 1, self) for a in self._active: a.xaxis.pan(direction) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def pany(self, direction): DEBUG_MSG("pany()", 1, self) for a in self._active: a.yaxis.pan(direction) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def zoomx(self, in_out): DEBUG_MSG("zoomx()", 1, self) for a in self._active: a.xaxis.zoom(in_out) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def zoomy(self, in_out): DEBUG_MSG("zoomy()", 1, self) for a in self._active: a.yaxis.zoom(in_out) self.canvas.draw() self.canvas.Refresh(eraseBackground=False) def update(self): """ Update the toolbar menu - called when (e.g.) 
a new subplot or axes are added """ DEBUG_MSG("update()", 1, self) self._axes = self.canvas.figure.get_axes() self._menu.updateAxes(len(self._axes)) def _do_nothing(self, d): """A NULL event handler - does nothing whatsoever""" pass # Local event handlers - mainly supply parameters to pan/scroll functions def _onEnterTool(self, evt): toolId = evt.GetSelection() try: self.button_fn = self._NTB_BUTTON_HANDLER[toolId] except KeyError: self.button_fn = self._do_nothing evt.Skip() def _onLeftScroll(self, evt): self.panx(-1) evt.Skip() def _onRightScroll(self, evt): self.panx(1) evt.Skip() def _onXZoomIn(self, evt): self.zoomx(1) evt.Skip() def _onXZoomOut(self, evt): self.zoomx(-1) evt.Skip() def _onUpScroll(self, evt): self.pany(1) evt.Skip() def _onDownScroll(self, evt): self.pany(-1) evt.Skip() def _onYZoomIn(self, evt): self.zoomy(1) evt.Skip() def _onYZoomOut(self, evt): self.zoomy(-1) evt.Skip() def _onMouseEnterButton(self, button): self._mouseOnButton = button def _onMouseLeaveButton(self, button): if self._mouseOnButton == button: self._mouseOnButton = None def _onMouseWheel(self, evt): if evt.GetWheelRotation() > 0: direction = 1 else: direction = -1 self.button_fn(direction) _onSave = NavigationToolbar2Wx.save def _onClose(self, evt): self.GetParent().Destroy() class StatusBarWx(wx.StatusBar): """ A status bar is added to _FigureFrame to allow measurements and the previously selected scroll function to be displayed as a user convenience. """ def __init__(self, parent): wx.StatusBar.__init__(self, parent, -1) self.SetFieldsCount(2) self.SetStatusText("None", 1) #self.SetStatusText("Measurement: None", 2) #self.Reposition() def set_function(self, string): self.SetStatusText("%s" % string, 1) #def set_measurement(self, string): # self.SetStatusText("Measurement: %s" % string, 2) #< Additions for printing support: Matt Newville class PrintoutWx(wx.Printout): """Simple wrapper around wx Printout class -- all the real work here is scaling the matplotlib canvas bitmap to the current printer's definition. 
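    In rough terms (see OnPrintPage below, where ppw is the printer's
    pixels-per-inch and grw the on-screen canvas width in pixels): the figure
    dpi is temporarily set to ppw, the bitmap is stretched by
    vscale = ppw / fig_dpi, and the device-context user scale is set to

        user_scale = (width_in_inches * fig_dpi * page_scale) / grw

    where page_scale differs from 1.0 only in print preview.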
""" def __init__(self, canvas, width=5.5,margin=0.5, title='matplotlib'): wx.Printout.__init__(self,title=title) self.canvas = canvas # width, in inches of output figure (approximate) self.width = width self.margin = margin def HasPage(self, page): #current only supports 1 page print return page == 1 def GetPageInfo(self): return (1, 1, 1, 1) def OnPrintPage(self, page): self.canvas.draw() dc = self.GetDC() (ppw,pph) = self.GetPPIPrinter() # printer's pixels per in (pgw,pgh) = self.GetPageSizePixels() # page size in pixels (dcw,dch) = dc.GetSize() (grw,grh) = self.canvas.GetSizeTuple() # save current figure dpi resolution and bg color, # so that we can temporarily set them to the dpi of # the printer, and the bg color to white bgcolor = self.canvas.figure.get_facecolor() fig_dpi = self.canvas.figure.dpi # draw the bitmap, scaled appropriately vscale = float(ppw) / fig_dpi # set figure resolution,bg color for printer self.canvas.figure.dpi = ppw self.canvas.figure.set_facecolor('#FFFFFF') renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi) self.canvas.figure.draw(renderer) self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale)) self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale)) self.canvas.draw() # page may need additional scaling on preview page_scale = 1.0 if self.IsPreview(): page_scale = float(dcw)/pgw # get margin in pixels = (margin in in) * (pixels/in) top_margin = int(self.margin * pph * page_scale) left_margin = int(self.margin * ppw * page_scale) # set scale so that width of output is self.width inches # (assuming grw is size of graph in inches....) user_scale = (self.width * fig_dpi * page_scale)/float(grw) dc.SetDeviceOrigin(left_margin,top_margin) dc.SetUserScale(user_scale,user_scale) # this cute little number avoid API inconsistencies in wx try: dc.DrawBitmap(self.canvas.bitmap, 0, 0) except: try: dc.DrawBitmap(self.canvas.bitmap, (0, 0)) except: pass # restore original figure resolution self.canvas.figure.set_facecolor(bgcolor) self.canvas.figure.dpi = fig_dpi self.canvas.draw() return True #> ######################################################################## # # Now just provide the standard names that backend.__init__ is expecting # ######################################################################## Toolbar = NavigationToolbarWx FigureManager = FigureManagerWx
gpl-3.0
ScreamingUdder/mantid
scripts/HFIRPowderReduction/HfirPDReductionGUI.py
1
91448
# pylint: disable=invalid-name, relative-import, too-many-lines,too-many-instance-attributes,too-many-arguments,C901 ################################################################################ # Main class for HFIR powder reduction GUI # Key word for future developing: FUTURE, NEXT, REFACTOR, RELEASE 2.0 ################################################################################ from __future__ import (absolute_import, division, print_function) from six.moves import range import numpy import os try: import urllib.request as urllib except ImportError: import urllib from .ui_MainWindow import Ui_MainWindow # import line for the UI python class from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s import mantid import mantidqtpython as mqt from . import HfirPDReductionControl # ----- default configuration --------------- DEFAULT_SERVER = 'http://neutron.ornl.gov/user_data' DEFAULT_INSTRUMENT = 'hb2a' DEFAULT_WAVELENGTH = 2.4100 # ------------------------------------------- class EmptyError(Exception): """ Exception for finding empty input for integer or float """ def __init__(self, value): """ Init """ Exception.__init__(self) self.value = value def __str__(self): return repr(self.value) class MultiScanTabState(object): """ Description of the state of the multi-scan-tab is in """ NO_OPERATION = 0 RELOAD_DATA = 1 REDUCE_DATA = 2 def __init__(self): """ Initialization :return: """ self._expNo = -1 self._scanList = [] self._xMin = None self._xMax = None self._binSize = 0 self._unit = '' self._plotRaw = False self._useDetEfficiencyCorrection = False self._excludeDetectors = [] def compare_state(self, tab_state): """ Compare this tab state and another tab state :param tab_state: :return: """ if isinstance(tab_state, MultiScanTabState) is False: raise NotImplementedError('compare_state must have MultiScanTabStatus as input.') if self._expNo != tab_state.getExpNumber() or self._scanList != tab_state.getScanList: return self.RELOAD_DATA for attname in self.__dict__.keys(): if self.__getattribute__(attname) != tab_state.__getattribute__(attname): return self.REDUCE_DATA return self.NO_OPERATION def getExpNumber(self): """ Get experiment number :return: """ return self._expNo def getScanList(self): """ Get the list of scans :return: """ return self._scanList[:] # pyline: disable=too-many-arguments def setup(self, exp_no, scan_list, min_x, max_x, bin_size, unit, raw, correct_det_eff, exclude_dets): """ Set up the object :param exp_no: :param scan_list: :param min_x: :param max_x: :param bin_size: :param unit: :param raw: :param correct_det_eff: :param exclude_dets: :return: """ self._expNo = int(exp_no) if isinstance(scan_list, list) is False: raise NotImplementedError('Scan_List must be list!') self._scanList = scan_list self._xMin = min_x self._xMax = max_x self._binSize = float(bin_size) self._unit = str(unit) self._plotRaw = raw self._useDetEfficiencyCorrection = correct_det_eff self._excludeDetectors = exclude_dets return # pylint: disable=too-many-public-methods,too-many-branches,too-many-locals,too-many-statements class MainWindow(QtGui.QMainWindow): """ Class of Main Window (top) """ # Copy to ui.setupUI # # Version 3.0 + Import for ui_MainWindow.py # from MplFigureCanvas import Qt4MplCanvas # # Replace 'self.graphicsView = QtGui.QtGraphicsView' with the following # self.graphicsView = Qt4MplCanvas(self.centralwidget) # self.mainplot = self.graphicsView.getPlot() def __init__(self, parent=None): """ Initialization and 
set up """ # Base class QtGui.QMainWindow.__init__(self, parent) # UI Window (from Qt Designer) self.ui = Ui_MainWindow() self.ui.setupUi(self) # Define gui-event handling # menu self.connect(self.ui.actionQuit, QtCore.SIGNAL('triggered()'), self.doExist) self.connect(self.ui.actionFind_Help, QtCore.SIGNAL('triggered()'), self.doHelp) # main self.connect(self.ui.comboBox_wavelength, QtCore.SIGNAL('currentIndexChanged(int)'), self.doUpdateWavelength) self.connect(self.ui.pushButton_browseExcludedDetFile, QtCore.SIGNAL('clicked()'), self.doBrowseExcludedDetetorFile) self.connect(self.ui.checkBox_useDetExcludeFile, QtCore.SIGNAL('stateChanged(int)'), self.do_enable_excluded_dets) # tab 'Raw Detectors' self.connect(self.ui.pushButton_plotRaw, QtCore.SIGNAL('clicked()'), self.doPlotRawPtMain) self.connect(self.ui.pushButton_ptUp, QtCore.SIGNAL('clicked()'), self.do_plot_raw_pt_prev) self.connect(self.ui.pushButton_ptDown, QtCore.SIGNAL('clicked()'), self.doPlotRawPtNext) self.connect(self.ui.pushButton_clearRawDets, QtCore.SIGNAL('clicked()'), self.doClearRawDetCanvas) # tab 'Individual Detectors' self.connect(self.ui.pushButton_plotIndvDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetMain) self.connect(self.ui.pushButton_plotPrevDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetPrev) self.connect(self.ui.pushButton_plotNextDet, QtCore.SIGNAL('clicked()'), self.doPlotIndvDetNext) self.connect(self.ui.pushButton_clearCanvasIndDet, QtCore.SIGNAL('clicked()'), self.doClearIndDetCanvas) self.connect(self.ui.pushButton_plotLog, QtCore.SIGNAL('clicked()'), self.do_plot_sample_log) # tab 'Normalized' self.connect(self.ui.pushButton_loadData, QtCore.SIGNAL('clicked()'), self.doLoadData) self.connect(self.ui.pushButton_prevScan, QtCore.SIGNAL('clicked()'), self.doLoadReduceScanPrev) self.connect(self.ui.pushButton_nextScan, QtCore.SIGNAL('clicked()'), self.doLoadReduceScanNext) self.connect(self.ui.pushButton_unit2theta, QtCore.SIGNAL('clicked()'), self.doReduce2Theta) self.connect(self.ui.pushButton_unitD, QtCore.SIGNAL('clicked()'), self.doReduceDSpacing) self.connect(self.ui.pushButton_unitQ, QtCore.SIGNAL('clicked()'), self.doReduceQ) self.connect(self.ui.pushButton_saveData, QtCore.SIGNAL('clicked()'), self.doSaveData) self.connect(self.ui.pushButton_clearTab2Canvas, QtCore.SIGNAL('clicked()'), self.doClearCanvas) # tab 'Multiple Scans' self.connect(self.ui.pushButton_loadMultData, QtCore.SIGNAL('clicked()'), self.doLoadSetData) self.connect(self.ui.pushButton_mscanBin, QtCore.SIGNAL('clicked()'), self.doReduceSetData) self.connect(self.ui.pushButton_mergeScans, QtCore.SIGNAL('clicked()'), self.doMergeScans) self.connect(self.ui.pushButton_viewMScan1D, QtCore.SIGNAL('clicked()'), self.doMergeScanView1D) self.connect(self.ui.pushButton_view2D, QtCore.SIGNAL('clicked()'), self.doMergeScanView2D) self.connect(self.ui.pushButton_viewMerge, QtCore.SIGNAL('clicked()'), self.doMergeScanViewMerged) self.connect(self.ui.pushButton_clearMultCanvas, QtCore.SIGNAL('clicked()'), self.doClearMultiRunCanvas) self.connect(self.ui.pushButton_saveAllIndScans, QtCore.SIGNAL('clicked()'), self.doSaveMultipleScans) self.connect(self.ui.pushButton_saveMerge, QtCore.SIGNAL('clicked()'), self.doSaveMergedScan) self.connect(self.ui.pushButton_plotRawMultiScans, QtCore.SIGNAL('clicked()'), self.do_convert_plot_multi_scans) # tab 'Vanadium' self.connect(self.ui.pushButton_stripVanPeaks, QtCore.SIGNAL('clicked()'), self.doStripVandiumPeaks) self.connect(self.ui.pushButton_saveVanRun, QtCore.SIGNAL('clicked()'), 
self.doSaveVanRun) self.connect(self.ui.pushButton_rebin2Theta, QtCore.SIGNAL('clicked()'), self.doReduceVanadium2Theta) self.connect(self.ui.pushButton_smoothVanData, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumData) self.connect(self.ui.pushButton_applySmooth, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumApply) self.connect(self.ui.pushButton_undoSmooth, QtCore.SIGNAL('clicked()'), self.doSmoothVanadiumUndo) # tab 'Advanced Setup' self.connect(self.ui.pushButton_browseCache, QtCore.SIGNAL('clicked()'), self.doBrowseCache) self.connect(self.ui.radioButton_useServer, QtCore.SIGNAL('clicked()'), self.doChangeSrcLocation) self.connect(self.ui.radioButton_useLocal, QtCore.SIGNAL('clicked()'), self.doChangeSrcLocation) self.connect(self.ui.pushButton_browseLocalSrc, QtCore.SIGNAL('clicked()'), self.doBrowseLocalDataSrc) self.connect(self.ui.pushButton_chkServer, QtCore.SIGNAL('clicked()'), self.doCheckSrcServer) # Define signal-event handling # define event handlers for matplotlib canvas self.ui.graphicsView_mergeRun.canvas.mpl_connect('button_press_event', self.on_mouseDownEvent) self.ui.graphicsView_mergeRun.canvas.mpl_connect('motion_notify_event', self.on_mouseMotion) # Widget type definition validator0 = QtGui.QIntValidator(self.ui.lineEdit_expNo) validator0.setBottom(1) self.ui.lineEdit_expNo.setValidator(validator0) validator1 = QtGui.QIntValidator(self.ui.lineEdit_expNo) validator1.setBottom(1) self.ui.lineEdit_scanNo.setValidator(validator1) validator2 = QtGui.QDoubleValidator(self.ui.lineEdit_wavelength) validator2.setBottom(0.) self.ui.lineEdit_wavelength.setValidator(validator2) validator3 = QtGui.QDoubleValidator(self.ui.lineEdit_xmin) validator3.setBottom(0.) self.ui.lineEdit_xmin.setValidator(validator3) validator4 = QtGui.QDoubleValidator(self.ui.lineEdit_xmax) validator4.setBottom(0.) self.ui.lineEdit_xmax.setValidator(validator4) validator5 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize) validator5.setBottom(0.) self.ui.lineEdit_binsize.setValidator(validator5) validator6 = QtGui.QDoubleValidator(self.ui.lineEdit_ptNo) validator6.setBottom(0) self.ui.lineEdit_ptNo.setValidator(validator6) validator7 = QtGui.QDoubleValidator(self.ui.lineEdit_detID) validator7.setBottom(0) self.ui.lineEdit_detID.setValidator(validator7) validator8 = QtGui.QDoubleValidator(self.ui.lineEdit_min2Theta) validator8.setBottom(0.) self.ui.lineEdit_min2Theta.setValidator(validator8) validator9 = QtGui.QDoubleValidator(self.ui.lineEdit_max2Theta) validator9.setBottom(0.) self.ui.lineEdit_max2Theta.setValidator(validator9) validator10 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize2Theta) validator10.setBottom(0.) self.ui.lineEdit_binsize2Theta.setValidator(validator10) validator11 = QtGui.QIntValidator(self.ui.lineEdit_scanStart) validator11.setBottom(1) self.ui.lineEdit_scanStart.setValidator(validator11) validator12 = QtGui.QIntValidator(self.ui.lineEdit_scanEnd) validator12.setBottom(1) self.ui.lineEdit_scanEnd.setValidator(validator12) validator13 = QtGui.QDoubleValidator(self.ui.lineEdit_normalizeMonitor) validator13.setBottom(0.) self.ui.lineEdit_normalizeMonitor.setValidator(validator13) validator14 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMinX) validator14.setBottom(0.) self.ui.lineEdit_mergeMinX.setValidator(validator14) validator15 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMaxX) validator15.setBottom(0.) self.ui.lineEdit_mergeMaxX.setValidator(validator15) validator16 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeBinSize) validator16.setBottom(0.) 
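        # NOTE (illustrative sketch, not part of the original code): the repeated
        # validator wiring in this block could equivalently be table-driven, e.g.
        #
        #     for line_edit, validator_cls, bottom in [
        #             (self.ui.lineEdit_expNo, QtGui.QIntValidator, 1),
        #             (self.ui.lineEdit_wavelength, QtGui.QDoubleValidator, 0.),
        #             # ... remaining line edits and their lower bounds ...
        #     ]:
        #         validator = validator_cls(line_edit)
        #         validator.setBottom(bottom)
        #         line_edit.setValidator(validator)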
self.ui.lineEdit_mergeBinSize.setValidator(validator16) # Get initial setup # RELEASE 2.0 - This part will be implemented soon as default configuration is made # Mantid configuration self._instrument = str(self.ui.comboBox_instrument.currentText()) # UI widgets setup self.ui.comboBox_outputFormat.addItems(['Fullprof']) # Supports Fullprof only now, 'GSAS', 'Fullprof+GSAS']) # RELEASE 2.0 : Need to disable some widgets... consider to refactor the code self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) self.ui.checkBox_useDetExcludeFile.setChecked(True) self.ui.comboBox_wavelength.setCurrentIndex(0) self.ui.lineEdit_wavelength.setText('2.41') self.ui.pushButton_unit2theta.setText(r'$2\theta$') # vanadium spectrum smooth parameters self.ui.lineEdit_smoothParams.setText('20,2') # Set up data source self._serverAddress = DEFAULT_SERVER self._srcFromServer = True self._localSrcDataDir = None self._srcAtLocal = False self._currUnit = '2theta' # Workspaces self._myControl = HfirPDReductionControl.HFIRPDRedControl() # Interactive graphics self._viewMerge_X = None self._viewMerge_Y = None # Control of plots: key = canvas, value = list of 2-integer-tuple (expno, scanno) self._tabLineDict = {} self._tabBinParamDict = {} for key in [2]: self._tabLineDict[key] = [] for key in [2, 3, 4]: self._tabBinParamDict[key] = [None, None, None] self._lastMergeLabel = "" self._lastMergeIndex = -1 self._expNo = None self._scanNo = None self._detID = None self._indvXLabel = None self._rawDetExpNo = None self._rawDetScanNo = None self._rawDetPlotMode = None self._rawDetPtNo = None self._indvDetCanvasMode = 'samplelog' # Multiple scan tab self._multiScanExp = None self._multiScanList = [] # help self.assistantProcess = QtCore.QProcess(self) # pylint: disable=protected-access self.collectionFile = os.path.join(mantid._bindir, '../docs/qthelp/MantidProject.qhc') version = ".".join(mantid.__version__.split(".")[:2]) self.qtUrl = 'qthelp://org.sphinx.mantidproject.' + version + '/doc/interfaces/HFIR Powder Reduction.html' self.externalUrl = 'http://docs.mantidproject.org/nightly/interfaces/HFIR Powder Reduction.html' # Initial setup for tab self.ui.tabWidget.setCurrentIndex(0) cache_dir = str(self.ui.lineEdit_cache.text()).strip() if len(cache_dir) == 0 or os.path.exists(cache_dir) is False: invalid_cache = cache_dir cache_dir = os.path.expanduser('~') self.ui.lineEdit_cache.setText(cache_dir) if len(invalid_cache) == 0: warning_msg = 'Cache directory is not set. ' else: warning_msg = 'Cache directory {0} does not exist. '.format(invalid_cache) warning_msg += 'Using {0} for caching dowloaded file instead.'.format(cache_dir) print ('[WARNING] {0}'.format(warning_msg)) # Get on hold of raw data file useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() if useserver == uselocal: self._logWarning("It is logically wrong to set up (1) neither server or local dir to " "access data or (2) both server and local dir to retrieve data. 
" "As default, it is set up to download data from server.") useserver = True uselocal = False self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) # register startup mantid.UsageService.registerFeatureUsage("Interface", "HfirPowderReduction", False) return # -- Event Handling ---------------------------------------------------- def doBrowseCache(self): """ Pop out a dialog to let user specify the directory to cache downloaded data """ # home directory homedir = str(self.ui.lineEdit_cache.text()).strip() if len(homedir) > 0 and os.path.exists(homedir): home = homedir else: home = os.getcwd() # pop out a dialog dirs = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory', home)) # set to line edit if dirs != home: self.ui.lineEdit_cache.setText(dirs) return def doBrowseExcludedDetetorFile(self): """ Browse excluded detector's file Return :: None """ # Get file name filefilter = "Text (*.txt);;Data (*.dat);;All files (*)" curDir = os.getcwd() excldetfnames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', curDir, filefilter) try: excldetfname = excldetfnames[0] self.ui.lineEdit_excludedDetFileName.setText(excldetfname) except IndexError: # return if there is no file selected return # Parse det exclusion file print("Detector exclusion file name is %s." % (excldetfname)) excludedetlist, errmsg = self._myControl.parseExcludedDetFile('HB2A', excldetfname) if len(errmsg) > 0: self._logError(errmsg) textbuf = "" for detid in excludedetlist: textbuf += "%d," % (detid) if len(textbuf) > 0: textbuf = textbuf[:-1] self.ui.lineEdit_detExcluded.setText(textbuf) def doBrowseLocalDataSrc(self): """ Browse local data storage """ msg = "Browse local data storage location. Implement ASAP" QtGui.QMessageBox.information(self, "Click!", msg) def doChangeSrcLocation(self): """ Source file location is changed """ useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() print("Use Server: ", useserver) print("Use Local : ", uselocal) if (useserver and uselocal) or not (useserver or uselocal): raise NotImplementedError("Impossible for radio buttons") self._srcAtLocal = uselocal self._srcFromServer = useserver if uselocal is True: self.ui.lineEdit_dataIP.setDisabled(True) self.ui.pushButton_chkServer.setDisabled(True) self.ui.lineEdit_localSrc.setDisabled(False) self.ui.pushButton_browseLocalSrc.setDisabled(False) else: self.ui.lineEdit_dataIP.setDisabled(False) self.ui.pushButton_chkServer.setDisabled(False) self.ui.lineEdit_localSrc.setDisabled(True) self.ui.pushButton_browseLocalSrc.setDisabled(True) def doCheckSrcServer(self): """" Check source data server's availability """ msg = "Check source data server! Implement ASAP" QtGui.QMessageBox.information(self, "Click!", msg) def doClearCanvas(self): """ Clear canvas """ itab = self.ui.tabWidget.currentIndex() if itab == 2: self.ui.graphicsView_reducedData.clearAllLines() self._tabLineDict[itab] = [] def doClearIndDetCanvas(self): """ Clear the canvas in tab 'Individual Detector' and current plotted lines in managing dictionary """ # Clear all lines on canvas self.ui.graphicsView_indvDet.clearAllLines() # Remove their references in dictionary if self.ui.graphicsView_indvDet in self._tabLineDict: self._tabLineDict[self.ui.graphicsView_indvDet] = [] # Reset colur schedule self.ui.graphicsView_indvDet.resetLineColorStyle() def doClearMultiRunCanvas(self): """ Clear the canvas in tab 'Multiple Run' This canvas is applied to both 1D and 2D image. 
Clear-all-lines might be not enough to clear 2D image """ self.ui.graphicsView_mergeRun.clearCanvas() def doClearRawDetCanvas(self): """ Clear the canvas in tab 'Raw Detector': only need to clear lines """ self.ui.graphicsView_Raw.clearAllLines() self._tabLineDict[self.ui.graphicsView_Raw] = [] def doClearVanadiumCanvas(self): """ Clear the canvas in tab 'Vanadium' """ self.ui.graphicsView_vanPeaks.clearAllLines() def doExist(self): """ Exist the application """ clearcache = self.ui.checkBox_delCache.isChecked() if clearcache: urllib.delAllFile(self._cache) self.close() def doHelp(self): """ Show help Copied from DGSPlanner """ try: import pymantidplot pymantidplot.proxies.showCustomInterfaceHelp('HFIR Powder Reduction') except ImportError: self.assistantProcess.close() self.assistantProcess.waitForFinished() helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator() helpapp += 'assistant' args = ['-enableRemoteControl', '-collectionFile', self.collectionFile, '-showUrl', self.qtUrl] if os.path.isfile(helpapp) and os.path.isfile(self.collectionFile): self.assistantProcess.close() self.assistantProcess.waitForFinished() self.assistantProcess.start(helpapp, args) else: mqt.MantidQt.API.MantidDesktopServices.openUrl(QtCore.QUrl(self.externalUrl)) def _load_spice_data_to_raw_table(self, exp_no, scan_no, data_file_name): # flake8: noqa try: success = self._myControl.loadSpicePDData(exp_no, scan_no, data_file_name) return success, "" if success else "Load data failed." except NotImplementedError as ne: return False, str(ne) def _get_corr_file_names_and_wavelength(self, exp_no, scan_no, data_file_name): # Obtain the correction file names and wavelength from SPICE file wavelength_error = False err_msg = "" local_dir = os.path.dirname(data_file_name) try: status, return_body = self._myControl.retrieveCorrectionData(instrument='HB2A', exp=exp_no, scan=scan_no, localdatadir=local_dir) except NotImplementedError as e: err_msg = str(e) if err_msg.count('m1') > 0: # error is about wavelength status = False wavelength_error = True else: # other error raise e if status: auto_wavelength = return_body[0] van_corr_filename = return_body[1] excl_det_filename = return_body[2] if van_corr_filename is not None: self.ui.lineEdit_vcorrFileName.setText(van_corr_filename) if excl_det_filename is not None: self.ui.lineEdit_excludedDetFileName.setText(excl_det_filename) else: auto_wavelength = None van_corr_filename = None excl_det_filename = None return auto_wavelength, van_corr_filename, excl_det_filename, wavelength_error, err_msg def _set_wavelength(self, auto_wavelength, wavelength_error, exp_no, scan_no, err_msg): if auto_wavelength is None: # unable to get wavelength from SPICE data self.ui.comboBox_wavelength.setCurrentIndex(4) if wavelength_error: self.ui.lineEdit_wavelength.setText(err_msg) else: self.ui.lineEdit_wavelength.setText(self.ui.comboBox_wavelength.currentText()) self._myControl.setWavelength(exp_no, scan_no, wavelength=None) else: # get wavelength from SPICE data. 
set value to GUI self.ui.lineEdit_wavelength.setText(str(auto_wavelength)) allowed_wavelengths = [2.41, 1.54, 1.12] num_items = self.ui.comboBox_wavelength.count() good = False for ic in range(num_items - 1): if abs(auto_wavelength - allowed_wavelengths[ic]) < 0.01: good = True self.ui.comboBox_wavelength.setCurrentIndex(ic) if not good: self.ui.comboBox_wavelength.setCurrentIndex(num_items - 1) self._myControl.setWavelength(exp_no, scan_no, wavelength=auto_wavelength) def _get_and_parse_det_efficiency_file(self, van_corr_filename): if self.ui.checkBox_useDetEffCorr.isChecked(): # Apply detector efficiency correction if van_corr_filename is None: # browse vanadium correction file file_filter = "Text (*.txt);;Data (*.dat);;All files (*)" current_dir = os.getcwd() van_corr_filenames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', current_dir, file_filter) if len(van_corr_filenames) > 0: van_corr_filename = van_corr_filenames[0] self.ui.lineEdit_vcorrFileName.setText(str(van_corr_filename)) else: self._logError("User does not specify any vanadium correction file.") self.ui.checkBox_useDetEffCorr.setChecked(False) # Parse if it is not None if van_corr_filename is not None: detector_efficiency_ws, err_msg = self._myControl.parseDetEffCorrFile('HB2A', van_corr_filename) if detector_efficiency_ws is None: print("Parsing detectors efficiency file error: {0}.".format(err_msg)) return None else: return detector_efficiency_ws else: return None else: # Not chosen to apply detector efficiency correction:w return None def _parse_spice_data_to_MDEventWS(self, detector_efficiency_table, exp_no, scan_no): try: print("Det Efficiency Table WS: ", str(detector_efficiency_table)) exec_status = self._myControl.parseSpiceData(exp_no, scan_no, detector_efficiency_table) return exec_status, "" if exec_status else "Parse data failed." except NotImplementedError as e: return False, str(e) def _parse_detector_exclusion_file(self, exclude_detector_filename): if exclude_detector_filename is not None: exclude_detector_list, err_msg = self._myControl.parseExcludedDetFile('HB2A', exclude_detector_filename) text_buf = "" for det_id in exclude_detector_list: text_buf += "{0},".format(det_id) if len(text_buf) > 0: text_buf = text_buf[:-1] self.ui.lineEdit_detExcluded.setText(text_buf) def doLoadData(self, exp=None, scan=None): """ Load and reduce data It does not support for tab 'Advanced Setup' For tab 'Raw Detector' and 'Individual Detector', this method will load data to MDEventWorkspaces For tab 'Normalized' and 'Vanadium', this method will load data to MDEVentWorkspaces but NOT reduce to single spectrum """ # Kick away unsupported tabs i_tab = self.ui.tabWidget.currentIndex() tab_text = str(self.ui.tabWidget.tabText(i_tab)) print("[DB] Current active tab is No. {0} as {1}.".format(i_tab, tab_text)) # Rule out unsupported tab if i_tab == 5: # 'advanced' msg = "Tab {0} does not support 'Load Data'. 
Request is ambiguous.".format(tab_text) QtGui.QMessageBox.information(self, "Click!", msg) return # Get exp number and scan number if isinstance(exp, int) and isinstance(scan, int): # use input exp_no = exp scan_no = scan else: # read from GUI try: exp_no, scan_no = self._uiGetExpScanNumber() self._logDebug("Attending to load Exp {0} Scan {1}.".format(exp_no, scan_no)) except NotImplementedError as ne: self._logError("Error to get Exp and Scan due to {0}.".format(str(ne))) return # Form data file name and download data status, data_filename = self._uiDownloadDataFile(exp=exp_no, scan=scan_no) if not status: self._logError("Unable to download or locate local data file for Exp {0} Scan {1}.".format(exp_no, scan_no)) # (Load data for tab 0, 1, 2 and 4) if i_tab not in [0, 1, 2, 3, 4]: # Unsupported Tabs: programming error! err_msg = "{0}-th tab should not get this far.\n".format(i_tab) err_msg += 'GUI has been changed, but the change has not been considered! iTab = {0}'.format(i_tab) raise NotImplementedError(err_msg) # Load SPICE data to raw table (step 1) load_success, msg = self._load_spice_data_to_raw_table(exp_no, scan_no, data_filename) if not load_success: self._logError(msg) return # Obtain the correction file names and wavelength from SPICE file (auto_wavelength, van_corr_filename, exclude_detector_filename, wavelength_error, err_msg) \ = self._load_spice_data_to_raw_table(exp_no, scan_no, data_filename) # Set wavelength to GUI except 'multiple scans' self._set_wavelength(auto_wavelength, wavelength_error, exp_no, scan_no, err_msg) # Optionally obtain and parse det effecient file detector_efficiency_table_ws = self._get_and_parse_det_efficiency_file(van_corr_filename) # Parse SPICE data to MDEventWorkspaces success, msg = self._parse_spice_data_to_MDEventWS(detector_efficiency_table_ws, exp_no, scan_no) if not success: self._logError(msg) return # Optionally parse detector exclusion file and set to line text self._parse_detector_exclusion_file(exclude_detector_filename) # Set up some widgets for raw detector data. Won't be applied to tab 3 if i_tab != 3: float_sample_log_name_list = self._myControl.getSampleLogNames(exp_no, scan_no) self.ui.comboBox_indvDetXLabel.clear() self.ui.comboBox_indvDetXLabel.addItem("2theta/Scattering Angle") self.ui.comboBox_indvDetXLabel.addItems(float_sample_log_name_list) self.ui.comboBox_indvDetYLabel.clear() self.ui.comboBox_indvDetYLabel.addItems(float_sample_log_name_list) return True def doLoadSetData(self): """ Load a set of data This is the first step of doing multiple scans processing """ # Get inputs for exp number and scans try: rtup = self._uiGetExpScanTabMultiScans() expno = rtup[0] scanlist = rtup[1] except NotImplementedError as nie: self._logError("Unable to load data set in multiple scans due to %s." % (str(nie))) # Load and reduce data loadstatus = True for scan in sorted(scanlist): tempstatus = self.doLoadData(expno, scan) if not tempstatus: self.ui.label_mergeMessage.setText('Error to load Exp %d Scan %d.' % (expno, scan)) loadstatus = False else: message = 'Loaded Exp %d Scan %d.' % (expno, scan) self.ui.label_mergeMessage.setText(message) # Load status if loadstatus: self.ui.label_mergeMessage.setText('All data files are loaded') else: self.ui.label_mergeMessage.setText('Not all data files are loaded') # Wave length haswavelength = True for scan in scanlist: if self._myControl.getWavelength(expno, scan) is None: self._logNotice("Exp %d Scan %d has no wavelength set up." 
% (expno, scan)) haswavelength = False break # Set unit box if haswavelength: self.ui.comboBox_mscanUnit.clear() self.ui.comboBox_mscanUnit.addItems(['2theta', 'dSpacing', 'Momentum Transfer (Q)']) else: self.ui.comboBox_mscanUnit.clear() self.ui.comboBox_mscanUnit.addItems(['2theta']) return def doLoadReduceScanPrev(self): """ Load and reduce previous scan for tab 'Normalized' """ # Reduce scan number by 1 try: scanno = int(self.ui.lineEdit_scanNo.text()) except ValueError: self._logError("Either Exp No or Scan No is not set up right as integer.") return else: scanno = scanno - 1 if scanno < 1: self._logWarning("Scan number is 1 already. Cannot have previous scan") return self.ui.lineEdit_scanNo.setText(str(scanno)) # Load data self.ui.lineEdit_scanNo.setText(str(scanno)) self.doLoadData() # Reduce data self._uiReducePlotNoramlized(self._currUnit) def doLoadReduceScanNext(self): """ Load and reduce next scan for tab 'Normalized' """ # Advance scan number by 1 try: scanno = int(self.ui.lineEdit_scanNo.text()) except ValueError: self._logError("Either Exp No or Scan No is not set up right as integer.") return False else: scanno = scanno + 1 if scanno < 1: self._logWarning("Scan number is 1 already. Cannot have previous scan") return False # Load data self.ui.lineEdit_scanNo.setText(str(scanno)) execstatus = self.doLoadData() print("[DB] Load data : ", execstatus) # Reduce data self._uiReducePlotNoramlized(self._currUnit) def doMergeScans(self): """ Merge several scans for tab 'merge' """ # Get exp number and list of scans try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as ne: self._logError(str(ne)) return False # Check whether the wavelengths are same to merge try: wl_list = [] for scanno in scanlist: print("Exp %d Scan %d. Wavelength = %s." % ( expno, scanno, str(self._myControl.getWavelength(expno, scanno)))) wl_list.append(float(self._myControl.getWavelength(expno, scanno))) wl_list = sorted(wl_list) min_wl = wl_list[0] max_wl = wl_list[-1] if max_wl - min_wl > 1.0: self._logWarning("All scans do not have same wavelengths!") except TypeError: self._logError('Not all scans have wavelength set up. Unable to merge scans.') return # Check! try: unit = str(self.ui.comboBox_mscanUnit.currentText()) xmin, binsize, xmax = self._uiGetBinningParams(itab=3) # wavelength = min_wl mindex = self._myControl.mergeReduceSpiceData(expno, scanlist, unit, xmin, xmax, binsize) except Exception as e: raise e label = "Exp %d, Scan %s." 
% (expno, str(scanlist)) self._plotMergedReducedData(mindex, label) self._lastMergeIndex = mindex self._lastMergeLabel = label return def doMergeScanView1D(self): """ Change the multiple runs to 1D """ # Highlight the button's color self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: yellow; color: red;}') self.ui.pushButton_view2D.setEnabled(True) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: white; color: gray;}') self.ui.pushButton_viewMScan1D.setEnabled(False) # Process input experiment number and scan list try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as e: self._logError(str(e)) return False # Clear image canvas = self.ui.graphicsView_mergeRun canvas.clearAllLines() canvas.clearCanvas() # Plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) for scanno in scanlist: label = "Exp %s Scan %s" % (str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False) def doMergeScanView2D(self): """ Change the merged run's view to 2D plot """ # Highlight button color and change the color of another one self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: white; color: gray;}') self.ui.pushButton_view2D.setEnabled(False) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: yellow; color: red;}') self.ui.pushButton_viewMScan1D.setEnabled(True) # Get list of data to plot r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] # Convert the workspaces to 2D vector vecylist = [] yticklabels = [] xmin = None xmax = None for scanno in scanlist: # put y values to list for constructing 2D array vecx, vecy = self._myControl.getVectorToPlot(expno, scanno) vecylist.append(vecy) yticklabels.append('Exp %d Scan %d' % (expno, scanno)) # set up range of x if xmin is None: xmin = vecx[0] xmax = vecx[-1] dim2array = numpy.array(vecylist) # Plot holdprev = False self.ui.graphicsView_mergeRun.clearAllLines() self.ui.graphicsView_mergeRun.addPlot2D(dim2array, xmin=xmin, xmax=xmax, ymin=0, ymax=len(vecylist), holdprev=holdprev, yticklabels=yticklabels) def doMergeScanViewMerged(self): """ Change the merged run's view to 1D plot """ # Highlight the button's color self.ui.pushButton_view2D.setStyleSheet('QPushButton {color: red;}') self.ui.pushButton_view2D.setEnabled(True) self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {color: red;}') self.ui.pushButton_viewMScan1D.setEnabled(True) # Clear image self.ui.graphicsView_mergeRun.clearCanvas() # Plot self._plotMergedReducedData(mkey=self._lastMergeIndex, label=self._lastMergeLabel) def doPlotIndvDetMain(self): """ Plot individual detector """ # Get exp and scan numbers and check whether the data has been loaded try: expno = self._getInteger(self.ui.lineEdit_expNo) scanno = self._getInteger(self.ui.lineEdit_scanNo) except EmptyError as e: self._logError(str(e)) return # Get detector ID and x-label option try: status, detidlist = self._getIntArray(self.ui.lineEdit_detID.text()) if status is False: errmsg = detidlist print("Unable to parse detector IDs due to %s." 
% (errmsg)) return else: print("[DB] Detectors to plot: %s" % (detidlist)) except EmptyError: self._logError("Detector ID must be specified for plotting individual detector.") return # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() xlabel = str(self.ui.comboBox_indvDetXLabel.currentText()).strip() if xlabel != "" and xlabel != "Pt." and xlabel != "2theta/Scattering Angle": # Plot with sample logs other than Pt. self._logNotice("New Feature: X-label %s is supported for plotting individual detector's counts. " " Set to detector angle." % xlabel) xlabel = xlabel else: # Plot with Pt. or detector angles if xlabel != "Pt.": xlabel = "" self._logNotice("X-label for individual detectror is '%s'." % (xlabel)) # plot for detid in sorted(detidlist): try: self._plot_individual_detector_counts(expno, scanno, detid, xlabel, resetboundary=not overplot) self._expNo = expno self._scanNo = scanno self._detID = detid self._indvXLabel = xlabel except NotImplementedError as e: self._logError(str(e)) def doPlotIndvDetNext(self): """ Plot next raw detector signals for tab 'Individual Detector' """ # Plot try: currdetid = self._detID + 1 # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid, self._indvXLabel) except KeyError as e: self._logError(str(e)) else: self._detID = currdetid # Update widget self.ui.lineEdit_detID.setText(str(self._detID)) def doPlotIndvDetPrev(self): """ Plot previous individual detector's signal for tab 'Individual Detector' """ # Plot try: currdetid = self._detID - 1 # Over plot previous or clear overplot = self.ui.checkBox_overPlotIndvDet.isChecked() if overplot is False: self.doClearIndDetCanvas() self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid, self._indvXLabel) except KeyError as e: self._logError(str(e)) else: self._detID = currdetid # Update widget self.ui.lineEdit_detID.setText(str(self._detID)) def do_convert_plot_multi_scans(self): """ Convert individual plots from normalized to raw or vice verse """ # Identify the mode if str(self.ui.pushButton_plotRawMultiScans.text()) == 'Plot Raw': new_mode = 'Plot Raw' else: new_mode = 'Plot Normalized' # Get information try: min_x = self._getFloat(self.ui.lineEdit_mergeMinX) except EmptyError: min_x = None try: max_x = self._getFloat(self.ui.lineEdit_mergeMaxX) except EmptyError: max_x = None bin_size = self._getFloat(self.ui.lineEdit_mergeBinSize) # Process input experiment number and scan list try: r = self._uiGetExpScanTabMultiScans() exp_no = r[0] scan_list = r[1] except NotImplementedError as e: self._logError(str(e)) return False # Re-process the data if new_mode == 'Plot Raw': if self._multiScanList is None or self._multiScanExp is None: raise NotImplementedError('Experiment and scan list are not set up for plot raw.') self._myControl.scale_to_raw_monitor_counts(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size) else: self._myControl.reset_to_normalized(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size) # Clear image canvas = self.ui.graphicsView_mergeRun canvas.clearAllLines() canvas.clearCanvas() canvas.resetLineColorStyle() # Plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) for scan_no in scan_list: label = "Exp %s Scan %s" % (str(exp_no), str(scan_no)) self._plotReducedData(exp_no, 
scan_no, canvas, xlabel, label=label, clearcanvas=False) # Change the button name if new_mode == 'Plot Raw': self.ui.pushButton_plotRawMultiScans.setText('Plot Normalized') else: self.ui.pushButton_plotRawMultiScans.setText('Plot Raw') def doPlotRawPtMain(self): """ Plot current raw detector signal for a specific Pt. """ # Get experiment number and scan number for data file try: expno = self._getInteger(self.ui.lineEdit_expNo) scanno = self._getInteger(self.ui.lineEdit_scanNo) except EmptyError as e: self._logError(str(e)) return # plot options doOverPlot = bool(self.ui.checkBox_overpltRawDet.isChecked()) plotmode = str(self.ui.comboBox_rawDetMode.currentText()) try: ptNo = self._getInteger(self.ui.lineEdit_ptNo) except EmptyError: ptNo = None # plot print("[DB] Plot Raw Detector: PlotMode = %s." % (plotmode)) execstatus = self._plotRawDetSignal(expno, scanno, plotmode, ptNo, doOverPlot) # set global values if good if execstatus is True: self._rawDetPtNo = ptNo self._rawDetExpNo = expno self._rawDetScanNo = scanno self._rawDetPlotMode = plotmode else: print("[Error] Execution fails with signal %s. " % (str(execstatus))) def doPlotRawPtNext(self): """ Plot next raw detector signals """ # Check if self._rawDetPtNo is not None: ptno = self._rawDetPtNo + 1 else: self._logError("Unable to plot previous raw detector \ because Pt. or Detector ID has not been set up yet.") return # Get plot mode and plot plotmode = str(self.ui.comboBox_rawDetMode.currentText()) overplot = bool(self.ui.checkBox_overpltRawDet.isChecked()) execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode, ptno, overplot) # update if it is good to plot if execstatus: self._rawDetPtNo = ptno self.ui.lineEdit_ptNo.setText(str(ptno)) def do_enable_excluded_dets(self): """ Enable or disable the line editor for excluded detectors :return: """ if self.ui.checkBox_useDetExcludeFile.isChecked(): self.ui.lineEdit_detExcluded.setEnabled(True) else: self.ui.lineEdit_detExcluded.setDisabled(True) def do_plot_raw_pt_prev(self): """ Plot previous raw detector """ # Validate input if self._rawDetPtNo is not None: ptno = self._rawDetPtNo - 1 else: self._logError("Unable to plot previous raw detector \ because Pt. or Detector ID has not been set up yet.") return # get plot mode and do plt plotmode = str(self.ui.comboBox_rawDetMode.currentText()) overplot = bool(self.ui.checkBox_overpltRawDet.isChecked()) execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode, ptno, overplot) # update if it is good to plot if execstatus: self._rawDetPtNo = ptno self.ui.lineEdit_ptNo.setText(str(ptno)) def do_plot_sample_log(self): """ Plot sample log vs. Pt. 
in tab 'Individual Detector' """ expNo = int(self.ui.lineEdit_expNo.text()) scanno = int(self.ui.lineEdit_scanNo.text()) logname = str(self.ui.comboBox_indvDetYLabel.currentText()) self._plotSampleLog(expNo, scanno, logname) def doReduce2Theta(self): """ Rebin the data and plot in 2theta for tab 'Normalized' """ unit = '2theta' self._uiReducePlotNoramlized(unit) def doReduceDSpacing(self): """ Rebin the data and plot in d-spacing for tab 'Normalized' """ # new unit and information unit = "dSpacing" self._uiReducePlotNoramlized(unit) def doReduceQ(self): """ Rebin the data and plot in momentum transfer Q for tab 'Normalized' """ unit = 'Momentum Transfer (Q)' self._uiReducePlotNoramlized(unit) def doReduceSetData(self): """ Reduce multiple data """ # Get exp number and list of scans try: r = self._uiGetExpScanTabMultiScans() expno = r[0] scanlist = r[1] except NotImplementedError as e: self._logError(str(e)) return False else: self._multiScanExp = expno self._multiScanList = scanlist # Reduce and plot data unit = str(self.ui.comboBox_mscanUnit.currentText()) xlabel = self._getXLabelFromUnit(unit) canvas = self.ui.graphicsView_mergeRun # canvas.clearAllLines() NO NEED canvas.clearCanvas() canvas.resetLineColorStyle() for scan in scanlist: r = self._uiReduceData(3, unit, expno, scan) good = r[0] expno = r[1] scanno = r[2] if good is True: label = "Exp %s Scan %s" % (str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False) else: self._logError('Failed to reduce Exp %s Scan %s' % (str(expno), str(scanno))) def doReduceVanadium2Theta(self): """ Rebin MDEventWorkspaces in 2-theta. for pushButton_rebinD in vanadium peak strip tab Suggested workflow 1. Rebin data 2. Calculate vanadium peaks in 2theta 3. """ # Reduce data unit = '2theta' itab = 4 r = self._uiReduceData(itab, unit) good = r[0] expno = r[1] scanno = r[2] # Plot reduced data and vanadium peaks if good is True: canvas = self.ui.graphicsView_vanPeaks xlabel = self._getXLabelFromUnit(unit) label = "Exp %s Scan %s" % (str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=True) # plot vanadium peaks vanpeakpos = self._myControl.getVanadiumPeaksPos(expno, scanno) self.ui.lineEdit_stripVPeaks.setText(str(vanpeakpos)) self._plotPeakIndicators(self.ui.graphicsView_vanPeaks, vanpeakpos) return good def doSaveData(self): """ Save data """ # get exp number and scan number try: # exp and scan expno, scanno = self._uiGetExpScanNumber() # file type filetype = str(self.ui.comboBox_outputFormat.currentText()) # file name savedatadir = str(self.ui.lineEdit_outputFileName.text()).strip() if savedatadir is not None and os.path.exists(savedatadir) is True: homedir = savedatadir else: homedir = os.getcwd() # launch a dialog to get data filefilter = "All files (*);;Fullprof (*.dat);;GSAS (*.gsa)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File', homedir, filefilter)) except NotImplementedError as e: self._logError(str(e)) else: self._myControl.savePDFile(expno, scanno, filetype, sfilename) def doSaveMergedScan(self): """ Save merged scan """ homedir = os.getcwd() filefilter = "Fullprof (*.dat)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter)) self._myControl.saveMergedScan(sfilename, mergeindex=self._lastMergeIndex) def doSaveMultipleScans(self): """ Save multiple scans """ # Get experiment number and scans r = self._uiGetExpScanTabMultiScans() expno = r[0] scanslist = r[1] # Get base 
file name homedir = os.getcwd() savedir = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory To Save Fullprof', homedir)) for scanno in scanslist: sfilename = os.path.join(savedir, "HB2A_Exp%d_Scan%d_FP.dat" % (expno, scanno)) self._myControl.savePDFile(expno, scanno, 'fullprof', sfilename) def doSaveVanRun(self): """ Save the vanadium run with peaks removed """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False homedir = os.getcwd() filefilter = "Fullprof (*.dat)" sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter)) self._myControl.saveProcessedVanadium(expno, scanno, sfilename) def doSmoothVanadiumData(self): """ Smooth vanadium spectrum """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False smoothparams_str = str(self.ui.lineEdit_smoothParams.text()) # Smooth data status = self._myControl.smoothVanadiumSpectrum(expno, scanno, smoothparams_str) if not status: self._logError("Failed to smooth vanadium data") # Plot unit = '2theta' xlabel = self._getXLabelFromUnit(unit) label = "Vanadium Exp %d Scan %d FFT-Smooth by %s" % (expno, scanno, smoothparams_str) self._plotVanadiumRun(expno, scanno, xlabel, label, False, True) def doSmoothVanadiumApply(self): """ Apply smoothing effect to vanadium data """ # Get experiment number and scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False self._myControl.applySmoothVanadium(expno, scanno, True) def doSmoothVanadiumUndo(self): """ Undo smoothing vanadium """ try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % ( str(e))) return False self._myControl.applySmoothVanadium(expno, scanno, False) def doStripVandiumPeaks(self): """ Strip vanadium peaks """ # Get exp number an scan number try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError("Error to get Exp and Scan due to %s." 
% (str(e))) return False # Default unit unit = '2theta' # Get and build binning parameter xmin, binsize, xmax = self._uiGetBinningParams(itab=4) if xmin is None: binparams = '%f' % (binsize) else: binparams = '%f,%f,%f' % (xmin, binsize, xmax) # Strip vanadium peak good = self._myControl.stripVanadiumPeaks(expno, scanno, binparams, vanpeakposlist=None) # Plot if good: xlabel = self._getXLabelFromUnit(unit) label = "Exp %d Scan %d Bin = %.5f Vanadium Stripped" % (expno, scanno, binsize) self._plotVanadiumRun(expno, scanno, xlabel, label, False) def doUpdateWavelength(self): """ Update the wavelength to line edit """ index = self.ui.comboBox_wavelength.currentIndex() print("Update wavelength to ", index) if index == 0: wavelength = 2.41 elif index == 1: wavelength = 1.54 elif index == 2: wavelength = 1.12 else: wavelength = None self.ui.lineEdit_wavelength.setText(str(wavelength)) def on_mouseDownEvent(self, event): """ Respond to pick up a value with mouse down event Definition of button_press_event is: button_press_event(x, y, button, dblclick=False, guiEvent=None) Thus event has x, y and button. event.button has 3 values: 1: left 2: middle 3: right """ # FUTURE: Need to make this work x = event.xdata y = event.ydata button = event.button if x is not None and y is not None: # mouse is clicked within graph if button == 1: msg = "Mouse 1: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button) print(msg) elif button == 2: msg = "Mouse 2: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button) QtGui.QMessageBox.information(self, "Click!", msg) elif button == 3: # right click of mouse will pop up a context-menu # menu should be self.ui.menu? menu = QtGui.QMenu(self) addAction = QtGui.QAction('Add', self) addAction.triggered.connect(self.addSomething) menu.addAction(addAction) rmAction = QtGui.QAction('Remove', self) rmAction.triggered.connect(self.rmSomething) menu.addAction(rmAction) # add other required actions menu.popup(QtGui.QCursor.pos()) def on_mouseMotion(self, event): """ Event handler for mouse being detected to move """ # prev_x = self._viewMerge_X # prev_y = self._viewMerge_Y curx = event.xdata cury = event.ydata if curx is None or cury is None: return self._viewMerge_X = event.xdata self._viewMerge_Y = event.ydata def addSomething(self): """ """ # FUTURE - Need to implement how to deal with this print("Add scan back to merge") def rmSomething(self): """ """ # FUTURE - Need to implement how to deal with this print("Remove a scan from merged data.") # -------------------------------------------------------------------------- # Private methods to plot data # -------------------------------------------------------------------------- def _plotIndividualDetCountsVsSampleLog(self, expno, scanno, detid, samplename, raw=True): """ Plot one specific detector's counts vs. one specified sample log's value along with all Pts. For example: detector 11's counts vs. sample_b's value :param expno: :param scanno: :param detid: :param samplename: :param raw: boolean whether the output is normalized by monitor counts :return: """ # Validate input try: expno = int(expno) scanno = int(scanno) detid = int(detid) samplename = str(samplename) except ValueError: raise NotImplementedError("ExpNo, ScanNo or DetID is not integer.") # Get the array for detector counts vs. sample log value by mapping Pt. 
vecx, vecy = self._myControl.getIndividualDetCountsVsSample(expno, scanno, detid, samplename, raw) # Clear canvas self.ui.graphicsView_indvDet.clearCanvas() # Plot marker, color = self.ui.graphicsView_indvDet.getNextLineMarkerColorCombo() self.ui.graphicsView_indvDet.add_plot1d(vec_x=vecx, vec_y=vecy, marker=marker, color=color, x_label=samplename, y_label='Counts', label='DetID = %d' % (detid)) # FUTURE: In future, need to find out how to use self._graphIndDevMode def _plot_individual_detector_counts(self, expno, scanno, detid, xaxis, resetboundary=False): """ Plot a specific detector's counts along all experiment points (pt) :param expno: :param scanno: :param detid: :param xaxis: :param resetboundary: :return: """ # Validate input expno = int(expno) scanno = int(scanno) detid = int(detid) plot_error_bar = self.ui.checkBox_indDetErrorBar.isChecked() plot_normal = self.ui.checkBox_indDetNormByMon.isChecked() # Reject if data is not loaded if self._myControl.hasDataLoaded(expno, scanno) is False: self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno)) return False # Canvas and line information canvas = self.ui.graphicsView_indvDet if canvas not in self._tabLineDict: self._tabLineDict[canvas] = [] # get data self._logNotice("Input x-axis is '%s' for plotting individual detector's counts." % (xaxis)) if len(xaxis) == 0: xaxis = None vecx, vecy = self._myControl.getIndividualDetCounts(expno, scanno, detid, xaxis, plot_normal) if not isinstance(vecx, numpy.ndarray): raise NotImplementedError('vecx, vecy must be numpy arrays.') if plot_error_bar: y_err = numpy.sqrt(vecy) else: y_err = None # Plot to canvas marker, color = canvas.getNextLineMarkerColorCombo() if xaxis == "" or xaxis == "2theta/Scattering Angle": xlabel = r'$2\theta$' else: xlabel = xaxis # FUTURE - If it works with any way of plotting, then refactor Pt. with any other sample names label = "Detector ID: %d" % (detid) if self._tabLineDict[canvas].count((expno, scanno, detid)) == 0: canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='Counts', label=label, y_err=y_err) self._tabLineDict[canvas].append((expno, scanno, detid)) if resetboundary: # Set xmin and xmax about the data for first time xmin = min(vecx) xmax = max(vecx) ymin = min(vecy) ymax = max(vecy) else: # auto setup for image boundary xmin = min(min(vecx), canvas.getXLimit()[0]) xmax = max(max(vecx), canvas.getXLimit()[1]) ymin = min(min(vecy), canvas.getYLimit()[0]) ymax = max(max(vecy), canvas.getYLimit()[1]) dx = xmax - xmin dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001) # Set canvas mode # FUTURE: Consider how to use self._graphIndDevMode in future # self._graphIndDevMode = (xlabel, 'Counts') return True def _plotPeakIndicators(self, canvas, peakposlist): """ Plot indicators for peaks """ print("[DB] Peak indicators are at ", peakposlist) rangey = canvas.getYLimit() rangex = canvas.getXLimit() for pos in peakposlist: if pos >= rangex[0] and pos <= rangex[1]: vecx = numpy.array([pos, pos]) vecy = numpy.array([rangey[0], rangey[1]]) canvas.add_plot1d(vecx, vecy, color='black', line_style='--') def _plotRawDetSignal(self, expno, scanno, plotmode, ptno, dooverplot): """ Plot the counts of all detectors of a certain Pt. 
in an experiment """ # Validate input expno = int(expno) scanno = int(scanno) # Set up canvas and dictionary canvas = self.ui.graphicsView_Raw if canvas not in self._tabLineDict: self._tabLineDict[canvas] = [] # Check whether data exists if not self._myControl.hasDataLoaded(expno, scanno): self._logError("File has not been loaded for Exp %d Scan %d. Load data first!" % (expno, scanno)) return # Get vecx and vecy if plotmode == "All Pts.": # Plot all Pts. vecxylist = self._myControl.getRawDetectorCounts(expno, scanno) # Clear previous self.ui.graphicsView_Raw.clearAllLines() self.ui.graphicsView_Raw.setLineMarkerColorIndex(0) self._tabLineDict[canvas] = [] elif plotmode == "Single Pts.": # Plot plot ptno = int(ptno) if not dooverplot: self.ui.graphicsView_Raw.clearAllLines() self.ui.graphicsView_Raw.setLineMarkerColorIndex(0) self._tabLineDict[canvas] = [] # Plot one pts. vecxylist = self._myControl.getRawDetectorCounts(expno, scanno, [ptno]) else: # Raise exception raise NotImplementedError("Plot mode %s is not supported." % (plotmode)) # Set up unit/x-label unit = r"$2\theta$" # plot xmin = None xmax = None ymin = None ymax = None for ptno, vecx, vecy in vecxylist: # FUTURE: Label is left blank as there can be too many labels label = 'Pt %d' % (ptno) # skip if this plot has existed if self._tabLineDict[canvas].count((expno, scanno, ptno)) == 1: continue marker, color = canvas.getNextLineMarkerColorCombo() canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=unit, y_label='intensity', label=label) # set up line tuple self._tabLineDict[canvas].append((expno, scanno, ptno)) # auto setup for image boundary xmin = min(min(vecx), canvas.getXLimit()[0]) xmax = max(max(vecx), canvas.getXLimit()[1]) ymin = min(min(vecy), canvas.getYLimit()[0]) ymax = max(max(vecy), canvas.getYLimit()[1]) # Reset canvas x-y limit if xmin is not None: dx = xmax - xmin dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001) return True def _plotMergedReducedData(self, mkey, label): """ Plot the reduced data from merged ... """ # get the data try: vecx, vecy = self._myControl.getMergedVector(mkey) except KeyError as e: self._logError("Unable to retrieve merged reduced data due to %s." % (str(e))) return canvas = self.ui.graphicsView_mergeRun # Clear canvas canvas.clearAllLines() canvas.clearCanvas() # Plot marker, color = canvas.getNextLineMarkerColorCombo() xlabel = self._getXLabelFromUnit(self.ui.comboBox_mscanUnit.currentText()) canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='intensity', label=label) xmax = max(vecx) xmin = min(vecx) dx = xmax - xmin ymax = max(vecy) ymin = min(vecy) dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1) def _plotReducedData(self, exp, scan, canvas, xlabel, label=None, clearcanvas=True, spectrum=0, plot_error=False): """ Plot reduced data for exp and scan """ if spectrum != 0: raise NotImplementedError("Unable to support spectrum = %d case." 
% (spectrum)) # whether the data is load if not self._myControl.hasReducedWS(exp, scan): self._logWarning("No data to plot!") return # get to know whether it is required to clear the image if clearcanvas: canvas.clearAllLines() canvas.setLineMarkerColorIndex(0) # plot vec_x, vec_y = self._myControl.getVectorToPlot(exp, scan) if not isinstance(vec_x, numpy.ndarray): vec_x = numpy.array(vec_x) vec_y = numpy.array(vec_y) # FUTURE - Should check y_err set up correctly in Mantid or not if plot_error: raise RuntimeError('Implement how to return y_err ASAP.') else: y_err = None # get the marker color for the line marker, color = canvas.getNextLineMarkerColorCombo() # plot if label is None: label = "Exp %d Scan %d" % (exp, scan) canvas.add_plot1d(vec_x, vec_y, marker=marker, color=color, x_label=xlabel, y_label='intensity', label=label, y_err=y_err) if clearcanvas: xmax = max(vec_x) xmin = min(vec_x) dx = xmax - xmin ymax = max(vec_y) ymin = min(vec_y) dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1) def _plotSampleLog(self, expno, scanno, samplelogname): """ Plot the value of a sample log among all Pt. """ # Validate input expno = int(expno) scanno = int(scanno) samplelogname = str(samplelogname) # Reject if data is not loaded if not self._myControl.hasDataLoaded(expno, scanno): self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno)) return False # Canvas and line information self._indvDetCanvasMode = 'samplelog' # pop out the xlabel list # REFACTOR - Only need to set up once if previous plot has the same setup if self.ui.comboBox_indvDetXLabel.count() == 0: floatsamplelognamelist = self._myControl.getSampleLogNames(expno, scanno) self.ui.comboBox_indvDetXLabel.clear() self.ui.comboBox_indvDetXLabel.addItems(floatsamplelognamelist) raise RuntimeError("This X-label combo box should be set up during loading data before.") xlabel = str(self.ui.comboBox_indvDetXLabel.currentText()) # get data vecx, vecy = self._myControl.getSampleLogValue(expno, scanno, samplelogname, xlabel) # Plot to canvas canvas = self.ui.graphicsView_indvDet # FUTURE - Clear canvas (think of a case that no need to clear canvas) canvas.clearCanvas() # canvas.clearAllLines() marker, color = canvas.getNextLineMarkerColorCombo() if xlabel is None: xlabel = r'Pt' label = samplelogname canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='Counts', label=label) # auto setup for image boundary xmin = min(vecx) xmax = max(vecx) ymin = min(vecy) ymax = max(vecy) dx = xmax - xmin dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001) return True def _plotVanadiumRun(self, exp, scan, xlabel, label, clearcanvas=False, TempData=False): """ Plot processed vanadium data Arguments: - TempData :: flag whether the vanadium run is a temporary data set """ # Check whether the data is load exp = int(exp) scan = int(scan) if not self._myControl.hasReducedWS(exp, scan): self._logWarning("No data to plot!") return # Get data to plot try: vecx, vecy = self._myControl.getVectorProcessVanToPlot(exp, scan, TempData) if not TempData: vecx, vecyOrig = self._myControl.getVectorToPlot(exp, scan) diffY = vecyOrig - vecy except NotImplementedError as e: errmsg = '[Error] Unable to retrieve processed vanadium spectrum for exp %d scan %d. 
' \ 'Reason: %s' % (exp, scan, str(e)) QtGui.QMessageBox.information(self, "Return!", errmsg) return # Get to know whether it is required to clear the image canvas = self.ui.graphicsView_vanPeaks if TempData: clearcanvas = False if clearcanvas: canvas.clearAllLines() canvas.setLineMarkerColorIndex(0) # get the marker color for the line if TempData: marker = None color = 'blue' else: marker, color = canvas.getNextLineMarkerColorCombo() # plot canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel, y_label='intensity', label=label) if not TempData: canvas.add_plot1d(vecx, diffY, marker='+', color='green', x_label=xlabel, y_label='intensity', label='Diff') # reset canvas limits if clearcanvas: xmax = max(vecx) xmin = min(vecx) dx = xmax - xmin ymax = max(vecy) ymin = min(diffY) dy = ymax - ymin canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1) def _uiDownloadDataFile(self, exp, scan): """ Download data file according to its exp and scan Either download the data from a server or copy the data file from local disk """ # Get on hold of raw data file useserver = self.ui.radioButton_useServer.isChecked() uselocal = self.ui.radioButton_useLocal.isChecked() if useserver == uselocal: self._logError("It is logically wrong to set up server/local dir for data.") self.ui.radioButton_useServer.setChecked(True) self.ui.radioButton_useLocal.setChecked(False) rvalue = False if self._srcFromServer: # Use server: build the URl to download data if not self._serverAddress.endswith('/'): self._serverAddress += '/' fullurl = "%s%s/exp%d/Datafiles/%s_exp%04d_scan%04d.dat" % (self._serverAddress, self._instrument.lower(), exp, self._instrument.upper(), exp, scan) print("URL: ", fullurl) cachedir = str(self.ui.lineEdit_cache.text()).strip() if not os.path.exists(cachedir): invalidcache = cachedir cachedir = os.getcwd() self.ui.lineEdit_cache.setText(cachedir) self._logWarning("Cache directory %s is not valid. " "Using current workspace directory %s as cache." % (invalidcache, cachedir)) filename = '%s_exp%04d_scan%04d.dat' % (self._instrument.upper(), exp, scan) srcFileName = os.path.join(cachedir, filename) status, errmsg = HfirPDReductionControl.downloadFile(fullurl, srcFileName) if not status: self._logError(errmsg) srcFileName = None else: rvalue = True elif self._srcAtLocal: # Data from local srcFileName = os.path.join(self._localSrcDataDir, "%s/Exp%d_Scan%04d.dat" % (self._instrument, exp, scan)) if os.path.exists(srcFileName): rvalue = True else: raise NotImplementedError("Logic error. Neither downloaded from server.\ Nor from local drive") return (rvalue, srcFileName) def _uiGetBinningParams(self, itab): """ Get binning parameters Return: - xmin, binsize, xmax """ # Get value if itab == 2: xmin = str(self.ui.lineEdit_xmin.text()) xmax = str(self.ui.lineEdit_xmax.text()) binsize = str(self.ui.lineEdit_binsize.text()) elif itab == 3: xmin = str(self.ui.lineEdit_mergeMinX.text()) xmax = str(self.ui.lineEdit_mergeMaxX.text()) binsize = str(self.ui.lineEdit_mergeBinSize.text()) elif itab == 4: xmin = str(self.ui.lineEdit_min2Theta.text()) xmax = str(self.ui.lineEdit_max2Theta.text()) binsize = str(self.ui.lineEdit_binsize2Theta.text()) else: raise NotImplementedError("Binning parameters are not used for %d-th tab." 
% (itab)) # Parse values try: xmin = float(xmin) xmax = float(xmax) except ValueError: xmin = None xmax = None else: if xmin >= xmax: raise NotImplementedError("set minimum X = %.5f is larger than \ maximum X = %.5f" % (xmin, xmax)) try: binsize = float(binsize) except ValueError: raise NotImplementedError("Error: bins size '%s' is not a float number." % (binsize)) # Fix for merging as xmin and xmax must be same for all scans if itab == 3 and xmin is None: xmin = 5. xmax = 150. return (xmin, binsize, xmax) def _uiGetExcludedDetectors(self): """ Get excluded detectors from input line edit Return :: list of detector IDs to exclude from reduction """ excludedetidlist = [] if self.ui.checkBox_useDetExcludeFile.isChecked(): detids_str = str(self.ui.lineEdit_detExcluded.text()).strip() status, excludedetidlist = self._getIntArray(detids_str) if status is False: self._logError("Extra scans are not a list of integers: %s." % ( str(self.ui.lineEdit_extraScans.text()))) excludedetidlist = [] return excludedetidlist def _uiGetExpScanNumber(self): """ Get experiment number and scan number from widgets for merged """ expnostr = self.ui.lineEdit_expNo.text() scannostr = self.ui.lineEdit_scanNo.text() try: expno = int(expnostr) scanno = int(scannostr) except ValueError: raise NotImplementedError("Either Exp No '%s' or Scan No '%s \ is not set up right as integer." % (expnostr, scannostr)) return (expno, scanno) def _uiGetExpScanTabMultiScans(self): """ Get exp number and scans from tab 3 """ try: expno = int(self.ui.lineEdit_expNo.text()) startscan = int(self.ui.lineEdit_scanStart.text()) endscan = int(self.ui.lineEdit_scanEnd.text()) except ValueError as e: raise RuntimeError("For merging scans, Exp No, Starting scan number and \ end scan number must be given: %s" % (str(e))) # scans = [startscan, endscan] + [others] - [excluded] status, extrascanlist = self._getIntArray(str(self.ui.lineEdit_extraScans.text())) if not status: raise RuntimeError(extrascanlist) status, excludedlist = self._getIntArray(str(self.ui.lineEdit_exclScans.text())) self._logDebug("Excluded list: %s" % (str(excludedlist))) if not status: self._logError(excludedlist) return scanslist = list(range(startscan, endscan + 1)) scanslist.extend(extrascanlist) scanslist = list(set(scanslist)) for scan in excludedlist: scanslist.remove(scan) return (expno, sorted(scanslist)) def _uiIsBinParamsChange(self, itab, binparams): """ Check whether current bin parameters are same as given value """ xmin, binsize, xmax = self._uiGetBinningParams(itab) newbinparams = [xmin, binsize, xmax] # check binning same = True for i in range(3): par_0 = binparams[i] par_1 = newbinparams[i] try: if abs(float(par_0) - float(par_1)) > 1.0E-6: same = False except TypeError: if par_0 is not None or par_1 is not None: same = False if not same: break change = not same if change: print("[D...............B]", end=' ') print("%s vs %s " % (str(xmin), str(self._tabBinParamDict[itab][0])), end=' ') print("%s vs %s " % (str(xmax), str(self._tabBinParamDict[itab][2])), end=' ') print("%s vs %s " % (str(binsize), str(self._tabBinParamDict[itab][1]))) else: print("[DB] Rebin = False") return change def _uiReduceData(self, itab, unit, expno=None, scanno=None): """ Rebin and plot by reading GUI widgets' value Arguments: - itab : index of the tab. 
Only 2m 3 and 4 are allowed - unit : string for target unit """ # Experiment number and Scan number if isinstance(expno, int) and isinstance(scanno, int): # Call from tab-3 multiple scan pass else: try: expno, scanno = self._uiGetExpScanNumber() except NotImplementedError as e: self._logError(str(e)) return # Get binning parameter xmin, binsize, xmax = self._uiGetBinningParams(itab) # Get wavelength try: if itab == 3: wavelength = float(self._myControl.getWavelength(expno, scanno)) else: wavelength = float(str(self.ui.lineEdit_wavelength.text())) except TypeError: if unit != '2theta': raise NotImplementedError('Wavelength must be specified for unit %s.' % (unit)) # Get scale factor try: scalefactor = self._getFloat(self.ui.lineEdit_normalizeMonitor) except EmptyError: scalefactor = None except ValueError as valueerror: raise ValueError("Unable to get normalization factor due to %s." % (str(valueerror))) # Rebin try: # rebinned = self._myControl.rebin(expno, scanno, unit, wavelength, xmin, binsize, xmax) excludeddetlist = self._uiGetExcludedDetectors() self._myControl.reduceSpicePDData(expno, scanno, unit, xmin, xmax, binsize, wavelength, excludeddetlist, scalefactor) # Record binning self._tabBinParamDict[itab] = [xmin, binsize, xmax] except NotImplementedError as e: self._logError(str(e)) return (False, expno, scanno) return (True, expno, scanno) def _uiReducePlotNoramlized(self, unit): """ Support Reduce2Theta, ReduceDspacing and ReduceQ """ itab = 2 canvas = self.ui.graphicsView_reducedData expno, scanno = self._uiGetExpScanNumber() change = self._uiIsBinParamsChange(itab, self._tabBinParamDict[itab]) # check whether line record if unit == self._currUnit and self._tabLineDict[itab].count((expno, scanno)) > 0 and not change: # there is no need to plot again as line exists return # reduce r = self._uiReduceData(2, unit) good = r[0] expno = r[1] scanno = r[2] # failed to reduce if not good: self._logError("Failed to reduce Exp %d Scan %d" % (expno, scanno)) return # clear canvas??? if unit != self._currUnit: clearcanvas = True elif not self.ui.checkBox_clearPrevious.isChecked(): # NOTE: naming of the widget is VERY confusing. Should be changed to keepPrevious clearcanvas = True else: clearcanvas = False # reset record dictionary if unit is different from present if clearcanvas: self._tabLineDict[itab] = [] self._currUnit = unit self._tabLineDict[itab].append((expno, scanno)) xlabel = self._getXLabelFromUnit(unit) label = "Exp %s Scan %s" % (str(expno), str(scanno)) self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=clearcanvas) def _logDebug(self, dbinfo): """ Log debug information """ print(dbinfo) def _logError(self, errinfo): """ Log error information """ QtGui.QMessageBox.information(self, "Click!", errinfo) def _logNotice(self, loginfo): """ Log error information """ msg = '[Notice] %s' % loginfo print(msg) # QtGui.QMessageBox.information(self, "Click!", msg) def _logWarning(self, warning_info): """ Log error information """ msg = "[Warning]: %s" % (warning_info) QtGui.QMessageBox.information(self, "OK!", msg) def _getFloat(self, lineedit): """ Get integer from line edit Exception: ValueError if empty or no input """ valuestr = str(lineedit.text()).strip() if len(valuestr) == 0: raise EmptyError("Input is empty. 
It cannot be converted to float.") try: value = float(valuestr) except ValueError as e: raise e return value def _getInteger(self, lineedit): """ Get integer from line edit """ valuestr = str(lineedit.text()).strip() if len(valuestr) == 0: raise EmptyError("Input is empty. It cannot be converted to integer.") try: value = int(valuestr) except ValueError as e: raise e return value def _getIntArray(self, intliststring): """ Validate whether the string can be divided into integer strings. Allowed: a, b, c-d, e, f Return :: 2-tuple (status, list/error message) """ intliststring = str(intliststring) if intliststring == "": return (True, []) # Split by "," termlevel0s = intliststring.split(",") intlist = [] # For each term errmsg = "" returnstatus = True for level0term in termlevel0s: level0term = level0term.strip() # split upon dash - numdashes = level0term.count("-") if numdashes == 0: # one integer valuestr = level0term try: intvalue = int(valuestr) if str(intvalue) != valuestr: returnstatus = False errmsg = "Contains non-integer string %s." % (valuestr) except ValueError: returnstatus = False errmsg = "String %s is not an integer." % (valuestr) else: intlist.append(intvalue) elif numdashes == 1: # Integer range twoterms = level0term.split("-") templist = [] for i in range(2): valuestr = twoterms[i] try: intvalue = int(valuestr) if str(intvalue) != valuestr: returnstatus = False errmsg = "Contains non-integer string %s." % (valuestr) except ValueError: returnstatus = False errmsg = "String %s is not an integer." % (valuestr) else: templist.append(intvalue) # break loop if not returnstatus: break intlist.extend(range(templist[0], templist[1] + 1)) else: # Undefined siutation returnstatus = False errmsg = "Term %s contains more than 1 dash." % (level0term) # break loop if something is wrong if not returnstatus: break # Return with false if not returnstatus: return (False, errmsg) return (True, intlist) def _getXLabelFromUnit(self, unit): """ Get X-label from unit """ if unit == '2theta': xlabel = r'$2\theta$ (Degrees)' elif unit == 'dSpacing': xlabel = r"d $(\AA)$" elif unit == 'Momentum Transfer (Q)': xlabel = r"Q $(\AA^{-1})$" else: xlabel = 'Wacky Unknown' return xlabel
gpl-3.0
toobaz/pandas
pandas/core/groupby/generic.py
1
62983
""" Define the SeriesGroupBy and DataFrameGroupBy classes that hold the groupby interfaces (and some implementations). These are user facing as the result of the ``df.groupby(...)`` operations, which here returns a DataFrameGroupBy object. """ from collections import OrderedDict, abc, namedtuple import copy import functools from functools import partial from textwrap import dedent import typing from typing import Any, Callable, FrozenSet, Iterator, Sequence, Type, Union import warnings import numpy as np from pandas._libs import Timestamp, lib from pandas.compat import PY36 from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.cast import maybe_convert_objects, maybe_downcast_to_dtype from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_bool, is_datetimelike, is_dict_like, is_integer_dtype, is_interval_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, ) from pandas.core.dtypes.missing import _isna_ndarraylike, isna, notna from pandas._typing import FrameOrSeries import pandas.core.algorithms as algorithms from pandas.core.base import DataError, SpecificationError import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame, _shared_docs from pandas.core.groupby import base from pandas.core.groupby.groupby import ( GroupBy, _apply_docs, _transform_template, groupby, ) from pandas.core.index import Index, MultiIndex, _all_indexes_same import pandas.core.indexes.base as ibase from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series from pandas.core.sparse.frame import SparseDataFrame from pandas.plotting import boxplot_frame_groupby NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"]) # TODO(typing) the return value on this callable should be any *scalar*. AggScalar = Union[str, Callable[..., Any]] # TODO: validate types on ScalarResult and move to _typing # Blocked from using by https://github.com/python/mypy/issues/1484 # See note at _mangle_lambda_list ScalarResult = typing.TypeVar("ScalarResult") def whitelist_method_generator( base_class: Type[GroupBy], klass: Type[FrameOrSeries], whitelist: FrozenSet[str] ) -> Iterator[str]: """ Yields all GroupBy member defs for DataFrame/Series names in whitelist. Parameters ---------- base_class : Groupby class base class klass : DataFrame or Series class class where members are defined. whitelist : frozenset Set of names of klass methods to be constructed Returns ------- The generator yields a sequence of strings, each suitable for exec'ing, that define implementations of the named methods for DataFrameGroupBy or SeriesGroupBy. Since we don't want to override methods explicitly defined in the base class, any such name is skipped. """ property_wrapper_template = """@property def %(name)s(self) : \"""%(doc)s\""" return self.__getattr__('%(name)s')""" for name in whitelist: # don't override anything that was explicitly defined # in the base class if hasattr(base_class, name): continue # ugly, but we need the name string itself in the method. 
f = getattr(klass, name) doc = f.__doc__ doc = doc if type(doc) == str else "" wrapper_template = property_wrapper_template params = {"name": name, "doc": doc} yield wrapper_template % params class NDFrameGroupBy(GroupBy): def _iterate_slices(self): if self.axis == 0: # kludge if self._selection is None: slice_axis = self.obj.columns else: slice_axis = self._selection_list slicer = lambda x: self.obj[x] else: slice_axis = self.obj.index slicer = self.obj.xs for val in slice_axis: if val in self.exclusions: continue yield val, slicer(val) def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1): new_items, new_blocks = self._cython_agg_blocks( how, alt=alt, numeric_only=numeric_only, min_count=min_count ) return self._wrap_agged_blocks(new_items, new_blocks) _block_agg_axis = 0 def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): # TODO: the actual managing of mgr_locs is a PITA # here, it should happen via BlockManager.combine data, agg_axis = self._get_data_to_aggregate() if numeric_only: data = data.get_numeric_data(copy=False) new_blocks = [] new_items = [] deleted_items = [] no_result = object() for block in data.blocks: # Avoid inheriting result from earlier in the loop result = no_result locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( block.values, how, axis=agg_axis, min_count=min_count ) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions # try to python agg if alt is None: # we cannot perform the operation # in an alternate way, exclude the block deleted_items.append(locs) continue # call our grouper again with only this block obj = self.obj[data.items[locs]] s = groupby(obj, self.grouper) try: result = s.aggregate(lambda x: alt(x, axis=self.axis)) except TypeError: # we may have an exception in trying to aggregate # continue and exclude the block deleted_items.append(locs) continue finally: if result is not no_result: dtype = block.values.dtype # see if we can cast the block back to the original dtype result = block._try_coerce_and_cast_result(result, dtype=dtype) newb = block.make_block(result) new_items.append(locs) new_blocks.append(newb) if len(new_blocks) == 0: raise DataError("No numeric types to aggregate") # reset the locs in the blocks to correspond to our # current ordering indexer = np.concatenate(new_items) new_items = data.items.take(np.sort(indexer)) if len(deleted_items): # we need to adjust the indexer to account for the # items we have removed # really should be done in internals :< deleted = np.concatenate(deleted_items) ai = np.arange(len(data)) mask = np.zeros(len(data)) mask[deleted] = 1 indexer = (ai - mask.cumsum())[indexer] offset = 0 for b in new_blocks: loc = len(b.mgr_locs) b.mgr_locs = indexer[offset : (offset + loc)] offset += loc return new_items, new_blocks def aggregate(self, func, *args, **kwargs): _level = kwargs.pop("_level", None) relabeling = func is None and _is_multi_agg_with_relabel(**kwargs) if relabeling: func, columns, order = _normalize_keyword_aggregation(kwargs) kwargs = {} elif func is None: # nicer error message raise TypeError("Must provide 'func' or tuples of " "'(column, aggfunc).") func = _maybe_mangle_lambdas(func) result, how = self._aggregate(func, _level=_level, *args, **kwargs) if how is None: return result if result is None: # grouper specific aggregations if self.grouper.nkeys > 1: return self._python_agg_general(func, *args, **kwargs) else: # try to treat as if we are passing a list try: assert not args 
and not kwargs result = self._aggregate_multiple_funcs( [func], _level=_level, _axis=self.axis ) result.columns = Index( result.columns.levels[0], name=self._selected_obj.columns.name ) if isinstance(self.obj, SparseDataFrame): # Backwards compat for groupby.agg() with sparse # values. concat no longer converts DataFrame[Sparse] # to SparseDataFrame, so we do it here. result = SparseDataFrame(result._data) except Exception: result = self._aggregate_generic(func, *args, **kwargs) if not self.as_index: self._insert_inaxis_grouper_inplace(result) result.index = np.arange(len(result)) if relabeling: result = result[order] result.columns = columns return result._convert(datetime=True) agg = aggregate def _aggregate_generic(self, func, *args, **kwargs): if self.grouper.nkeys != 1: raise AssertionError("Number of keys must be 1") axis = self.axis obj = self._obj_with_exclusions result = OrderedDict() if axis != obj._info_axis_number: try: for name, data in self: result[name] = self._try_cast(func(data, *args, **kwargs), data) except Exception: return self._aggregate_item_by_item(func, *args, **kwargs) else: for name in self.indices: try: data = self.get_group(name, obj=obj) result[name] = self._try_cast(func(data, *args, **kwargs), data) except Exception: wrapper = lambda x: func(x, *args, **kwargs) result[name] = data.apply(wrapper, axis=axis) return self._wrap_generic_output(result, obj) def _wrap_aggregated_output(self, output, names=None): raise AbstractMethodError(self) def _aggregate_item_by_item(self, func, *args, **kwargs): # only for axis==0 obj = self._obj_with_exclusions result = OrderedDict() cannot_agg = [] errors = None for item in obj: try: data = obj[item] colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) cast = self._transform_should_cast(func) result[item] = colg.aggregate(func, *args, **kwargs) if cast: result[item] = self._try_cast(result[item], data) except ValueError: cannot_agg.append(item) continue except TypeError as e: cannot_agg.append(item) errors = e continue result_columns = obj.columns if cannot_agg: result_columns = result_columns.drop(cannot_agg) # GH6337 if not len(result_columns) and errors is not None: raise errors return DataFrame(result, columns=result_columns) def _decide_output_index(self, output, labels): if len(output) == len(labels): output_keys = labels else: output_keys = sorted(output) try: output_keys.sort() except Exception: # pragma: no cover pass if isinstance(labels, MultiIndex): output_keys = MultiIndex.from_tuples(output_keys, names=labels.names) return output_keys def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: return DataFrame(index=keys) key_names = self.grouper.names # GH12824. def first_not_none(values): try: return next(com._not_none(*values)) except StopIteration: return None v = first_not_none(values) if v is None: # GH9684. If all values are None, then this will throw an error. # We'd prefer it return an empty dataframe. 
return DataFrame() elif isinstance(v, DataFrame): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) elif self.grouper.groupings is not None: if len(self.grouper.groupings) > 1: key_index = self.grouper.result_index else: ping = self.grouper.groupings[0] if len(keys) == ping.ngroups: key_index = ping.group_index key_index.name = key_names[0] key_lookup = Index(keys) indexer = key_lookup.get_indexer(key_index) # reorder the values values = [values[i] for i in indexer] else: key_index = Index(keys, name=key_names[0]) # don't use the key indexer if not self.as_index: key_index = None # make Nones an empty object v = first_not_none(values) if v is None: return DataFrame() elif isinstance(v, NDFrame): values = [ x if x is not None else v._constructor(**v._construct_axes_dict()) for x in values ] v = values[0] if isinstance(v, (np.ndarray, Index, Series)): if isinstance(v, Series): applied_index = self._selected_obj._get_axis(self.axis) all_indexed_same = _all_indexes_same([x.index for x in values]) singular_series = len(values) == 1 and applied_index.nlevels == 1 # GH3596 # provide a reduction (Frame -> Series) if groups are # unique if self.squeeze: # assign the name to this series if singular_series: values[0].name = keys[0] # GH2893 # we have series in the values array, we want to # produce a series: # if any of the sub-series are not indexed the same # OR we don't have a multi-index and we have only a # single values return self._concat_objects( keys, values, not_indexed_same=not_indexed_same ) # still a series # path added as of GH 5545 elif all_indexed_same: from pandas.core.reshape.concat import concat return concat(values) if not all_indexed_same: # GH 8467 return self._concat_objects(keys, values, not_indexed_same=True) try: if self.axis == 0: # GH6124 if the list of Series have a consistent name, # then propagate that name to the result. index = v.index.copy() if index.name is None: # Only propagate the series name to the result # if all series have a consistent name. If the # series do not have a consistent name, do # nothing. 
names = {v.name for v in values} if len(names) == 1: index.name = list(names)[0] # normally use vstack as its faster than concat # and if we have mi-columns if ( isinstance(v.index, MultiIndex) or key_index is None or isinstance(key_index, MultiIndex) ): stacked_values = np.vstack([np.asarray(v) for v in values]) result = DataFrame( stacked_values, index=key_index, columns=index ) else: # GH5788 instead of stacking; concat gets the # dtypes correct from pandas.core.reshape.concat import concat result = concat( values, keys=key_index, names=key_index.names, axis=self.axis, ).unstack() result.columns = index else: stacked_values = np.vstack([np.asarray(v) for v in values]) result = DataFrame( stacked_values.T, index=v.index, columns=key_index ) except (ValueError, AttributeError): # GH1738: values is list of arrays of unequal lengths fall # through to the outer else caluse return Series(values, index=key_index, name=self._selection_name) # if we have date/time like in the original, then coerce dates # as we are stacking can easily have object dtypes here so = self._selected_obj if so.ndim == 2 and so.dtypes.apply(is_datetimelike).any(): result = _recast_datetimelike_result(result) else: result = result._convert(datetime=True) return self._reindex_output(result) # values are not series or array-like but scalars else: # only coerce dates if we find at least 1 datetime coerce = any(isinstance(x, Timestamp) for x in values) # self._selection_name not passed through to Series as the # result should not take the name of original selection # of columns return Series(values, index=key_index)._convert( datetime=True, coerce=coerce ) else: # Handle cases like BinGrouper return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) def _transform_general(self, func, *args, **kwargs): from pandas.core.reshape.concat import concat applied = [] obj = self._obj_with_exclusions gen = self.grouper.get_iterator(obj, axis=self.axis) fast_path, slow_path = self._define_paths(func, *args, **kwargs) path = None for name, group in gen: object.__setattr__(group, "name", name) if path is None: # Try slow path and fast path. 
try: path, res = self._choose_path(fast_path, slow_path, group) except TypeError: return self._transform_item_by_item(obj, fast_path) except ValueError: msg = "transform must return a scalar value for each group" raise ValueError(msg) else: res = path(group) if isinstance(res, Series): # we need to broadcast across the # other dimension; this will preserve dtypes # GH14457 if not np.prod(group.shape): continue elif res.index.is_(obj.index): r = concat([res] * len(group.columns), axis=1) r.columns = group.columns r.index = group.index else: r = DataFrame( np.concatenate([res.values] * len(group.index)).reshape( group.shape ), columns=group.columns, index=group.index, ) applied.append(r) else: applied.append(res) concat_index = obj.columns if self.axis == 0 else obj.index other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1 concatenated = concat(applied, axis=self.axis, verify_integrity=False) concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False) return self._set_result_index_ordered(concatenated) @Substitution(klass="DataFrame", selected="") @Appender(_transform_template) def transform(self, func, *args, **kwargs): # optimized transforms func = self._get_cython_func(func) or func if isinstance(func, str): if not (func in base.transform_kernel_whitelist): msg = "'{func}' is not a valid function name for transform(name)" raise ValueError(msg.format(func=func)) if func in base.cythonized_kernels: # cythonized transformation or canned "reduction+broadcast" return getattr(self, func)(*args, **kwargs) else: # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. result = getattr(self, func)(*args, **kwargs) else: return self._transform_general(func, *args, **kwargs) # a reduction transform if not isinstance(result, DataFrame): return self._transform_general(func, *args, **kwargs) obj = self._obj_with_exclusions # nuisance columns if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) return self._transform_fast(result, obj, func) def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """ # if there were groups with no observations (Categorical only?) 
# try casting data to original dtype cast = self._transform_should_cast(func_nm) # for each col, reshape to to size of original frame # by take operation ids, _, ngroup = self.grouper.group_info output = [] for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) if cast: res = self._try_cast(res, obj.iloc[:, i]) output.append(res) return DataFrame._from_arrays(output, columns=result.columns, index=obj.index) def _define_paths(self, func, *args, **kwargs): if isinstance(func, str): fast_path = lambda group: getattr(group, func)(*args, **kwargs) slow_path = lambda group: group.apply( lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis ) else: fast_path = lambda group: func(group, *args, **kwargs) slow_path = lambda group: group.apply( lambda x: func(x, *args, **kwargs), axis=self.axis ) return fast_path, slow_path def _choose_path(self, fast_path, slow_path, group): path = slow_path res = slow_path(group) # if we make it here, test if we can use the fast path try: res_fast = fast_path(group) # verify fast path does not change columns (and names), otherwise # its results cannot be joined with those of the slow path if res_fast.columns != group.columns: return path, res # verify numerical equality with the slow path if res.shape == res_fast.shape: res_r = res.values.ravel() res_fast_r = res_fast.values.ravel() mask = notna(res_r) if (res_r[mask] == res_fast_r[mask]).all(): path = fast_path except Exception: pass return path, res def _transform_item_by_item(self, obj, wrapper): # iterate through columns output = {} inds = [] for i, col in enumerate(obj): try: output[col] = self[col].transform(wrapper) inds.append(i) except Exception: pass if len(output) == 0: # pragma: no cover raise TypeError("Transform function invalid for data types") columns = obj.columns if len(output) < len(obj.columns): columns = columns.take(inds) return DataFrame(output, index=obj.index, columns=columns) def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- f : function Function to apply to each subframe. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) 
A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0 """ indices = [] obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) for name, group in gen: object.__setattr__(group, "name", name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: # allow e.g., scalars and frames to pass pass # interpret the result of the filter if is_bool(res) or (is_scalar(res) and isna(res)): if res and notna(res): indices.append(self._get_index(name)) else: # non scalars aren't allowed raise TypeError( "filter function returned a %s, " "but expected a scalar bool" % type(res).__name__ ) return self._apply_filter(indices, dropna) class SeriesGroupBy(GroupBy): # # Make class defs of attributes on SeriesGroupBy whitelist _apply_whitelist = base.series_apply_whitelist for _def_str in whitelist_method_generator(GroupBy, Series, _apply_whitelist): exec(_def_str) @property def _selection_name(self): """ since we are a series, we by definition only have a single name, but may be the result of a selection or the name of our object """ if self._selection is None: return self.obj.name else: return self._selection _agg_see_also_doc = dedent( """ See Also -------- pandas.Series.groupby.apply pandas.Series.groupby.transform pandas.Series.aggregate """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.groupby([1, 1, 2, 2]).min() 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg('min') 1 1 2 3 dtype: int64 >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) min max 1 1 2 2 3 4 The output column names can be controlled by passing the desired column names and aggregations as keyword arguments. >>> s.groupby([1, 1, 2, 2]).agg( ... minimum='min', ... maximum='max', ... ) minimum maximum 1 1 2 2 3 4 """ ) @Appender( _apply_docs["template"].format( input="series", examples=_apply_docs["series_examples"] ) ) def apply(self, func, *args, **kwargs): return super().apply(func, *args, **kwargs) @Substitution( see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="Series", axis="", ) @Appender(_shared_docs["aggregate"]) def aggregate(self, func_or_funcs=None, *args, **kwargs): _level = kwargs.pop("_level", None) relabeling = func_or_funcs is None columns = None no_arg_message = ( "Must provide 'func_or_funcs' or named " "aggregation **kwargs." ) if relabeling: columns = list(kwargs) if not PY36: # sort for 3.5 and earlier columns = list(sorted(columns)) func_or_funcs = [kwargs[col] for col in columns] kwargs = {} if not columns: raise TypeError(no_arg_message) if isinstance(func_or_funcs, str): return getattr(self, func_or_funcs)(*args, **kwargs) if isinstance(func_or_funcs, abc.Iterable): # Catch instances of lists / tuples # but not the class list / tuple itself. 
func_or_funcs = _maybe_mangle_lambdas(func_or_funcs) ret = self._aggregate_multiple_funcs(func_or_funcs, (_level or 0) + 1) if relabeling: ret.columns = columns else: cyfunc = self._get_cython_func(func_or_funcs) if cyfunc and not args and not kwargs: return getattr(self, cyfunc)() if self.grouper.nkeys > 1: return self._python_agg_general(func_or_funcs, *args, **kwargs) try: return self._python_agg_general(func_or_funcs, *args, **kwargs) except Exception: result = self._aggregate_named(func_or_funcs, *args, **kwargs) index = Index(sorted(result), name=self.grouper.names[0]) ret = Series(result, index=index) if not self.as_index: # pragma: no cover print("Warning, ignoring as_index=True") # _level handled at higher if not _level and isinstance(ret, dict): from pandas import concat ret = concat(ret, axis=1) return ret agg = aggregate def _aggregate_multiple_funcs(self, arg, _level): if isinstance(arg, dict): # show the deprecation, but only if we # have not shown a higher level one # GH 15931 if isinstance(self._selected_obj, Series) and _level <= 1: msg = dedent( """\ using a dict on a Series for aggregation is deprecated and will be removed in a future version. Use \ named aggregation instead. >>> grouper.agg(name_1=func_1, name_2=func_2) """ ) warnings.warn(msg, FutureWarning, stacklevel=3) columns = list(arg.keys()) arg = arg.items() elif any(isinstance(x, (tuple, list)) for x in arg): arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] # indicated column order columns = next(zip(*arg)) else: # list of functions / function names columns = [] for f in arg: columns.append(com.get_callable_name(f) or f) arg = zip(columns, arg) results = OrderedDict() for name, func in arg: obj = self if name in results: raise SpecificationError( "Function names must be unique, found multiple named " "{}".format(name) ) # reset the cache so that we # only include the named selection if name in self._selected_obj: obj = copy.copy(obj) obj._reset_cache() obj._selection = name results[name] = obj.aggregate(func) if any(isinstance(x, DataFrame) for x in results.values()): # let higher level handle if _level: return results return DataFrame(results, columns=columns) def _wrap_output(self, output, index, names=None): """ common agg/transform wrapping logic """ output = output[self._selection_name] if names is not None: return DataFrame(output, index=index, columns=names) else: name = self._selection_name if name is None: name = self._selected_obj.name return Series(output, index=index, name=name) def _wrap_aggregated_output(self, output, names=None): result = self._wrap_output( output=output, index=self.grouper.result_index, names=names ) return self._reindex_output(result)._convert(datetime=True) def _wrap_transformed_output(self, output, names=None): return self._wrap_output(output=output, index=self.obj.index, names=names) def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: # GH #6265 return Series([], name=self._selection_name, index=keys) def _get_index(): if self.grouper.nkeys > 1: index = MultiIndex.from_tuples(keys, names=self.grouper.names) else: index = Index(keys, name=self.grouper.names[0]) return index if isinstance(values[0], dict): # GH #823 #24880 index = _get_index() result = self._reindex_output(DataFrame(values, index=index)) # if self.observed is False, # keep all-NaN rows created while re-indexing result = result.stack(dropna=self.observed) result.name = self._selection_name return result if isinstance(values[0], Series): return 
self._concat_objects(keys, values, not_indexed_same=not_indexed_same) elif isinstance(values[0], DataFrame): # possible that Series -> DataFrame by applied function return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) else: # GH #6265 #24880 result = Series(data=values, index=_get_index(), name=self._selection_name) return self._reindex_output(result) def _aggregate_named(self, func, *args, **kwargs): result = OrderedDict() for name, group in self: group.name = name output = func(group, *args, **kwargs) if isinstance(output, (Series, Index, np.ndarray)): raise Exception("Must produce aggregated value") result[name] = self._try_cast(output, group) return result @Substitution(klass="Series", selected="A.") @Appender(_transform_template) def transform(self, func, *args, **kwargs): func = self._get_cython_func(func) or func if isinstance(func, str): if not (func in base.transform_kernel_whitelist): msg = "'{func}' is not a valid function name for transform(name)" raise ValueError(msg.format(func=func)) if func in base.cythonized_kernels: # cythonized transform or canned "agg+broadcast" return getattr(self, func)(*args, **kwargs) else: # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. return self._transform_fast( lambda: getattr(self, func)(*args, **kwargs), func ) # reg transform klass = self._selected_obj.__class__ results = [] wrapper = lambda x: func(x, *args, **kwargs) for name, group in self: object.__setattr__(group, "name", name) res = wrapper(group) if isinstance(res, (ABCDataFrame, ABCSeries)): res = res._values indexer = self._get_index(name) s = klass(res, indexer) results.append(s) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat result = concat(results).sort_index() else: result = Series() # we will only try to coerce the result type if # we have a numeric dtype, as these are *always* udfs # the cython take a different path (and casting) dtype = self._selected_obj.dtype if is_numeric_dtype(dtype): result = maybe_downcast_to_dtype(result, dtype) result.name = self._selected_obj.name result.index = self._selected_obj.index return result def _transform_fast(self, func, func_nm): """ fast version of transform, only applicable to builtin/cythonizable functions """ if isinstance(func, str): func = getattr(self, func) ids, _, ngroup = self.grouper.group_info cast = self._transform_should_cast(func_nm) out = algorithms.take_1d(func()._values, ids) if cast: out = self._try_cast(out, self.obj) return Series(out, index=self.obj.index, name=self.obj.name) def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 
1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x, *args, **kwargs): b = wrapper(x, *args, **kwargs) return b and notna(b) try: indices = [ self._get_index(name) for name, group in self if true_and_notna(group) ] except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: raise TypeError("the filter must return a boolean result") filtered = self._apply_filter(indices, dropna) return filtered def nunique(self, dropna=True): """ Return number of unique elements in the group. Returns ------- Series Number of unique values within each group. """ ids, _, _ = self.grouper.group_info val = self.obj._internal_get_values() try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes msg = "val.dtype must be object, got {}".format(val.dtype) assert val.dtype == object, msg val, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((val, ids)) _isna = lambda a: a == -1 else: _isna = isna ids, val = ids[sorter], val[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, val[1:] != val[:-1]] # 1st item of each group is a new unique observation mask = _isna(val) if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype("int64", copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out return Series(res, index=ri, name=self._selection_name) @Appender(Series.describe.__doc__) def describe(self, **kwargs): result = self.apply(lambda x: x.describe(**kwargs)) if self.axis == 1: return result.T return result.unstack() def value_counts( self, normalize=False, sort=True, ascending=False, bins=None, dropna=True ): from pandas.core.reshape.tile import cut from pandas.core.reshape.merge import _get_join_indexers if bins is not None and not np.iterable(bins): # scalar bins cannot be done at top level # in a backward compatible way return self.apply( Series.value_counts, normalize=normalize, sort=sort, ascending=ascending, bins=bins, ) ids, _, _ = self.grouper.group_info val = self.obj._internal_get_values() # groupby removes null keys from groupings mask = ids != -1 ids, val = ids[mask], val[mask] if bins is None: lab, lev = algorithms.factorize(val, sort=True) llab = lambda lab, inc: lab[inc] else: # lab is a Categorical with categories an IntervalIndex lab = cut(Series(val), bins, include_lowest=True) lev = lab.cat.categories lab = lev.take(lab.cat.codes) llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] if is_interval_dtype(lab): # TODO: should we do this inside II? 
sorter = np.lexsort((lab.left, lab.right, ids)) else: sorter = np.lexsort((lab, ids)) ids, lab = ids[sorter], lab[sorter] # group boundaries are where group ids change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] # new values are where sorted labels change lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) inc = np.r_[True, lchanges] inc[idx] = True # group boundaries are also new values out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts # num. of times each group should be repeated rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) # multi-index components labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)] levels = [ping.group_index for ping in self.grouper.groupings] + [lev] names = self.grouper.names + [self._selection_name] if dropna: mask = labels[-1] != -1 if mask.all(): dropna = False else: out, labels = out[mask], [label[mask] for label in labels] if normalize: out = out.astype("float") d = np.diff(np.r_[idx, len(ids)]) if dropna: m = ids[lab == -1] np.add.at(d, m, -1) acc = rep(d)[mask] else: acc = rep(d) out /= acc if sort and bins is None: cat = ids[inc][mask] if dropna else ids[inc] sorter = np.lexsort((out if ascending else -out, cat)) out, labels[-1] = out[sorter], labels[-1][sorter] if bins is None: mi = MultiIndex( levels=levels, codes=labels, names=names, verify_integrity=False ) if is_integer_dtype(out): out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) # for compat. with libgroupby.value_counts need to ensure every # bin is present at every index level, null filled with zeros diff = np.zeros(len(out), dtype="bool") for lab in labels[:-1]: diff |= np.r_[True, lab[1:] != lab[:-1]] ncat, nbin = diff.sum(), len(levels[-1]) left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] right = [diff.cumsum() - 1, labels[-1]] _, idx = _get_join_indexers(left, right, sort=False, how="left") out = np.where(idx != -1, out[idx], 0) if sort: sorter = np.lexsort((out if ascending else -out, left[0])) out, left[-1] = out[sorter], left[-1][sorter] # build the multi-index w/ full levels codes = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1])) codes.append(left[-1]) mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False) if is_integer_dtype(out): out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) def count(self): """ Compute count of group, excluding missing values. Returns ------- Series Count of values within each group. 
""" ids, _, ngroups = self.grouper.group_info val = self.obj._internal_get_values() mask = (ids != -1) & ~isna(val) ids = ensure_platform_int(ids) minlength = ngroups or 0 out = np.bincount(ids[mask], minlength=minlength) return Series( out, index=self.grouper.result_index, name=self._selection_name, dtype="int64", ) def _apply_to_column_groupbys(self, func): """ return a pass thru """ return func(self) def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None): """Calculate pct_change of each value to previous entry in group""" # TODO: Remove this conditional when #23918 is fixed if freq: return self.apply( lambda x: x.pct_change( periods=periods, fill_method=fill_method, limit=limit, freq=freq ) ) filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.labels) shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1 class DataFrameGroupBy(NDFrameGroupBy): _apply_whitelist = base.dataframe_apply_whitelist # # Make class defs of attributes on DataFrameGroupBy whitelist. for _def_str in whitelist_method_generator(GroupBy, DataFrame, _apply_whitelist): exec(_def_str) _block_agg_axis = 1 _agg_see_also_doc = dedent( """ See Also -------- pandas.DataFrame.groupby.apply pandas.DataFrame.groupby.transform pandas.DataFrame.aggregate """ ) _agg_examples_doc = dedent( """ Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 2], ... 'B': [1, 2, 3, 4], ... 'C': np.random.randn(4)}) >>> df A B C 0 1 1 0.362838 1 1 2 0.227877 2 2 3 1.267767 3 2 4 -0.562860 The aggregation is for each column. >>> df.groupby('A').agg('min') B C A 1 1 0.227877 2 3 -0.562860 Multiple aggregations >>> df.groupby('A').agg(['min', 'max']) B C min max min max A 1 1 2 0.227877 0.362838 2 3 4 -0.562860 1.267767 Select a column for aggregation >>> df.groupby('A').B.agg(['min', 'max']) min max A 1 1 2 2 3 4 Different aggregations per column >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'}) B C min max sum A 1 1 2 0.590716 2 3 4 0.704907 To control the output names with different aggregations per column, pandas supports "named aggregation" >>> df.groupby("A").agg( ... b_min=pd.NamedAgg(column="B", aggfunc="min"), ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")) b_min c_sum A 1 1 -1.956929 2 3 -0.322183 - The keywords are the *output* column names - The values are tuples whose first element is the column to select and the second element is the aggregation to apply to that column. Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields ``['column', 'aggfunc']`` to make it clearer what the arguments are. As usual, the aggregation can be a callable or a string alias. See :ref:`groupby.aggregate.named` for more. 
""" ) @Substitution( see_also=_agg_see_also_doc, examples=_agg_examples_doc, versionadded="", klass="DataFrame", axis="", ) @Appender(_shared_docs["aggregate"]) def aggregate(self, arg=None, *args, **kwargs): return super().aggregate(arg, *args, **kwargs) agg = aggregate def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy( subset, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, as_index=self.as_index, observed=self.observed, ) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy( subset, selection=key, grouper=self.grouper, observed=self.observed ) raise AssertionError("invalid ndim for _gotitem") def _wrap_generic_output(self, result, obj): result_index = self.grouper.levels[0] if self.axis == 0: return DataFrame(result, index=obj.columns, columns=result_index).T else: return DataFrame(result, index=obj.index, columns=result_index) def _get_data_to_aggregate(self): obj = self._obj_with_exclusions if self.axis == 1: return obj.T._data, 1 else: return obj._data, 1 def _insert_inaxis_grouper_inplace(self, result): # zip in reverse so we can always insert at loc 0 izip = zip( *map( reversed, ( self.grouper.names, self.grouper.get_group_levels(), [grp.in_axis for grp in self.grouper.groupings], ), ) ) for name, lev, in_axis in izip: if in_axis: result.insert(0, name, lev) def _wrap_aggregated_output(self, output, names=None): agg_axis = 0 if self.axis == 1 else 1 agg_labels = self._obj_with_exclusions._get_axis(agg_axis) output_keys = self._decide_output_index(output, agg_labels) if not self.as_index: result = DataFrame(output, columns=output_keys) self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: index = self.grouper.result_index result = DataFrame(output, index=index, columns=output_keys) if self.axis == 1: result = result.T return self._reindex_output(result)._convert(datetime=True) def _wrap_transformed_output(self, output, names=None): return DataFrame(output, index=self.obj.index) def _wrap_agged_blocks(self, items, blocks): if not self.as_index: index = np.arange(blocks[0].values.shape[-1]) mgr = BlockManager(blocks, [items, index]) result = DataFrame(mgr) self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: index = self.grouper.result_index mgr = BlockManager(blocks, [items, index]) result = DataFrame(mgr) if self.axis == 1: result = result.T return self._reindex_output(result)._convert(datetime=True) def _iterate_column_groupbys(self): for i, colname in enumerate(self._selected_obj.columns): yield colname, SeriesGroupBy( self._selected_obj.iloc[:, i], selection=colname, grouper=self.grouper, exclusions=self.exclusions, ) def _apply_to_column_groupbys(self, func): from pandas.core.reshape.concat import concat return concat( (func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()), keys=self._selected_obj.columns, axis=1, ) def count(self): """ Compute count of group, excluding missing values. Returns ------- DataFrame Count of values within each group. 
""" data, _ = self._get_data_to_aggregate() ids, _, ngroups = self.grouper.group_info mask = ids != -1 val = ( (mask & ~_isna_ndarraylike(np.atleast_2d(blk.get_values()))) for blk in data.blocks ) loc = (blk.mgr_locs for blk in data.blocks) counter = partial(lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1) blk = map(make_block, map(counter, val), loc) return self._wrap_agged_blocks(data.items, list(blk)) def nunique(self, dropna=True): """ Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y """ obj = self._selected_obj def groupby_series(obj, col=None): return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique( dropna=dropna ) if isinstance(obj, Series): results = groupby_series(obj) else: from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) results.columns.names = obj.columns.names if not self.as_index: results.index = ibase.default_index(len(results)) return results boxplot = boxplot_frame_groupby def _is_multi_agg_with_relabel(**kwargs): """ Check whether kwargs passed to .agg look like multi-agg with relabeling. Parameters ---------- **kwargs : dict Returns ------- bool Examples -------- >>> _is_multi_agg_with_relabel(a='max') False >>> _is_multi_agg_with_relabel(a_max=('a', 'max'), ... a_min=('a', 'min')) True >>> _is_multi_agg_with_relabel() False """ return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and kwargs def _normalize_keyword_aggregation(kwargs): """ Normalize user-provided "named aggregation" kwargs. Transforms from the new ``Dict[str, NamedAgg]`` style kwargs to the old OrderedDict[str, List[scalar]]]. Parameters ---------- kwargs : dict Returns ------- aggspec : dict The transformed kwargs. columns : List[str] The user-provided keys. order : List[Tuple[str, str]] Pairs of the input and output column names. Examples -------- >>> _normalize_keyword_aggregation({'output': ('input', 'sum')}) (OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')]) """ if not PY36: kwargs = OrderedDict(sorted(kwargs.items())) # Normalize the aggregation functions as Dict[column, List[func]], # process normally, then fixup the names. 
# TODO(Py35): When we drop python 3.5, change this to # defaultdict(list) # TODO: aggspec type: typing.OrderedDict[str, List[AggScalar]] # May be hitting https://github.com/python/mypy/issues/5958 # saying it doesn't have an attribute __name__ aggspec = OrderedDict() order = [] columns, pairs = list(zip(*kwargs.items())) for name, (column, aggfunc) in zip(columns, pairs): if column in aggspec: aggspec[column].append(aggfunc) else: aggspec[column] = [aggfunc] order.append((column, com.get_callable_name(aggfunc) or aggfunc)) return aggspec, columns, order # TODO: Can't use, because mypy doesn't like us setting __name__ # error: "partial[Any]" has no attribute "__name__" # the type is: # typing.Sequence[Callable[..., ScalarResult]] # -> typing.Sequence[Callable[..., ScalarResult]]: def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]: """ Possibly mangle a list of aggfuncs. Parameters ---------- aggfuncs : Sequence Returns ------- mangled: list-like A new AggSpec sequence, where lambdas have been converted to have unique names. Notes ----- If just one aggfunc is passed, the name will not be mangled. """ if len(aggfuncs) <= 1: # don't mangle for .agg([lambda x: .]) return aggfuncs i = 0 mangled_aggfuncs = [] for aggfunc in aggfuncs: if com.get_callable_name(aggfunc) == "<lambda>": aggfunc = functools.partial(aggfunc) aggfunc.__name__ = "<lambda_{}>".format(i) i += 1 mangled_aggfuncs.append(aggfunc) return mangled_aggfuncs def _maybe_mangle_lambdas(agg_spec: Any) -> Any: """ Make new lambdas with unique names. Parameters ---------- agg_spec : Any An argument to NDFrameGroupBy.agg. Non-dict-like `agg_spec` are pass through as is. For dict-like `agg_spec` a new spec is returned with name-mangled lambdas. Returns ------- mangled : Any Same type as the input. Examples -------- >>> _maybe_mangle_lambdas('sum') 'sum' >>> _maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP [<function __main__.<lambda_0>, <function pandas...._make_lambda.<locals>.f(*args, **kwargs)>] """ is_dict = is_dict_like(agg_spec) if not (is_dict or is_list_like(agg_spec)): return agg_spec mangled_aggspec = type(agg_spec)() # dict or OrderdDict if is_dict: for key, aggfuncs in agg_spec.items(): if is_list_like(aggfuncs) and not is_dict_like(aggfuncs): mangled_aggfuncs = _managle_lambda_list(aggfuncs) else: mangled_aggfuncs = aggfuncs mangled_aggspec[key] = mangled_aggfuncs else: mangled_aggspec = _managle_lambda_list(agg_spec) return mangled_aggspec def _recast_datetimelike_result(result: DataFrame) -> DataFrame: """ If we have date/time like in the original, then coerce dates as we are stacking can easily have object dtypes here. Parameters ---------- result : DataFrame Returns ------- DataFrame Notes ----- - Assumes Groupby._selected_obj has ndim==2 and at least one datetimelike column """ result = result.copy() obj_cols = [ idx for idx in range(len(result.columns)) if is_object_dtype(result.dtypes[idx]) ] # See GH#26285 for n in obj_cols: converted = maybe_convert_objects( result.iloc[:, n].values, convert_numeric=False ) result.iloc[:, n] = converted return result
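# ---------------------------------------------------------------------------
# Editor's note: the block below is an illustrative sketch appended by the
# editor and is NOT part of the original pandas source above. It restates, in
# a minimal self-contained form, the lambda name-mangling idea implemented by
# _managle_lambda_list / _maybe_mangle_lambdas: when several anonymous
# aggregators are passed to .agg(), each one is given a unique __name__ so the
# resulting output columns remain distinguishable. The helper name
# `mangle_lambdas` is hypothetical and chosen only for illustration.
import functools


def mangle_lambdas(aggfuncs):
    """Give each '<lambda>' in *aggfuncs* a unique name.

    A single spec (or any already-named callable) is passed through
    unchanged, mirroring the behaviour of _managle_lambda_list above.
    """
    if len(aggfuncs) <= 1:
        return aggfuncs
    mangled = []
    i = 0
    for func in aggfuncs:
        if getattr(func, "__name__", None) == "<lambda>":
            # functools.partial objects accept attribute assignment, so a
            # stable, unique __name__ can be attached without touching the
            # original lambda.
            func = functools.partial(func)
            func.__name__ = "<lambda_{}>".format(i)
            i += 1
        mangled.append(func)
    return mangled


# Expected behaviour (assumption: any CPython 3.x interpreter):
# >>> [f.__name__ for f in mangle_lambdas([lambda x: x.min(), lambda x: x.max()])]
# ['<lambda_0>', '<lambda_1>']
# >>> mangle_lambdas(['sum'])     # a single spec is passed through untouched
# ['sum']
# ---------------------------------------------------------------------------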
bsd-3-clause
JPFrancoia/scikit-learn
sklearn/cluster/k_means_.py
4
59475
"""K-means clustering""" # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org> # Thomas Rueckstiess <ruecksti@in.tum.de> # James Bergstra <james.bergstra@umontreal.ca> # Jan Schlueter <scikit-learn@jan-schlueter.de> # Nelle Varoquaux # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Robert Layton <robertlayton@gmail.com> # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, ClusterMixin, TransformerMixin from ..metrics.pairwise import euclidean_distances from ..utils.extmath import row_norms, squared_norm, stable_cumsum from ..utils.sparsefuncs_fast import assign_rows_csr from ..utils.sparsefuncs import mean_variance_axis from ..utils.fixes import astype from ..utils import check_array from ..utils import check_random_state from ..utils import as_float_array from ..utils import gen_batches from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..utils.random import choice from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.six import string_types from . import _k_means from ._k_means_elkan import k_means_elkan ############################################################################### # Initialization heuristic def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None): """Init n_clusters seeds according to k-means++ Parameters ----------- X: array or sparse matrix, shape (n_samples, n_features) The data to pick seeds for. To avoid memory copy, the input data should be double precision (dtype=np.float64). n_clusters: integer The number of seeds to choose x_squared_norms: array, shape (n_samples,) Squared Euclidean norm of each data point. random_state: numpy.RandomState The generator used to initialize the centers. n_local_trials: integer, optional The number of seeding trials for each center (except the first), of which the one reducing inertia the most is greedily chosen. Set to None to make the number of trials depend logarithmically on the number of seeds (2+log(k)); this is the default. Notes ----- Selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. see: Arthur, D. and Vassilvitskii, S. "k-means++: the advantages of careful seeding". ACM-SIAM symposium on Discrete algorithms. 2007 Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip, which is the implementation used in the aforementioned paper. """ n_samples, n_features = X.shape centers = np.empty((n_clusters, n_features), dtype=X.dtype) assert x_squared_norms is not None, 'x_squared_norms None in _k_init' # Set the number of local seeding trials if none is given if n_local_trials is None: # This is what Arthur/Vassilvitskii tried, but did not report # specific results for other than mentioning in the conclusion # that it helped. 
n_local_trials = 2 + int(np.log(n_clusters)) # Pick first center randomly center_id = random_state.randint(n_samples) if sp.issparse(X): centers[0] = X[center_id].toarray() else: centers[0] = X[center_id] # Initialize list of closest distances and calculate current potential closest_dist_sq = euclidean_distances( centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True) current_pot = closest_dist_sq.sum() # Pick the remaining n_clusters-1 points for c in range(1, n_clusters): # Choose center candidates by sampling with probability proportional # to the squared distance to the closest existing center rand_vals = random_state.random_sample(n_local_trials) * current_pot candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq), rand_vals) # Compute distances to center candidates distance_to_candidates = euclidean_distances( X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True) # Decide which candidate is the best best_candidate = None best_pot = None best_dist_sq = None for trial in range(n_local_trials): # Compute potential when including center candidate new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidates[trial]) new_pot = new_dist_sq.sum() # Store result if it is the best local trial so far if (best_candidate is None) or (new_pot < best_pot): best_candidate = candidate_ids[trial] best_pot = new_pot best_dist_sq = new_dist_sq # Permanently add best center candidate found in local tries if sp.issparse(X): centers[c] = X[best_candidate].toarray() else: centers[c] = X[best_candidate] current_pot = best_pot closest_dist_sq = best_dist_sq return centers ############################################################################### # K-means batch estimation by EM (expectation maximization) def _validate_center_shape(X, n_centers, centers): """Check if centers is compatible with X and n_centers""" if len(centers) != n_centers: raise ValueError('The shape of the initial centers (%s) ' 'does not match the number of clusters %i' % (centers.shape, n_centers)) if centers.shape[1] != X.shape[1]: raise ValueError( "The number of features of the initial centers %s " "does not match the number of features of the data %s." % (centers.shape[1], X.shape[1])) def _tolerance(X, tol): """Return a tolerance which is independent of the dataset""" if sp.issparse(X): variances = mean_variance_axis(X, axis=0)[1] else: variances = np.var(X, axis=0) return np.mean(variances) * tol def k_means(X, n_clusters, init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False): """K-means clustering algorithm. Read more in the :ref:`User Guide <k_means>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The observations to cluster. n_clusters : int The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 
'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. algorithm : "auto", "full" or "elkan", default="auto" K-means algorithm to use. The classical EM-style algorithm is "full". The "elkan" variation is more efficient by using the triangle inequality, but currently doesn't support sparse data. "auto" chooses "elkan" for dense data and "full" for sparse data. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, optional The relative increment in the results before declaring convergence. verbose : boolean, optional Verbosity mode. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. copy_x : boolean, optional When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- centroid : float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label : integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). best_n_iter: int Number of iterations corresponding to the best results. Returned only if `return_n_iter` is set to True. """ if n_init <= 0: raise ValueError("Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init) random_state = check_random_state(random_state) if max_iter <= 0: raise ValueError('Number of iterations should be a positive number,' ' got %d instead' % max_iter) X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) # If the distances are precomputed every job will create a matrix of shape # (n_clusters, n_samples). To stop KMeans from eating up memory we only # activate this if the created matrix is guaranteed to be under 100MB. 12 # million entries consume a little under 100MB if they are of type double. 
if precompute_distances == 'auto': n_samples = X.shape[0] precompute_distances = (n_clusters * n_samples) < 12e6 elif isinstance(precompute_distances, bool): pass else: raise ValueError("precompute_distances should be 'auto' or True/False" ", but a value of %r was passed" % precompute_distances) # subtract of mean of x for more accurate distance computations if not sp.issparse(X) or hasattr(init, '__array__'): X_mean = X.mean(axis=0) if not sp.issparse(X): # The copy was already done above X -= X_mean if hasattr(init, '__array__'): init = check_array(init, dtype=X.dtype.type, copy=True) _validate_center_shape(X, n_clusters, init) init -= X_mean if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in k-means instead of n_init=%d' % n_init, RuntimeWarning, stacklevel=2) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) best_labels, best_inertia, best_centers = None, None, None if n_clusters == 1: # elkan doesn't make sense for a single cluster, full will produce # the right result. algorithm = "full" if algorithm == "auto": algorithm = "full" if sp.issparse(X) else 'elkan' if algorithm == "full": kmeans_single = _kmeans_single_lloyd elif algorithm == "elkan": kmeans_single = _kmeans_single_elkan else: raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got" " %s" % str(algorithm)) if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = kmeans_single( X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, precompute_distances=precompute_distances, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, tol=tol, precompute_distances=precompute_distances, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed) for seed in seeds) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if not sp.issparse(X): if not copy_x: X += X_mean best_centers += X_mean if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++', verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True): if sp.issparse(X): raise ValueError("algorithm='elkan' not supported for sparse input X") X = check_array(X, order="C") random_state = check_random_state(random_state) if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) centers = np.ascontiguousarray(centers) if verbose: print('Initialization complete') centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol, max_iter=max_iter, 
verbose=verbose) inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64) return labels, inertia, centers, n_iter def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++', verbose=False, x_squared_norms=None, random_state=None, tol=1e-4, precompute_distances=True): """A single run of k-means, assumes preparation completed prior. Parameters ---------- X: array-like of floats, shape (n_samples, n_features) The observations to cluster. n_clusters: int The number of clusters to form as well as the number of centroids to generate. max_iter: int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. init: {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (k, p) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. tol: float, optional The relative increment in the results before declaring convergence. verbose: boolean, optional Verbosity mode x_squared_norms: array Precomputed x_squared_norms. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- centroid: float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label: integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia: float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. 
""" random_state = check_random_state(random_state) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment is also called the E-step of EM labels, inertia = \ _labels_inertia(X, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances) # computation of the means is also called the M-step of EM if sp.issparse(X): centers = _k_means._centers_sparse(X, labels, n_clusters, distances) else: centers = _k_means._centers_dense(X, labels, n_clusters, distances) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia center_shift_total = squared_norm(centers_old - centers) if center_shift_total <= tol: if verbose: print("Converged at iteration %d: " "center shift %e within tolerance %e" % (i, center_shift_total, tol)) break if center_shift_total > 0: # rerun E-step in case of non-convergence so that predicted labels # match cluster centers best_labels, best_inertia = \ _labels_inertia(X, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances) return best_labels, best_inertia, best_centers, i + 1 def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances): """Compute labels and inertia using a full distance matrix. This will overwrite the 'distances' array in-place. Parameters ---------- X : numpy array, shape (n_sample, n_features) Input data. x_squared_norms : numpy array, shape (n_samples,) Precomputed squared norms of X. centers : numpy array, shape (n_clusters, n_features) Cluster centers which data is assigned to. distances : numpy array, shape (n_samples,) Pre-allocated array in which distances are stored. Returns ------- labels : numpy array, dtype=np.int, shape (n_samples,) Indices of clusters that samples are assigned to. inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] k = centers.shape[0] all_distances = euclidean_distances(centers, X, x_squared_norms, squared=True) labels = np.empty(n_samples, dtype=np.int32) labels.fill(-1) mindist = np.empty(n_samples) mindist.fill(np.infty) for center_id in range(k): dist = all_distances[center_id] labels[dist < mindist] = center_id mindist = np.minimum(dist, mindist) if n_samples == distances.shape[0]: # distances will be changed in-place distances[:] = mindist inertia = mindist.sum() return labels, inertia def _labels_inertia(X, x_squared_norms, centers, precompute_distances=True, distances=None): """E step of the K-means EM algorithm. Compute the labels and the inertia of the given samples and centers. This will compute the distances in-place. Parameters ---------- X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features) The input samples to assign to the labels. x_squared_norms: array, shape (n_samples,) Precomputed squared euclidean norm of each data point, to speed up computations. centers: float array, shape (k, n_features) The cluster centers. 
precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). distances: float array, shape (n_samples,) Pre-allocated array to be filled in with each sample's distance to the closest center. Returns ------- labels: int array of shape(n) The resulting assignment inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] # set the default value of centers to -1 to be able to detect any anomaly # easily labels = -np.ones(n_samples, np.int32) if distances is None: distances = np.zeros(shape=(0,), dtype=X.dtype) # distances will be changed in-place if sp.issparse(X): inertia = _k_means._assign_labels_csr( X, x_squared_norms, centers, labels, distances=distances) else: if precompute_distances: return _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances) inertia = _k_means._assign_labels_array( X, x_squared_norms, centers, labels, distances=distances) return labels, inertia def _init_centroids(X, k, init, random_state=None, x_squared_norms=None, init_size=None): """Compute the initial centroids Parameters ---------- X: array, shape (n_samples, n_features) k: int number of centroids init: {'k-means++', 'random' or ndarray or callable} optional Method for initialization random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. x_squared_norms: array, shape (n_samples,), optional Squared euclidean norm of each data point. Pass it if you have it at hands already to avoid it being recomputed here. Default: None init_size : int, optional Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than k. Returns ------- centers: array, shape(k, n_features) """ random_state = check_random_state(random_state) n_samples = X.shape[0] if x_squared_norms is None: x_squared_norms = row_norms(X, squared=True) if init_size is not None and init_size < n_samples: if init_size < k: warnings.warn( "init_size=%d should be larger than k=%d. " "Setting it to 3*k" % (init_size, k), RuntimeWarning, stacklevel=2) init_size = 3 * k init_indices = random_state.randint(0, n_samples, init_size) X = X[init_indices] x_squared_norms = x_squared_norms[init_indices] n_samples = X.shape[0] elif n_samples < k: raise ValueError( "n_samples=%d should be larger than k=%d" % (n_samples, k)) if isinstance(init, string_types) and init == 'k-means++': centers = _k_init(X, k, random_state=random_state, x_squared_norms=x_squared_norms) elif isinstance(init, string_types) and init == 'random': seeds = random_state.permutation(n_samples)[:k] centers = X[seeds] elif hasattr(init, '__array__'): # ensure that the centers have the same dtype as X # this is a requirement of fused types of cython centers = np.array(init, dtype=X.dtype) elif callable(init): centers = init(X, k, random_state=random_state) centers = np.asarray(centers, dtype=X.dtype) else: raise ValueError("the init parameter for the k-means should " "be 'k-means++' or 'random' or an ndarray, " "'%s' (type '%s') was passed." % (init, type(init))) if sp.issparse(centers): centers = centers.toarray() _validate_center_shape(X, k, centers) return centers class KMeans(BaseEstimator, ClusterMixin, TransformerMixin): """K-Means clustering Read more in the :ref:`User Guide <k_means>`. 
Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random' or an ndarray} Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. algorithm : "auto", "full" or "elkan", default="auto" K-means algorithm to use. The classical EM-style algorithm is "full". The "elkan" variation is more efficient by using the triangle inequality, but currently doesn't support sparse data. "auto" chooses "elkan" for dense data and "full" for sparse data. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, default: 1e-4 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point inertia_ : float Sum of distances of samples to their closest cluster center. Examples -------- >>> from sklearn.cluster import KMeans >>> import numpy as np >>> X = np.array([[1, 2], [1, 4], [1, 0], ... [4, 2], [4, 4], [4, 0]]) >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X) >>> kmeans.labels_ array([0, 0, 0, 1, 1, 1], dtype=int32) >>> kmeans.predict([[0, 0], [4, 4]]) array([0, 1], dtype=int32) >>> kmeans.cluster_centers_ array([[ 1., 2.], [ 4., 2.]]) See also -------- MiniBatchKMeans Alternative online implementation that does incremental updates of the centers positions using mini-batches. For large scale learning (say n_samples > 10k) MiniBatchKMeans is probably much faster than the default batch implementation. Notes ------ The k-means problem is solved using Lloyd's algorithm. 
The average complexity is given by O(k n T), were n is the number of samples and T is the number of iteration. The worst case complexity is given by O(n^(k+2/p)) with n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii, 'How slow is the k-means method?' SoCG2006) In practice, the k-means algorithm is very fast (one of the fastest clustering algorithms available), but it falls in local minima. That's why it can be useful to restart it several times. """ def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=1e-4, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=1, algorithm='auto'): self.n_clusters = n_clusters self.init = init self.max_iter = max_iter self.tol = tol self.precompute_distances = precompute_distances self.n_init = n_init self.verbose = verbose self.random_state = random_state self.copy_x = copy_x self.n_jobs = n_jobs self.algorithm = algorithm def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32]) if X.shape[0] < self.n_clusters: raise ValueError("n_samples=%d should be >= n_clusters=%d" % ( X.shape[0], self.n_clusters)) return X def _check_test_data(self, X): X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES) n_samples, n_features = X.shape expected_n_features = self.cluster_centers_.shape[1] if not n_features == expected_n_features: raise ValueError("Incorrect number of features. " "Got %d features, expected %d" % ( n_features, expected_n_features)) return X def fit(self, X, y=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ random_state = check_random_state(self.random_state) X = self._check_fit_data(X) self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \ k_means( X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, precompute_distances=self.precompute_distances, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, algorithm=self.algorithm, return_n_iter=True) return self def fit_predict(self, X, y=None): """Compute cluster centers and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X).labels_ def fit_transform(self, X, y=None): """Compute clustering and transform X to cluster-distance space. Equivalent to fit(X).transform(X), but more efficiently implemented. """ # Currently, this just skips a copy of the data if it is not in # np.array or CSR format already. # XXX This skips _check_test_data, which may change the dtype; # we should refactor the input validation. X = self._check_fit_data(X) return self.fit(X)._transform(X) def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. 
""" check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._transform(X) def _transform(self, X): """guts of transform method; no input validation""" return euclidean_distances(X, self.cluster_centers_) def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0] def score(self, X, y=None): """Opposite of the value of X on the K-means objective. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data. Returns ------- score : float Opposite of the value of X on the K-means objective. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1] def _mini_batch_step(X, x_squared_norms, centers, counts, old_center_buffer, compute_squared_diff, distances, random_reassign=False, random_state=None, reassignment_ratio=.01, verbose=False): """Incremental update of the centers for the Minibatch K-Means algorithm. Parameters ---------- X : array, shape (n_samples, n_features) The original data array. x_squared_norms : array, shape (n_samples,) Squared euclidean norm of each data point. centers : array, shape (k, n_features) The cluster centers. This array is MODIFIED IN PLACE counts : array, shape (k,) The vector in which we keep track of the numbers of elements in a cluster. This array is MODIFIED IN PLACE distances : array, dtype float, shape (n_samples), optional If not None, should be a pre-allocated array that will be used to store the distances of each sample to its closest center. May not be None when random_reassign is True. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. random_reassign : boolean, optional If True, centers with very low counts are randomly reassigned to observations. reassignment_ratio : float, optional Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more likely to be reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : bool, optional, default False Controls the verbosity. compute_squared_diff : bool If set to False, the squared diff computation is skipped. old_center_buffer : int Copy of old centers for monitoring convergence. Returns ------- inertia : float Sum of distances of samples to their closest cluster center. squared_diff : numpy array, shape (n_clusters,) Squared distances between previous and updated cluster centers. 
""" # Perform label assignment to nearest centers nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers, distances=distances) if random_reassign and reassignment_ratio > 0: random_state = check_random_state(random_state) # Reassign clusters that have very low counts to_reassign = counts < reassignment_ratio * counts.max() # pick at most .5 * batch_size samples as new centers if to_reassign.sum() > .5 * X.shape[0]: indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):] to_reassign[indices_dont_reassign] = False n_reassigns = to_reassign.sum() if n_reassigns: # Pick new clusters amongst observations with uniform probability new_centers = choice(X.shape[0], replace=False, size=n_reassigns, random_state=random_state) if verbose: print("[MiniBatchKMeans] Reassigning %i cluster centers." % n_reassigns) if sp.issparse(X) and not sp.issparse(centers): assign_rows_csr(X, astype(new_centers, np.intp), astype(np.where(to_reassign)[0], np.intp), centers) else: centers[to_reassign] = X[new_centers] # reset counts of reassigned centers, but don't reset them too small # to avoid instant reassignment. This is a pretty dirty hack as it # also modifies the learning rates. counts[to_reassign] = np.min(counts[~to_reassign]) # implementation for the sparse CSR representation completely written in # cython if sp.issparse(X): return inertia, _k_means._mini_batch_update_csr( X, x_squared_norms, centers, counts, nearest_center, old_center_buffer, compute_squared_diff) # dense variant in mostly numpy (not as memory efficient though) k = centers.shape[0] squared_diff = 0.0 for center_idx in range(k): # find points from minibatch that are assigned to this center center_mask = nearest_center == center_idx count = center_mask.sum() if count > 0: if compute_squared_diff: old_center_buffer[:] = centers[center_idx] # inplace remove previous count scaling centers[center_idx] *= counts[center_idx] # inplace sum with new points members of this cluster centers[center_idx] += np.sum(X[center_mask], axis=0) # update the count statistics for this center counts[center_idx] += count # inplace rescale to compute mean of all points (old and new) # Note: numpy >= 1.10 does not support '/=' for the following # expression for a mixture of int and float (see numpy issue #6464) centers[center_idx] = centers[center_idx] / counts[center_idx] # update the squared diff if necessary if compute_squared_diff: diff = centers[center_idx].ravel() - old_center_buffer.ravel() squared_diff += np.dot(diff, diff) return inertia, squared_diff def _mini_batch_convergence(model, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, context, verbose=0): """Helper function to encapsulate the early stopping logic""" # Normalize inertia to be able to compare values when # batch_size changes batch_inertia /= model.batch_size centers_squared_diff /= model.batch_size # Compute an Exponentially Weighted Average of the squared # diff to monitor the convergence while discarding # minibatch-local stochastic variability: # https://en.wikipedia.org/wiki/Moving_average ewa_diff = context.get('ewa_diff') ewa_inertia = context.get('ewa_inertia') if ewa_diff is None: ewa_diff = centers_squared_diff ewa_inertia = batch_inertia else: alpha = float(model.batch_size) * 2.0 / (n_samples + 1) alpha = 1.0 if alpha > 1.0 else alpha ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha # Log progress to be able to monitor convergence if verbose: progress_msg 
= ( 'Minibatch iteration %d/%d:' ' mean batch inertia: %f, ewa inertia: %f ' % ( iteration_idx + 1, n_iter, batch_inertia, ewa_inertia)) print(progress_msg) # Early stopping based on absolute tolerance on squared change of # centers position (using EWA smoothing) if tol > 0.0 and ewa_diff <= tol: if verbose: print('Converged (small centers change) at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # Early stopping heuristic due to lack of improvement on smoothed inertia ewa_inertia_min = context.get('ewa_inertia_min') no_improvement = context.get('no_improvement', 0) if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min: no_improvement = 0 ewa_inertia_min = ewa_inertia else: no_improvement += 1 if (model.max_no_improvement is not None and no_improvement >= model.max_no_improvement): if verbose: print('Converged (lack of improvement in inertia)' ' at iteration %d/%d' % (iteration_idx + 1, n_iter)) return True # update the convergence context to maintain state across successive calls: context['ewa_diff'] = ewa_diff context['ewa_inertia'] = ewa_inertia context['ewa_inertia_min'] = ewa_inertia_min context['no_improvement'] = no_improvement return False class MiniBatchKMeans(KMeans): """Mini-Batch K-Means clustering Read more in the :ref:`User Guide <mini_batch_kmeans>`. Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional Maximum number of iterations over the complete dataset before stopping independently of any early stopping criterion heuristics. max_no_improvement : int, default: 10 Control early stopping based on the consecutive number of mini batches that does not yield an improvement on the smoothed inertia. To disable convergence detection based on inertia, set max_no_improvement to None. tol : float, default: 0.0 Control early stopping based on the relative center changes as measured by a smoothed, variance-normalized of the mean center squared position changes. This early stopping heuristics is closer to the one used for the batch variant of the algorithms but induces a slight computational and memory overhead over the inertia heuristic. To disable convergence detection based on normalized center change, set tol to 0.0 (default). batch_size : int, optional, default: 100 Size of the mini batches. init_size : int, optional, default: 3 * batch_size Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than n_clusters. init : {'k-means++', 'random' or an ndarray}, default: 'k-means++' Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. n_init : int, default=3 Number of random initializations that are tried. In contrast to KMeans, the algorithm is only run once, using the best of the ``n_init`` initializations as measured by inertia. compute_labels : boolean, default=True Compute label assignment and inertia for the complete dataset once the minibatch optimization has converged in fit. 
random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. reassignment_ratio : float, default: 0.01 Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more easily reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : boolean, optional Verbosity mode. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point (if compute_labels is set to True). inertia_ : float The value of the inertia criterion associated with the chosen partition (if compute_labels is set to True). The inertia is defined as the sum of square distances of samples to their nearest neighbor. See also -------- KMeans The classic implementation of the clustering method based on the Lloyd's algorithm. It consumes the whole set of input data at each iteration. Notes ----- See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf """ def __init__(self, n_clusters=8, init='k-means++', max_iter=100, batch_size=100, verbose=0, compute_labels=True, random_state=None, tol=0.0, max_no_improvement=10, init_size=None, n_init=3, reassignment_ratio=0.01): super(MiniBatchKMeans, self).__init__( n_clusters=n_clusters, init=init, max_iter=max_iter, verbose=verbose, random_state=random_state, tol=tol, n_init=n_init) self.max_no_improvement = max_no_improvement self.batch_size = batch_size self.compute_labels = compute_labels self.init_size = init_size self.reassignment_ratio = reassignment_ratio def fit(self, X, y=None): """Compute the centroids on X by chunking it into mini-batches. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster """ random_state = check_random_state(self.random_state) X = check_array(X, accept_sparse="csr", order='C', dtype=[np.float64, np.float32]) n_samples, n_features = X.shape if n_samples < self.n_clusters: raise ValueError("Number of samples smaller than number " "of clusters.") n_init = self.n_init if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=X.dtype) if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in MiniBatchKMeans instead of ' 'n_init=%d' % self.n_init, RuntimeWarning, stacklevel=2) n_init = 1 x_squared_norms = row_norms(X, squared=True) if self.tol > 0.0: tol = _tolerance(X, self.tol) # using tol-based early stopping needs the allocation of a # dedicated before which can be expensive for high dim data: # hence we allocate it outside of the main loop old_center_buffer = np.zeros(n_features, dtype=X.dtype) else: tol = 0.0 # no need for the center buffer if tol-based early stopping is # disabled old_center_buffer = np.zeros(0, dtype=X.dtype) distances = np.zeros(self.batch_size, dtype=X.dtype) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) n_iter = int(self.max_iter * n_batches) init_size = self.init_size if init_size is None: init_size = 3 * self.batch_size if init_size > n_samples: init_size = n_samples self.init_size_ = init_size validation_indices = random_state.randint(0, n_samples, init_size) X_valid = X[validation_indices] x_squared_norms_valid = x_squared_norms[validation_indices] # perform several inits with random sub-sets best_inertia = None for init_idx in range(n_init): if self.verbose: print("Init %d/%d with method: %s" % (init_idx + 1, n_init, self.init)) counts = np.zeros(self.n_clusters, dtype=np.int32) # TODO: once the `k_means` function works with sparse input we # should refactor the following init to use it instead. 
# Initialize the centers using only a fraction of the data as we # expect n_samples to be very large when using MiniBatchKMeans cluster_centers = _init_centroids( X, self.n_clusters, self.init, random_state=random_state, x_squared_norms=x_squared_norms, init_size=init_size) # Compute the label assignment on the init dataset batch_inertia, centers_squared_diff = _mini_batch_step( X_valid, x_squared_norms[validation_indices], cluster_centers, counts, old_center_buffer, False, distances=None, verbose=self.verbose) # Keep only the best cluster centers across independent inits on # the common validation set _, inertia = _labels_inertia(X_valid, x_squared_norms_valid, cluster_centers) if self.verbose: print("Inertia for init %d/%d: %f" % (init_idx + 1, n_init, inertia)) if best_inertia is None or inertia < best_inertia: self.cluster_centers_ = cluster_centers self.counts_ = counts best_inertia = inertia # Empty context to be used inplace by the convergence check routine convergence_context = {} # Perform the iterative optimization until the final convergence # criterion for iteration_idx in range(n_iter): # Sample a minibatch from the full dataset minibatch_indices = random_state.randint( 0, n_samples, self.batch_size) # Perform the actual update step on the minibatch data batch_inertia, centers_squared_diff = _mini_batch_step( X[minibatch_indices], x_squared_norms[minibatch_indices], self.cluster_centers_, self.counts_, old_center_buffer, tol > 0.0, distances=distances, # Here we randomly choose whether to perform # random reassignment: the choice is done as a function # of the iteration index, and the minimum number of # counts, in order to force this reassignment to happen # every once in a while random_reassign=((iteration_idx + 1) % (10 + self.counts_.min()) == 0), random_state=random_state, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) # Monitor convergence and do early stopping if necessary if _mini_batch_convergence( self, iteration_idx, n_iter, tol, n_samples, centers_squared_diff, batch_inertia, convergence_context, verbose=self.verbose): break self.n_iter_ = iteration_idx + 1 if self.compute_labels: self.labels_, self.inertia_ = self._labels_inertia_minibatch(X) return self def _labels_inertia_minibatch(self, X): """Compute labels and inertia using mini batches. This is slightly slower than doing everything at once but preventes memory errors / segfaults. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- labels : array, shap (n_samples,) Cluster labels for each point. inertia : float Sum of squared distances of points to nearest cluster. """ if self.verbose: print('Computing label assignment and total inertia') x_squared_norms = row_norms(X, squared=True) slices = gen_batches(X.shape[0], self.batch_size) results = [_labels_inertia(X[s], x_squared_norms[s], self.cluster_centers_) for s in slices] labels, inertia = zip(*results) return np.hstack(labels), np.sum(inertia) def partial_fit(self, X, y=None): """Update k means estimate on a single mini-batch X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Coordinates of the data points to cluster. 
""" X = check_array(X, accept_sparse="csr") n_samples, n_features = X.shape if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=X.dtype) if n_samples == 0: return self x_squared_norms = row_norms(X, squared=True) self.random_state_ = getattr(self, "random_state_", check_random_state(self.random_state)) if (not hasattr(self, 'counts_') or not hasattr(self, 'cluster_centers_')): # this is the first call partial_fit on this object: # initialize the cluster centers self.cluster_centers_ = _init_centroids( X, self.n_clusters, self.init, random_state=self.random_state_, x_squared_norms=x_squared_norms, init_size=self.init_size) self.counts_ = np.zeros(self.n_clusters, dtype=np.int32) random_reassign = False distances = None else: # The lower the minimum count is, the more we do random # reassignment, however, we don't want to do random # reassignment too often, to allow for building up counts random_reassign = self.random_state_.randint( 10 * (1 + self.counts_.min())) == 0 distances = np.zeros(X.shape[0], dtype=X.dtype) _mini_batch_step(X, x_squared_norms, self.cluster_centers_, self.counts_, np.zeros(0, dtype=X.dtype), 0, random_reassign=random_reassign, distances=distances, random_state=self.random_state_, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia( X, x_squared_norms, self.cluster_centers_) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._labels_inertia_minibatch(X)[0]
bsd-3-clause
oknuutti/visnav-py
visnav/render/stars.py
1
34131
from datetime import datetime from functools import lru_cache import cv2 import math import os import sqlite3 import re import time import numpy as np import quaternion from visnav.algo import tools from visnav.algo.image import ImageProc from visnav.algo.model import SystemModel from visnav.missions.didymos import DidymosSystemModel from visnav.missions.rosetta import RosettaSystemModel from visnav.settings import * # https://pysynphot.readthedocs.io/en/latest/index.html#pysynphot-installation-setup import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") import importlib mod = importlib.util.find_spec('pysynphot') if mod is not None: root = mod.submodule_search_locations[0] os.environ['PYSYN_CDBS'] = os.path.join(root, 'data', 'cdbs') # http://ssb.stsci.edu/cdbs/tarfiles/synphot1.tar.gz import pysynphot as S # http://ssb.stsci.edu/cdbs/tarfiles/synphot2.tar.gz # http://ssb.stsci.edu/cdbs/tarfiles/synphot3.tar.gz else: print('warning: module pysynphot not found') class Stars: # from VizieR catalogs: SOURCE_HIPPARCHOS = 'H' # I/239/hip_main SOURCE_PASTEL = 'P' # B/pastel/pastel SOURCE_WU = 'W' # J/A+A/525/A71/table2 SOURCE_GAIA1 = 'G' # J/MNRAS/471/770/table2 STARDB_TYC = os.path.join(DATA_DIR, 'deep_space_objects_tyc.sqlite') STARDB_HIP = os.path.join(DATA_DIR, 'deep_space_objects_hip.sqlite') STARDB = STARDB_HIP MAG_CUTOFF = 10 MAG_V_LAM0 = 545e-9 SUN_MAG_V = -26.74 SUN_MAG_B = 0.6222 + SUN_MAG_V # from sc cam frame (axis: +x, up: +z) to equatorial frame (axis: +y, up: +z) sc2ec_q = np.quaternion(1, 0, 0, 1).normalized().conj() @staticmethod def black_body_radiation(Teff, lam): return Stars.black_body_radiation_fn(Teff)(lam) @staticmethod def black_body_radiation_fn(Teff): def phi(lam): # planck's law of black body radiation [W/m3/sr] h = 6.626e-34 # planck constant (m2kg/s) c = 3e8 # speed of light k = 1.380649e-23 # Boltzmann constant r = 2*h*c**2/lam**5/(np.exp(h*c/lam/k/Teff) - 1) return r return phi @staticmethod def synthetic_radiation(Teff, fe_h, log_g, lam, mag_v=None): return Stars.synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=mag_v)(lam) @staticmethod @lru_cache(maxsize=1000) def synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models', lam_min=0, lam_max=np.inf, return_sp=False): return Stars.uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v, model, lam_min, lam_max, return_sp) @staticmethod def uncached_synthetic_radiation_fn(Teff, fe_h, log_g, mag_v=None, model='k93models', lam_min=0, lam_max=np.inf, return_sp=False): sp = None orig_log_g = log_g if isinstance(model, tuple): # give in meters, W/m3 sp = S.spectrum.ArraySourceSpectrum(np.array(model[0]) * 1e10, np.array(model[1]) * 1e-4 * 1e-10 / 1e-7, 'angstrom', 'flam') else: first_try = True if Teff < 3500: print('could not init spectral model with given t_eff=%s, using t_eff=3500K instead' % Teff) Teff = 3500 for i in range(15): try: sp = S.Icat(model, Teff, fe_h, log_g) # 'ck04models' or 'k93models' break except: first_try = False log_g = log_g + (0.2 if Teff > 6000 else -0.2) assert sp is not None, 'could not init spectral model with given params: t_eff=%s, log_g=%s, fe_h=%s' % (Teff, orig_log_g, fe_h) if not first_try: print('could not init spectral model with given params (t_eff=%s, log_g=%s, fe_h=%s), changed log_g to %s' % (Teff, orig_log_g, fe_h, log_g)) if mag_v is not None: sp = sp.renorm(mag_v, 'vegamag', S.ObsBandpass('johnson,v')) if return_sp: return sp # for performance reasons (caching) from scipy.interpolate import interp1d I = np.logical_and(sp.wave >= 
lam_min*1e10, sp.wave <= lam_max*1e10) sample_fn = interp1d(sp.wave[I], sp.flux[I], kind='linear', assume_sorted=True) def phi(lam): r = sample_fn(lam*1e10) # wavelength in Å, result in "flam" (erg/s/cm2/Å) return r * 1e-7 / 1e-4 / 1e-10 # result in W/m3 return phi @staticmethod def magnitude_to_spectral_flux_density(mag): # spectral flux density for standard magnitude for V-band (at 545nm) # from "Model atmospheres broad-band colors, bolometric corrections and temperature calibrations for O - M stars" # Bessel M.S. et al, Astronomy and Astrophysics, 1998, table A2 # Also at http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/magsystems.pdf # 363.1e-11 erg/cm2/s/Å (erg=1e-7J, 1cm2=1e-4m2, Å=1e-10m) phi0 = 363.1e-11 * 1e-7 / 1e-4 / 1e-10 # W/m3 return np.power(10., -0.4 * mag) * phi0 @staticmethod def tycho_to_johnson(mag_bt, mag_vt): v = mag_vt - 0.09 * (mag_bt - mag_vt) b = 0.85 * (mag_bt - mag_vt) + v return b, v @staticmethod def effective_temp(b_v, metal=0, log_g=0): """ magnitudes in johnson system """ # calculate star effective temperatures, from: # - http://help.agi.com/stk/index.htm#stk/starConstruction.htm # - Sekiguchi, M. and Fukugita, M., 2000. A Study of the B−V Color-Temperature Relation. The Astronomical Journal, 120(2), p.1072. # - metallicity (Fe/H) and log surface gravity can be set to zero without big impact c0 = 3.939654 c1 = -0.395361 c2 = 0.2082113 c3 = -0.0604097 f1 = 0.027153 f2 = 0.005036 g1 = 0.007367 h1 = -0.01069 return 10**(c0+c1*(b_v)+c2*(b_v)**2+c3*(b_v)**3 + f1*metal + f2*metal**2 + g1*log_g + h1*(b_v)*log_g) @staticmethod def flux_density(cam_q, cam, mask=None, mag_cutoff=MAG_CUTOFF, array=False, undistorted=False, order_by=None): """ plots stars based on Tycho-2 database, gives out photon count per unit area given exposure time in seconds, cam_q is a quaternion in ICRS coord frame, x_fov and y_fov in degrees """ # calculate query conditions for star ra and dec cam_dec, cam_ra, _ = tools.q_to_ypr(cam_q) # camera boresight in ICRS coords d = np.linalg.norm((cam.x_fov, cam.y_fov))/2 min_dec, max_dec = math.degrees(cam_dec) - d, math.degrees(cam_dec) + d dec_cond = '(dec BETWEEN %s AND %s)' % (min_dec, max_dec) # goes over the pole to the other side of the sphere, easy solution => ignore limit on ra skip_ra_cond = min_dec < -90 or max_dec > 90 if skip_ra_cond: ra_cond = '1' else: min_ra, max_ra = math.degrees(cam_ra) - d, math.degrees(cam_ra) + d if min_ra < 0: ra_cond = '(ra < %s OR ra > %s)' % (max_ra, (min_ra + 360) % 360) elif max_ra > 360: ra_cond = '(ra > %s OR ra < %s)' % (min_ra, max_ra % 360) else: ra_cond = '(ra BETWEEN %s AND %s)' % (min_ra, max_ra) conn = sqlite3.connect(Stars.STARDB) cursor = conn.cursor() # the magnitudes for tycho id xxxx-xxxxx-2 entries are bad as they are most likely taken from hip catalog that bundles all .*-(\d) results = cursor.execute(""" SELECT x, y, z, mag_v""" + (", mag_b, t_eff, fe_h, log_g, dec, ra, id" if array else "") + """ FROM deep_sky_objects WHERE """ + ("tycho like '%-1' AND " if Stars.STARDB == Stars.STARDB_TYC else "") + "mag_v < " + str(mag_cutoff) + " AND " + dec_cond + " AND " + ra_cond + ((" ORDER BY %s ASC" % order_by) if order_by is not None else '')) stars = np.array(results.fetchall()) conn.close() flux_density = ([], None) if array else np.zeros((cam.height, cam.width), dtype=np.float32) if len(stars) == 0: return flux_density stars[:, 0:3] = tools.q_times_mx(SystemModel.sc2gl_q.conj() * cam_q.conj(), stars[:, 0:3]) stars_ixy_ = cam.calc_img_R(stars[:, 0:3], undistorted=undistorted) stars_ixy = 
np.round(stars_ixy_.astype(np.float)).astype(np.int) I = np.logical_and.reduce((np.all(stars_ixy >= 0, axis=1), stars_ixy[:, 0] <= cam.width-1, stars_ixy[:, 1] <= cam.height-1)) if array: cols = ('ix', 'iy', 'x', 'y', 'z', 'mag_v', 'mag_b', 't_eff', 'fe_h', 'log_g', 'dec', 'ra', 'id') return ( np.hstack((stars_ixy_[I, :], stars[I, :])), dict(zip(cols, range(len(cols)))) ) stars_ixy = stars_ixy[I, :] flux_density_per_star = Stars.magnitude_to_spectral_flux_density(stars[I, 3]) for i, f in enumerate(flux_density_per_star): flux_density[stars_ixy[i, 1], stars_ixy[i, 0]] += f if mask is not None: flux_density[np.logical_not(mask)] = 0 if True: # assume every star is like our sun, convert to total flux density [W/m2] solar_constant = 1360.8 # sun magnitude from http://mips.as.arizona.edu/~cnaw/sun.html sun_flux_density = Stars.magnitude_to_spectral_flux_density(Stars.SUN_MAG_V) flux_density = flux_density * (solar_constant / sun_flux_density) return flux_density @staticmethod def get_property_by_id(id, field=None): res = Stars._query_cursor.execute(f"select {field} from deep_sky_objects where id = {int(id)}").fetchone()[0] return res @staticmethod def get_catalog_id(id, field=None): try: is_arr = False id = int(id) except: is_arr = True if Stars._query_conn is None: Stars._conn = sqlite3.connect(Stars.STARDB) Stars._query_cursor = Stars._conn.cursor() field = field or ("tycho" if Stars.STARDB == Stars.STARDB_TYC else "hip") if is_arr: res = Stars._query_cursor.execute( "select id, %s from deep_sky_objects where id IN (%s)" % ( field, ','.join(str(i) for i in id))).fetchall() return {r[0]: str(r[1]) for r in res} else: res = Stars._query_cursor.execute( "select %s from deep_sky_objects where id = %s" % ( field, id)).fetchone()[0] return str(res) _query_conn, _query_cursor = None, None @staticmethod def _create_stardb(fname): conn = sqlite3.connect(fname) cursor = conn.cursor() cursor.execute("DROP TABLE IF EXISTS deep_sky_objects") cursor.execute(""" CREATE TABLE deep_sky_objects ( id INTEGER PRIMARY KEY ASC NOT NULL, hip INT, hd INT DEFAULT NULL, simbad CHAR(20) DEFAULT NULL, ra REAL NOT NULL, /* src[0] */ dec REAL NOT NULL, /* src[0] */ x REAL NOT NULL, y REAL NOT NULL, z REAL NOT NULL, mag_v REAL NOT NULL, /* src[1] */ mag_b REAL DEFAULT NULL, /* src[2] */ t_eff REAL DEFAULT NULL, /* src[3] */ log_g REAL DEFAULT NULL, /* src[4] */ fe_h REAL DEFAULT NULL, /* src[5] */ src CHAR(6) DEFAULT 'HHHPPP' )""") cursor.execute("DROP INDEX IF EXISTS ra_idx") cursor.execute("CREATE INDEX ra_idx ON deep_sky_objects (ra)") cursor.execute("DROP INDEX IF EXISTS dec_idx") cursor.execute("CREATE INDEX dec_idx ON deep_sky_objects (dec)") cursor.execute("DROP INDEX IF EXISTS mag_idx") cursor.execute("CREATE INDEX mag_idx ON deep_sky_objects (mag_v)") cursor.execute("DROP INDEX IF EXISTS hd") cursor.execute("CREATE INDEX hd ON deep_sky_objects (hd)") cursor.execute("DROP INDEX IF EXISTS simbad") cursor.execute("CREATE INDEX simbad ON deep_sky_objects (simbad)") cursor.execute("DROP INDEX IF EXISTS hip") cursor.execute("CREATE UNIQUE INDEX hip ON deep_sky_objects (hip)") conn.commit() @staticmethod def import_stars_hip(): # I/239/hip_main Stars._create_stardb(Stars.STARDB_HIP) conn = sqlite3.connect(Stars.STARDB_HIP) cursor = conn.cursor() from astroquery.vizier import Vizier Vizier.ROW_LIMIT = -1 cols = ["HIP", "HD", "_RA.icrs", "_DE.icrs", "Vmag", "B-V"] r = Vizier(catalog="I/239/hip_main", columns=cols, row_limit=-1).query_constraints()[0] for i, row in enumerate(r): hip, hd, ra, dec, mag_v, b_v = [row[f] for f 
in cols] if np.any(list(map(np.ma.is_masked, (ra, dec, mag_v)))): continue hd = 'null' if np.ma.is_masked(hd) else hd mag_b = 'null' if np.ma.is_masked(b_v) or np.isnan(b_v) else b_v + mag_v x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1) cursor.execute(""" INSERT INTO deep_sky_objects (hip, hd, ra, dec, x, y, z, mag_v, mag_b) VALUES (%s, %s, %f, %f, %f, %f, %f, %f, %s)""" % (hip, hd, ra, dec, x, y, z, mag_v, mag_b)) if i % 100 == 0: conn.commit() tools.show_progress(len(r), i) conn.commit() conn.close() @staticmethod def import_stars_tyc(): assert False, 'not supported anymore' Stars._create_stardb(Stars.STARDB_TYC, 12) conn = sqlite3.connect(Stars.STARDB_TYC) cursor = conn.cursor() # Tycho-2 catalogue, from http://archive.eso.org/ASTROM/TYC-2/data/ for file in ('catalog.dat', 'suppl_1.dat'): with open(os.path.join(DATA_DIR, file), 'r') as fh: line = fh.readline() while line: c = line line = fh.readline() # mean position, ICRS, at epoch 2000.0 # proper motion milliarcsecond/year # apparent magnitude if file == 'catalog.dat': # main catalog epoch = 2000.0 tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt = c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[110:116], c[123:129] mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt)) else: # supplement-1 has the brightest stars, from hipparcos and tycho-1 epoch = 1991.25 tycho, ra, dec, pmra, pmdec, mag_bt, mag_vt, flag, hip = \ c[0:12], c[15:27], c[28:40], c[41:48], c[49:56], c[83:89], c[96:102], c[81:82], c[115:120] if flag in ('H', 'V', 'B'): if len(hip.strip()) > 0: mag_b, mag_v = Stars.get_hip_mag_bv(hip) else: continue else: mag_b, mag_v = Stars.tycho_to_johnson(float(mag_bt), float(mag_vt)) tycho = tycho.replace(' ', '-') if np.all(list(map(tools.numeric, (ra, dec)))): ra, dec = list(map(float, (ra, dec))) if -10 < mag_v < Stars.MAG_CUTOFF: curr_epoch = datetime.now().year + \ (datetime.now().timestamp() - datetime.strptime(str(datetime.now().year),'%Y').timestamp() )/365.25/24/3600 years = curr_epoch - epoch # TODO: (1) adjust to current epoch using proper motion and years since epoch x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1) cursor.execute("INSERT INTO deep_sky_objects (tycho,ra,dec,x,y,z,mag_b,mag_v) VALUES (?,?,?,?,?,?,?,?)", ( tycho, (ra+360)%360, dec, x, y, z, mag_b, mag_v )) conn.commit() conn.close() @staticmethod def add_simbad_col(): conn = sqlite3.connect(Stars.STARDB) cursor_r = conn.cursor() cursor_w = conn.cursor() # cursor_w.execute("alter table deep_sky_objects add column simbad char(20) default null") # conn.commit() N_tot = cursor_r.execute("SELECT max(id) FROM deep_sky_objects WHERE 1").fetchone()[0] skip = 0 result = cursor_r.execute("select id, hip from deep_sky_objects where id >= %d" % skip) import time from astroquery.simbad import Simbad Simbad.add_votable_fields('typed_id') while 1: rows = result.fetchmany(1000) if rows is None or len(rows) == 0: break tools.show_progress(N_tot, rows[0][0]-1) s = Simbad.query_objects(['HIP %d' % int(row[1]) for row in rows]) time.sleep(2) values = [] if s is not None: s.add_index('TYPED_ID') for row in rows: sr = get(s, ('HIP %d' % int(row[1])).encode('utf-8')) if sr is not None: k = sr['MAIN_ID'].decode('utf-8') values.append("(%d, '%s', 0,0,0,0,0,0)" % (row[0], k.replace("'", "''"))) if len(values) > 0: cursor_w.execute(""" INSERT INTO deep_sky_objects (id, simbad, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """ ON CONFLICT(id) DO UPDATE SET simbad = excluded.simbad""") conn.commit() 
conn.close() @staticmethod def query_t_eff(): from astroquery.vizier import Vizier v = Vizier(catalog="B/pastel/pastel", columns=["ID", "Teff", "logg", "[Fe/H]"], row_limit=-1) v2 = Vizier(catalog="J/A+A/525/A71/table2", columns=["Name", "Teff", "log(g)", "[Fe/H]"], row_limit=-1) v3 = Vizier(catalog="J/MNRAS/471/770/table2", columns=["HIP", "Teff", "log(g)"], row_limit=-1) conn = sqlite3.connect(Stars.STARDB) cursor_r = conn.cursor() cursor_w = conn.cursor() cond = "(t_eff is null OR log_g is null OR 1)" N_tot = cursor_r.execute(""" SELECT max(id) FROM deep_sky_objects WHERE %s """ % cond).fetchone()[0] skip = 37601 f_id, f_hip, f_hd, f_sim, f_ra, f_dec, f_t, f_g, f_m, f_src = range(10) results = cursor_r.execute(""" SELECT id, hip, hd, simbad, ra, dec, t_eff, log_g, fe_h, src FROM deep_sky_objects WHERE %s AND id >= ? ORDER BY id ASC """ % cond, (skip,)) r = v.query_constraints()[0] r.add_index('ID') N = 40 while True: rows = results.fetchmany(N) if rows is None or len(rows) == 0: break tools.show_progress(N_tot, rows[0][f_id]-1) ids = {row[f_id]: [i, row[f_src][:3] + '___'] for i, row in enumerate(rows)} insert = {} for i, row in enumerate(rows): k = 'HIP %6d' % int(row[f_hip]) if get(r, k) is None and row[f_hd]: k = 'HD %6d' % int(row[f_hd]) if get(r, k) is None and row[f_sim]: k = row[f_sim] if get(r, k) is None and row[f_sim]: k = row[f_sim] + ' A' dr = get(r, k) if dr is not None: t_eff, log_g, fe_h = median(dr, ('Teff', 'logg', '__Fe_H_'), null='null') src = row[f_src][0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_PASTEL) for v in (t_eff, log_g, fe_h)]) insert[row[f_id]] = [t_eff, log_g, fe_h, src] if '_' not in src[3:5]: ids.pop(row[f_id]) else: ids[row[f_id]][1] = src if len(ids) > 0: # try using other catalog r = v2.query_constraints(Name='=,' + ','.join([ ('HD%06d' % int(rows[i][f_hd])) for i, s in ids.values() if rows[i][f_hd] is not None ])) time.sleep(2) if len(r) > 0: r = r[0] r.add_index('Name') for id, (i, src) in ids.copy().items(): dr = get(r, 'HD%06d' % int(rows[i][f_hd])) if rows[i][f_hd] else None if dr is not None: t_eff, log_g, fe_h = median(dr, ('Teff', 'log_g_', '__Fe_H_'), null='null') src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_WU) for v in (t_eff, log_g, fe_h)]) insert[id] = [t_eff, log_g, fe_h, src] if '_' not in src[3:5]: ids.pop(rows[i][f_id]) else: ids[rows[i][f_id]][1] = src if len(ids) > 0: # try using other catalog r = v3.query_constraints(HIP='=,' + ','.join([str(rows[i][f_hip]) for i, s in ids.values()]))[0] r.add_index('HIP') for id, (i, src) in ids.copy().items(): dr = get(r, int(rows[i][f_hip])) if dr is not None: t_eff, log_g = median(dr, ('Teff', 'log_g_'), null='null') src = src[0:3] + ''.join([('_' if v == 'null' else Stars.SOURCE_GAIA1) for v in (t_eff, log_g)]) + src[5] insert[id] = [t_eff, log_g, insert[id][2] if id in insert else 'null', src] # if '_' not in src[3:5]: # ids.pop(rows[i][f_id]) # else: # ids[rows[i][f_id]][1] = src if len(insert) > 0: values = ["(%d, %s, %s, %s, '%s', 0,0,0,0,0,0)" % ( id, t_eff, log_g, fe_h, src) for id, (t_eff, log_g, fe_h, src) in insert.items()] cursor_w.execute(""" INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) VALUES """ + ','.join(values) + """ ON CONFLICT(id) DO UPDATE SET t_eff = excluded.t_eff, log_g = excluded.log_g, fe_h = excluded.fe_h, src = excluded.src """) conn.commit() conn.close() @staticmethod def query_v_mag(): from astroquery.vizier import Vizier from tqdm import tqdm v = Vizier(catalog="B/pastel/pastel", columns=["ID", 
"Vmag"], row_limit=-1) conn = sqlite3.connect(Stars.STARDB) cursor_r = conn.cursor() cursor_w = conn.cursor() cond = f"(substr(src,2,1) = '{Stars.SOURCE_HIPPARCHOS}')" N_tot = cursor_r.execute(f"SELECT count(*) FROM deep_sky_objects WHERE {cond}").fetchone()[0] f_id, f_hip, f_hd, f_sim, f_mag_v, f_src = range(6) results = cursor_r.execute(""" SELECT id, hip, hd, simbad, mag_v, src FROM deep_sky_objects WHERE %s ORDER BY mag_v ASC """ % cond) r = v.query_constraints()[0] r.add_index('ID') N = 40 pbar = tqdm(total=N_tot) while True: rows = results.fetchmany(N) if rows is None or len(rows) == 0: break ids = {row[f_id]: [i, row[f_src]] for i, row in enumerate(rows)} insert = {} for i, row in enumerate(rows): k = 'HIP %6d' % int(row[f_hip]) if get(r, k) is None and row[f_hd]: k = 'HD %6d' % int(row[f_hd]) if get(r, k) is None and row[f_sim]: k = row[f_sim] if get(r, k) is None and row[f_sim]: k = row[f_sim] + ' A' dr = get(r, k) if dr is not None: v_mag, *_ = median(dr, ('Vmag',), null='null') if v_mag != 'null': src = row[f_src] src = src[:1] + Stars.SOURCE_PASTEL + src[2:] insert[row[f_id]] = [v_mag, src] ids.pop(row[f_id]) if len(insert) > 0: values = [f"({id}, 0, 0, 0, '{src}', 0, 0, 0, 0, 0, {v_mag})" for id, (v_mag, src) in insert.items()] cursor_w.execute("INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) " "VALUES " + ','.join(values) + " " "ON CONFLICT(id) DO UPDATE SET " " mag_v = excluded.mag_v, " " src = excluded.src") conn.commit() pbar.set_postfix({'v_mag': np.max([float(row[f_mag_v]) for row in rows])}) pbar.update(len(rows)) conn.close() @staticmethod def correct_supplement_data(): conn = sqlite3.connect(Stars.STARDB) cursor = conn.cursor() def insert_mags(hips): res = Stars.get_hip_mag_bv([h[0] for h in hips.values()]) insert = ["('%s', %f, %f, %f, %f, %f, %f, %f)" % (t, h[1], h[2], h[3], h[4], h[5], res[h[0]][0], res[h[0]][1]) for t, h in hips.items() if h[0] in res and -10 < res[h[0]][1] < Stars.MAG_CUTOFF] if len(insert) > 0: cursor.execute(""" INSERT INTO deep_sky_objects (tycho, ra, dec, x, y, z, mag_b, mag_v) VALUES """ + ','.join(insert) + """ ON CONFLICT(tycho) DO UPDATE SET mag_b = excluded.mag_b, mag_v = excluded.mag_v """) conn.commit() file = 'suppl_1.dat' N = 30 rx = re.compile(r'0*(\d+)') with open(os.path.join(DATA_DIR, file), 'r') as fh: hips = {} line = fh.readline() while line: c = line line = fh.readline() tycho, ra, dec, mag_bt, mag_vt, flag, hip = c[0:12], c[15:27], c[28:40], c[83:89], c[96:102], c[81:82], c[115:123] tycho = tycho.replace(' ', '-') hip = rx.findall(hip)[0] if len(hip.strip()) > 0 else False if flag in ('H', 'V', 'B') and hip: ra, dec = float(ra), float(dec) x, y, z = tools.spherical2cartesian(math.radians(dec), math.radians(ra), 1) hips[tycho] = (hip, ra, dec, x, y, z) if len(hips) >= N: insert_mags(hips) hips.clear() else: continue if len(hips) > 0: insert_mags(hips) @staticmethod def get_hip_mag_bv(hip, v=None): from astroquery.vizier import Vizier Vizier.ROW_LIMIT = -1 hips = [hip] if isinstance(hip, str) else hip v = Vizier(columns=["HIP", "Vmag", "B-V"], catalog="I/239/hip_main", row_limit=-1) r = v.query_constraints(HIP='=,'+','.join(hips)) results = {} if len(r): r = r[0] r.add_index('HIP') for h in hips: try: if not np.ma.is_masked(r.loc[int(h)]['Vmag']) and not np.ma.is_masked(r.loc[int(h)]['B-V']): mag_v, b_v = float(r.loc[int(h)]['Vmag']), float(r.loc[int(h)]['B-V']) results[h] = (mag_v + b_v, mag_v) except: continue return results.get(hip, (None, None)) if isinstance(hip, str) else results 
@staticmethod def override_betelgeuse(): conn = sqlite3.connect(Stars.STARDB) cursor = conn.cursor() # from "The Advanced Spectral Library (ASTRAL): Reference Spectra for Evolved M Stars", # The Astrophysical Journal, 2018, https://iopscience.iop.org/article/10.3847/1538-4357/aaf164/pdf #t_eff = 3650 # based on query_t_eff was 3562 #mag_v = 0.42 # based on tycho2 suppl2 was 0.58 # from CTOA observations on 2018-12-07 and 18-12-22, accessed through https://www.aavso.org database mag_v = 0.8680 mag_b = 2.6745 # based on tycho2 suppl2 was 2.3498 t_eff = None # Stars.effective_temp(mag_b - mag_v, metal=0.006, log_g=-0.26) gives 3565K vs 3538K without log_g & metal cursor.execute("UPDATE deep_sky_objects SET t_eff=?, mag_v=?, mag_b=? where tycho='0129-01873-1'", (t_eff, mag_v, mag_b)) conn.commit() conn.close() def get(r, k, d=None): if k is None or r is None: return d try: return r.loc[k] except: return d def median(dr, fields, null='null'): try: values = [np.ma.median(dr[f]) for f in fields] values = [(null if np.ma.is_masked(v) else v) for v in values] except: values = [null if np.ma.is_masked(dr[f]) or np.isnan(dr[f]) else dr[f] for f in fields] return values if __name__ == '__main__': if 0: Stars.import_stars_hip() quit() elif 0: Stars.add_simbad_col() #Stars.override_rho_ori_b() #Stars.override_delta_ori_b() quit() elif 0: Stars.query_t_eff() quit() elif 0: Stars.query_v_mag() quit() elif 0: img = np.zeros((1024, 1024), dtype=np.uint8) for i in range(1000): Stars.plot_stars(img, tools.rand_q(math.radians(180)), cam, exposure=5, gain=1) quit() elif 1: conn = sqlite3.connect(Stars.STARDB) cursor = conn.cursor() f_id, f_hip, f_sim, f_hd, f_magv, f_magb, f_teff, f_logg, f_feh, f_src = range(10) r = cursor.execute(""" SELECT id, hip, simbad, hd, mag_v, mag_b, t_eff, log_g, fe_h, src FROM deep_sky_objects WHERE hd in (48915,34085,61421,39801,35468,37128,37742,37743,44743,38771,36486,48737,36861,33111,58715) ORDER BY mag_v """) rows = r.fetchall() stars = {} print('id\thip\tsim\thd\tmag_v\tmag_b\tt_eff\tlog_g\tfe_h\tsrc') for row in rows: stars[row[f_hd]] = row print('\t'.join([str(c) for c in row])) conn.close() quit() from astropy.io import fits import matplotlib.pyplot as plt def testf(fdat, teff, logg, feh): sp = S.Icat('k93models', float(teff), float(feh), float(logg))\ .renorm(0, 'vegamag', S.ObsBandpass('johnson,v')) sp_real = S.ArraySpectrum(wave=fdat[0][0], flux=fdat[0][1], fluxunits='flam')\ .renorm(0, 'vegamag', S.ObsBandpass('johnson,v')) plt.plot(sp_real.wave, sp_real.flux) plt.plot(sp.wave, sp.flux) plt.xlim(3000, 10000) plt.show() for hd in (48737, 35468, 39801): # Lambda Orionis (HD36861) Teff too high for model (37689K) fname = r'C:\projects\s100imgs\spectra\%s.fits' % hd fdat = fits.getdata(fname) teff, logg, feh = [stars[hd][f] for f in (f_teff, f_logg, f_feh)] if teff > 30000: logg = max(logg, 4.0) testf(fdat, teff, logg, feh or 0) quit() # cam = RosettaSystemModel(focused_attenuated=False).cam cam = DidymosSystemModel(use_narrow_cam=True).cam # cam_q = tools.rand_q(math.radians(180)) cam_q = quaternion.one for i in range(100): cam_q = tools.ypr_to_q(0, np.radians(1), 0) * cam_q flux_density = Stars.flux_density(cam_q, cam) img = cam.sense(flux_density, exposure=2, gain=2) img = np.clip(img*255, 0, 255).astype('uint8') img = ImageProc.adjust_gamma(img, 1.8) sc = min(768/cam.width, 768/cam.height) cv2.imshow('stars', cv2.resize(img, None, fx=sc, fy=sc)) cv2.waitKey() print('done')
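# --- Editor's note: illustrative sketch, not part of the original stars.py
# above. Stars.magnitude_to_spectral_flux_density converts a Johnson V
# magnitude to a spectral flux density by scaling the V-band zero point
# 363.1e-11 erg/cm2/s/A (Bessell et al. 1998) by 10**(-0.4 * mag); the
# standalone function below (hypothetical name) repeats that arithmetic.
import numpy as np


def mag_v_to_flux_density(mag_v):
    # zero point converted from erg/cm2/s/A to W/m3:
    # erg = 1e-7 J, cm2 = 1e-4 m2, angstrom = 1e-10 m
    phi0 = 363.1e-11 * 1e-7 / 1e-4 / 1e-10  # ~3.63e-2 W/m3 at mag_v = 0
    return phi0 * np.power(10.0, -0.4 * np.asarray(mag_v, dtype=float))


# a mag 10 star is exactly 1e4 times fainter than a mag 0 star
print(mag_v_to_flux_density([0.0, 10.0]))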
mit
alsrgv/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
3
32746
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementations of different data feeders to provide data for TF trainer (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ # TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues. from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.deprecation import deprecated # pylint: disable=g-multiple-import,g-bad-import-order from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels # pylint: enable=g-multiple-import,g-bad-import-order def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None): """Returns shape for input and output of the data feeder.""" x_is_dict, y_is_dict = isinstance( x_shape, dict), y_shape is not None and isinstance(y_shape, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) if batch_size is None: batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0] elif batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) if x_is_dict: input_shape = {} for k, v in list(x_shape.items()): input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1]) else: x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1] input_shape = [batch_size] + x_shape if y_shape is None: return input_shape, None, batch_size def out_el_shape(out_shape, num_classes): out_shape = list(out_shape[1:]) if len(out_shape) > 1 else [] # Skip first dimension if it is 1. 
if out_shape and out_shape[0] == 1: out_shape = out_shape[1:] if num_classes is not None and num_classes > 1: return [batch_size] + out_shape + [num_classes] else: return [batch_size] + out_shape if not y_is_dict: output_shape = out_el_shape(y_shape, n_classes) else: output_shape = dict([(k, out_el_shape(v, n_classes[k] if n_classes is not None and k in n_classes else None)) for k, v in list(y_shape.items())]) return input_shape, output_shape, batch_size def _data_type_filter(x, y): """Filter data types into acceptable format.""" if HAS_DASK: x = extract_dask_data(x) if y is not None: y = extract_dask_labels(y) if HAS_PANDAS: x = extract_pandas_data(x) if y is not None: y = extract_pandas_labels(y) return x, y def _is_iterable(x): return hasattr(x, 'next') or hasattr(x, '__next__') @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_train_data_feeder(x, y, n_classes, batch_size=None, shuffle=True, epochs=None): """Create data feeder, to sample inputs from dataset. If `x` and `y` are iterators, use `StreamingDataFeeder`. Args: x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also supports iterables. y: numpy, pandas or Dask array or dictionary of aforementioned. Also supports iterables. n_classes: number of classes. Must be None or same type as y. In case, `y` is `dict` (or iterable which returns dict) such that `n_classes[key] = n_classes for y[key]` batch_size: size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: DataFeeder object that returns training data. Raises: ValueError: if one of `x` and `y` is iterable and the other is not. """ x, y = _data_type_filter(x, y) if HAS_DASK: # pylint: disable=g-import-not-at-top import dask.dataframe as dd if (isinstance(x, (dd.Series, dd.DataFrame)) and (y is None or isinstance(y, (dd.Series, dd.DataFrame)))): data_feeder_cls = DaskDataFeeder else: data_feeder_cls = DataFeeder else: data_feeder_cls = DataFeeder if _is_iterable(x): if y is not None and not _is_iterable(y): raise ValueError('Both x and y should be iterators for ' 'streaming learning to work.') return StreamingDataFeeder(x, y, n_classes, batch_size) return data_feeder_cls( x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs) def _batch_data(x, batch_size=None): if (batch_size is not None) and (batch_size <= 0): raise ValueError('Invalid batch_size %d.' % batch_size) x_first_el = six.next(x) x = itertools.chain([x_first_el], x) chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False for data in x: if isinstance(data, dict): for k, v in list(data.items()): chunk[k].append(v) if (batch_size is not None) and (len(chunk[k]) >= batch_size): chunk[k] = np.matrix(chunk[k]) chunk_filled = True if chunk_filled: yield chunk chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False else: chunk.append(data) if (batch_size is not None) and (len(chunk) >= batch_size): yield np.matrix(chunk) chunk = [] if isinstance(x_first_el, dict): for k, v in list(data.items()): chunk[k] = np.matrix(chunk[k]) yield chunk else: yield np.matrix(chunk) @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_predict_data_feeder(x, batch_size=None): """Returns an iterable for feeding into predict step. Args: x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports iterable. batch_size: Size of batches to split data into. 
If `None`, returns one batch of full size. Returns: List or iterator (or dictionary thereof) of parts of data to predict on. Raises: ValueError: if `batch_size` <= 0. """ if HAS_DASK: x = extract_dask_data(x) if HAS_PANDAS: x = extract_pandas_data(x) if _is_iterable(x): return _batch_data(x, batch_size) if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) if batch_size is not None: if batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) n_batches = int(math.ceil(float(len(x)) / batch_size)) return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)] return [x] @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_processor_data_feeder(x): """Sets up processor iterable. Args: x: numpy, pandas or iterable. Returns: Iterable of data to process. """ if HAS_PANDAS: x = extract_pandas_matrix(x) return x @deprecated(None, 'Please convert numpy dtypes explicitly.') def check_array(array, dtype): """Checks array on dtype and converts it if different. Args: array: Input array. dtype: Expected dtype. Returns: Original array or converted. """ # skip check if array is instance of other classes, e.g. h5py.Dataset # to avoid copying array and loading whole data into memory if isinstance(array, (np.ndarray, list)): array = np.array(array, dtype=dtype, order=None, copy=False) return array def _access(data, iloc): """Accesses an element from collection, using integer location based indexing. Args: data: array-like. The collection to access iloc: `int` or `list` of `int`s. Location(s) to access in `collection` Returns: The element of `a` found at location(s) `iloc`. """ if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame): return data.iloc[iloc] return data[iloc] def _check_dtype(dtype): if dtypes.as_dtype(dtype) == dtypes.float64: logging.warn( 'float64 is not supported by many models, consider casting to float32.') return dtype class DataFeeder(object): """Data feeder is an example class to sample data for TF trainer. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. """ @deprecated(None, 'Please use tensorflow/transform or tf.data.') def __init__(self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None, epochs=None): """Initializes a DataFeeder instance. Args: x: One feature sample which can either Nd numpy matrix of shape `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix. y: label vector, either floats for regression or class id for classification. If matrix, will consider as a sequence of labels. Can be `None` for unsupervised setting. Also supports dictionary of labels. n_classes: Number of classes, 0 and 1 are considered regression, `None` will pass through the input labels without one-hot conversion. Also, if `y` is `dict`, then `n_classes` must be `dict` such that `n_classes[key] = n_classes for label y[key]`, `None` otherwise. batch_size: Mini-batch size to accumulate samples in one mini batch. shuffle: Whether to shuffle `x`. random_state: Numpy `RandomState` object to reproduce sampling. epochs: Number of times to iterate over input data before raising `StopIteration` exception. Attributes: x: Input features (ndarray or dictionary of ndarrays). y: Input label (ndarray or dictionary of ndarrays). n_classes: Number of classes (if `None`, pass through indices without one-hot conversion). 
batch_size: Mini-batch size to accumulate. input_shape: Shape of the input (or dictionary of shapes). output_shape: Shape of the output (or dictionary of shapes). input_dtype: DType of input (or dictionary of shapes). output_dtype: DType of output (or dictionary of shapes. """ x_is_dict, y_is_dict = isinstance( x, dict), y is not None and isinstance(y, dict) if isinstance(y, list): y = np.array(y) self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items()) ]) if x_is_dict else check_array(x, x.dtype) self._y = None if y is None else (dict( [(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)) # self.n_classes is not None means we're converting raw target indices # to one-hot. if n_classes is not None: if not y_is_dict: y_dtype = ( np.int64 if n_classes is not None and n_classes > 1 else np.float32) self._y = (None if y is None else check_array(y, dtype=y_dtype)) self.n_classes = n_classes self.max_epochs = epochs x_shape = dict([(k, v.shape) for k, v in list(self._x.items()) ]) if x_is_dict else self._x.shape y_shape = dict([(k, v.shape) for k, v in list(self._y.items()) ]) if y_is_dict else None if y is None else self._y.shape self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) # Input dtype matches dtype of x. self._input_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict else _check_dtype(self._x.dtype)) # self._output_dtype == np.float32 when y is None self._output_dtype = ( dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict else (_check_dtype(self._y.dtype) if y is not None else np.float32)) # self.n_classes is None means we're passing in raw target indices if n_classes is not None and y_is_dict: for key in list(n_classes.keys()): if key in self._output_dtype: self._output_dtype[key] = np.float32 self._shuffle = shuffle self.random_state = np.random.RandomState( 42) if random_state is None else random_state if x_is_dict: num_samples = list(self._x.values())[0].shape[0] elif tensor_util.is_tensor(self._x): num_samples = self._x.shape[ 0].value # shape will be a Dimension, extract an int else: num_samples = self._x.shape[0] if self._shuffle: self.indices = self.random_state.permutation(num_samples) else: self.indices = np.array(range(num_samples)) self.offset = 0 self.epoch = 0 self._epoch_placeholder = None @property def x(self): return self._x @property def y(self): return self._y @property def shuffle(self): return self._shuffle @property def input_dtype(self): return self._input_dtype @property def output_dtype(self): return self._output_dtype @property def batch_size(self): return self._batch_size def make_epoch_variable(self): """Adds a placeholder variable for the epoch to the graph. Returns: The epoch placeholder. """ self._epoch_placeholder = array_ops.placeholder( dtypes.int32, [1], name='epoch') return self._epoch_placeholder def input_builder(self): """Builds inputs in the graph. Returns: Two placeholders for inputs and outputs. 
""" def get_placeholder(shape, dtype, name_prepend): if shape is None: return None if isinstance(shape, dict): placeholder = {} for key in list(shape.keys()): placeholder[key] = array_ops.placeholder( dtypes.as_dtype(dtype[key]), [None] + shape[key][1:], name=name_prepend + '_' + key) else: placeholder = array_ops.placeholder( dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend) return placeholder self._input_placeholder = get_placeholder(self.input_shape, self._input_dtype, 'input') self._output_placeholder = get_placeholder(self.output_shape, self._output_dtype, 'output') return self._input_placeholder, self._output_placeholder def set_placeholders(self, input_placeholder, output_placeholder): """Sets placeholders for this data feeder. Args: input_placeholder: Placeholder for `x` variable. Should match shape of the examples in the x dataset. output_placeholder: Placeholder for `y` variable. Should match shape of the examples in the y dataset. Can be `None`. """ self._input_placeholder = input_placeholder self._output_placeholder = output_placeholder def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return { 'epoch': self.epoch, 'offset': self.offset, 'batch_size': self._batch_size } def get_feed_dict_fn(self): """Returns a function that samples data into given placeholders. Returns: A function that when called samples a random subset of batch size from `x` and `y`. """ x_is_dict, y_is_dict = isinstance( self._x, dict), self._y is not None and isinstance(self._y, dict) # Assign input features from random indices. def extract(data, indices): return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if len(data.shape) == 1 else _access(data, indices)) # assign labels from random indices def assign_label(data, shape, dtype, n_classes, indices): shape[0] = indices.shape[0] out = np.zeros(shape, dtype=dtype) for i in xrange(out.shape[0]): sample = indices[i] # self.n_classes is None means we're passing in raw target indices if n_classes is None: out[i] = _access(data, sample) else: if n_classes > 1: if len(shape) == 2: out.itemset((i, int(_access(data, sample))), 1.0) else: for idx, value in enumerate(_access(data, sample)): out.itemset(tuple([i, idx, value]), 1.0) else: out[i] = _access(data, sample) return out def _feed_dict_fn(): """Function that samples data into given placeholders.""" if self.max_epochs is not None and self.epoch + 1 > self.max_epochs: raise StopIteration assert self._input_placeholder is not None feed_dict = {} if self._epoch_placeholder is not None: feed_dict[self._epoch_placeholder.name] = [self.epoch] # Take next batch of indices. 
x_len = list( self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0] end = min(x_len, self.offset + self._batch_size) batch_indices = self.indices[self.offset:end] # adding input placeholder feed_dict.update( dict([(self._input_placeholder[k].name, extract(v, batch_indices)) for k, v in list(self._x.items())]) if x_is_dict else { self._input_placeholder.name: extract(self._x, batch_indices) }) # move offset and reset it if necessary self.offset += self._batch_size if self.offset >= x_len: self.indices = self.random_state.permutation( x_len) if self._shuffle else np.array(range(x_len)) self.offset = 0 self.epoch += 1 # return early if there are no labels if self._output_placeholder is None: return feed_dict # adding output placeholders if y_is_dict: for k, v in list(self._y.items()): n_classes = (self.n_classes[k] if k in self.n_classes else None) if self.n_classes is not None else None shape, dtype = self.output_shape[k], self._output_dtype[k] feed_dict.update({ self._output_placeholder[k].name: assign_label(v, shape, dtype, n_classes, batch_indices) }) else: shape, dtype, n_classes = (self.output_shape, self._output_dtype, self.n_classes) feed_dict.update({ self._output_placeholder.name: assign_label(self._y, shape, dtype, n_classes, batch_indices) }) return feed_dict return _feed_dict_fn class StreamingDataFeeder(DataFeeder): """Data feeder for TF trainer that reads data from iterator. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Streaming data feeder allows reading data as it comes in from disk or somewhere else. It is common to have these iterators rotate infinitely over the dataset, to allow control of how much to learn on the trainer side. """ def __init__(self, x, y, n_classes, batch_size): """Initializes a StreamingDataFeeder instance. Args: x: iterator, each element of which returns one feature sample. A sample can be an Nd numpy matrix or a dictionary of Nd numpy matrices. y: iterator, each element of which returns one label sample. A sample can be an Nd numpy matrix or a dictionary of Nd numpy matrices with 1 or many classes / regression values. n_classes: indicator of how many classes the corresponding label sample has for the purposes of one-hot conversion of the label. In the case where `y` is a dictionary, `n_classes` must be a dictionary (with the same keys as `y`) of how many classes there are in each label in `y`. If a key is present in `y` and missing in `n_classes`, the value is assumed to be `None` and no one-hot conversion will be applied to the label with that key. batch_size: Mini batch size to accumulate samples in one batch. If set to `None`, the iterator is assumed to return already batched elements. Attributes: x: input features (or dictionary of input features). y: input label (or dictionary of output labels). n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input (can be a dictionary depending on `x`). output_shape: shape of the output (can be a dictionary depending on `y`). input_dtype: dtype of input (can be a dictionary depending on `x`). output_dtype: dtype of output (can be a dictionary depending on `y`).
""" # pylint: disable=invalid-name,super-init-not-called x_first_el = six.next(x) self._x = itertools.chain([x_first_el], x) if y is not None: y_first_el = six.next(y) self._y = itertools.chain([y_first_el], y) else: y_first_el = None self._y = None self.n_classes = n_classes x_is_dict = isinstance(x_first_el, dict) y_is_dict = y is not None and isinstance(y_first_el, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) # extract shapes for first_elements if x_is_dict: x_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())]) else: x_first_el_shape = [1] + list(x_first_el.shape) if y_is_dict: y_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())]) elif y is None: y_first_el_shape = None else: y_first_el_shape = ( [1] + list(y_first_el[0].shape if isinstance(y_first_el, list) else y_first_el.shape)) self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_first_el_shape, y_first_el_shape, n_classes, batch_size) # Input dtype of x_first_el. if x_is_dict: self._input_dtype = dict( [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())]) else: self._input_dtype = _check_dtype(x_first_el.dtype) # Output dtype of y_first_el. def check_y_dtype(el): if isinstance(el, np.ndarray): return el.dtype elif isinstance(el, list): return check_y_dtype(el[0]) else: return _check_dtype(np.dtype(type(el))) # Output types are floats, due to both softmaxes and regression req. if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0: self._output_dtype = np.float32 elif y_is_dict: self._output_dtype = dict( [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())]) elif y is None: self._output_dtype = None else: self._output_dtype = check_y_dtype(y_first_el) def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self): """Returns a function, that will sample data and provide it to placeholders. Returns: A function that when called samples a random subset of batch size from x and y. """ self.stopped = False def _feed_dict_fn(): """Samples data and provides it to placeholders. Returns: `dict` of input and output tensors. 
""" def init_array(shape, dtype): """Initialize array of given shape or dict of shapes and dtype.""" if shape is None: return None elif isinstance(shape, dict): return dict( [(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())]) else: return np.zeros(shape, dtype=dtype) def put_data_array(dest, index, source=None, n_classes=None): """Puts data array into container.""" if source is None: dest = dest[:index] elif n_classes is not None and n_classes > 1: if len(self.output_shape) == 2: dest.itemset((index, source), 1.0) else: for idx, value in enumerate(source): dest.itemset(tuple([index, idx, value]), 1.0) else: if len(dest.shape) > 1: dest[index, :] = source else: dest[index] = source[0] if isinstance(source, list) else source return dest def put_data_array_or_dict(holder, index, data=None, n_classes=None): """Puts data array or data dictionary into container.""" if holder is None: return None if isinstance(holder, dict): if data is None: data = {k: None for k in holder.keys()} assert isinstance(data, dict) for k in holder.keys(): num_classes = n_classes[k] if (n_classes is not None and k in n_classes) else None holder[k] = put_data_array(holder[k], index, data[k], num_classes) else: holder = put_data_array(holder, index, data, n_classes) return holder if self.stopped: raise StopIteration inp = init_array(self.input_shape, self._input_dtype) out = init_array(self.output_shape, self._output_dtype) for i in xrange(self._batch_size): # Add handling when queue ends. try: next_inp = six.next(self._x) inp = put_data_array_or_dict(inp, i, next_inp, None) except StopIteration: self.stopped = True if i == 0: raise inp = put_data_array_or_dict(inp, i, None, None) out = put_data_array_or_dict(out, i, None, None) break if self._y is not None: next_out = six.next(self._y) out = put_data_array_or_dict(out, i, next_out, self.n_classes) # creating feed_dict if isinstance(inp, dict): feed_dict = dict([(self._input_placeholder[k].name, inp[k]) for k in list(self._input_placeholder.keys())]) else: feed_dict = {self._input_placeholder.name: inp} if self._y is not None: if isinstance(out, dict): feed_dict.update( dict([(self._output_placeholder[k].name, out[k]) for k in list(self._output_placeholder.keys())])) else: feed_dict.update({self._output_placeholder.name: out}) return feed_dict return _feed_dict_fn class DaskDataFeeder(object): """Data feeder for that reads data from dask.Series and dask.DataFrame. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Numpy arrays can be serialized to disk and it's possible to do random seeks into them. DaskDataFeeder will remove requirement to have full dataset in the memory and still do random seeks for sampling of batches. """ @deprecated(None, 'Please feed input to tf.data to support dask.') def __init__(self, x, y, n_classes, batch_size, shuffle=True, random_state=None, epochs=None): """Initializes a DaskDataFeeder instance. Args: x: iterator that returns for each element, returns features. y: iterator that returns for each element, returns 1 or many classes / regression values. n_classes: indicator of how many classes the label has. batch_size: Mini batch size to accumulate. shuffle: Whether to shuffle the inputs. random_state: random state for RNG. Note that it will mutate so use a int value for this if you want consistent sized batches. epochs: Number of epochs to run. Attributes: x: input features. y: input label. n_classes: number of classes. 
batch_size: mini batch size to accumulate. input_shape: shape of the input. output_shape: shape of the output. input_dtype: dtype of input. output_dtype: dtype of output. Raises: ValueError: if `x` or `y` are `dict`, as they are not supported currently. """ if isinstance(x, dict) or isinstance(y, dict): raise ValueError( 'DaskDataFeeder does not support dictionaries at the moment.') # pylint: disable=invalid-name,super-init-not-called import dask.dataframe as dd # pylint: disable=g-import-not-at-top # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas self._x = x self._y = y # save column names self._x_columns = list(x.columns) if isinstance(y.columns[0], str): self._y_columns = list(y.columns) else: # deal with cases where two DFs have overlapped default numeric colnames self._y_columns = len(self._x_columns) + 1 self._y = self._y.rename(columns={y.columns[0]: self._y_columns}) # TODO(terrytangyuan): deal with unsupervised cases # combine into a data frame self.df = dd.multi.concat([self._x, self._y], axis=1) self.n_classes = n_classes x_count = x.count().compute()[0] x_shape = (x_count, len(self._x.columns)) y_shape = (x_count, len(self._y.columns)) # TODO(terrytangyuan): Add support for shuffle and epochs. self._shuffle = shuffle self.epochs = epochs self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) self.sample_fraction = self._batch_size / float(x_count) self._input_dtype = _check_dtype(self._x.dtypes[0]) self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns]) if random_state is None: self.random_state = 66 else: self.random_state = random_state def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self, input_placeholder, output_placeholder): """Returns a function, that will sample data and provide it to placeholders. Args: input_placeholder: tf.compat.v1.placeholder for input features mini batch. output_placeholder: tf.compat.v1.placeholder for output labels. Returns: A function that when called samples a random subset of batch size from x and y. """ def _feed_dict_fn(): """Samples data and provides it to placeholders.""" # TODO(ipolosukhin): option for with/without replacement (dev version of # dask) sample = self.df.random_split( [self.sample_fraction, 1 - self.sample_fraction], random_state=self.random_state) inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist() out = extract_pandas_matrix(sample[0][self._y_columns].compute()) # convert to correct dtype inp = np.array(inp, dtype=self._input_dtype) # one-hot encode out for each class for cross entropy loss if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if not isinstance(out, pd.Series): out = out.flatten() out_max = self._y.max().compute().values[0] encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype) encoded_out[np.arange(out.size), out] = 1 return {input_placeholder.name: inp, output_placeholder.name: encoded_out} return _feed_dict_fn
apache-2.0
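The deprecation notices throughout the data feeder module above point to tf.data as the replacement. The following is a minimal sketch, not part of the original file, of how the shuffle/repeat/batch behaviour that DataFeeder implements maps onto that API; it assumes TensorFlow 2.x with eager execution, and the helper name make_dataset and the sample arrays are illustrative only.

import numpy as np
import tensorflow as tf

def make_dataset(x, y, batch_size=32, shuffle=True, epochs=None):
  # Rough tf.data equivalent of DataFeeder(x, y, ...): reshuffle each epoch,
  # repeat for the requested number of epochs, then emit mini-batches.
  dataset = tf.data.Dataset.from_tensor_slices((x, y))
  if shuffle:
    dataset = dataset.shuffle(buffer_size=len(x))
  dataset = dataset.repeat(epochs)  # epochs=None repeats indefinitely
  return dataset.batch(batch_size)

x = np.random.rand(100, 4).astype(np.float32)
y = np.random.randint(0, 2, size=100).astype(np.int64)
for features, labels in make_dataset(x, y, batch_size=10, epochs=1):
  pass  # features/labels replace the feed_dict produced by get_feed_dict_fn()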
billy-inn/scikit-learn
sklearn/tests/test_cross_validation.py
27
41664
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy import stats from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. 
/ (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} X = np.ones((10, 2)) X_sparse = coo_matrix(X) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) y = np.arange(10) // 2 ############################################################################## # Tests def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, expected_n_iter=None, n_samples=None): # Check that a all the samples appear at least once in a test fold if expected_n_iter is not None: assert_equal(len(cv), expected_n_iter) else: expected_n_iter = len(cv) collected_test_samples = set() iterations = 0 for train, test in cv: check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_iter) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.KFold, 3, 4) # Check that a warning is raised if the least populated class has too few # members. y = [3, 3, -1, -1, 2] cv = assert_warns_message(Warning, "The least populated class", cval.StratifiedKFold, y, 3) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y)) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) assert_raises(ValueError, cval.StratifiedKFold, y, 0) assert_raises(ValueError, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) 
assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle) for i in range(11, 17)]: sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), skf.n) def test_shuffle_kfold(): # Check the indices are shuffled properly, and that all indices are # returned in the different test folds kf = cval.KFold(300, 3, shuffle=True, random_state=0) ind = np.arange(300) all_folds = None for train, test in kf: sorted_array = np.arange(100) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(101, 200) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(201, 300) assert_true(np.any(sorted_array != ind[train])) if all_folds is None: all_folds = ind[test].copy() else: all_folds = np.concatenate((all_folds, ind[test])) all_folds.sort() assert_array_equal(all_folds, ind) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage labels = [0] * 20 + [1] * 20 kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0)) kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1)) for (_, test0), (_, test1) in zip(kf0, kf1): assert_true(set(test0) != set(test1)) check_cv_coverage(kf0, expected_n_iter=5, n_samples=40) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. 
We can highlight this fact be computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.96) than than the non # shuffling variant (around 0.86). digits = load_digits() X, y = digits.data[:800], digits.target[:800] model = SVC(C=10, gamma=0.005) n = len(y) cv = cval.KFold(n, 5, shuffle=False) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = cval.KFold(n, 5, shuffle=True, random_state=0) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) cv = cval.KFold(n, 5, shuffle=True, random_state=1) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8) # Train size or test size too small assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50) ] for y in ys: sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0) for train, test in sss: 
assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(y[train].size + y[test].size, y.size) assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_iter = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: p = bf.pmf(count) assert_true(p > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): labels = np.array((n_samples // 2) * [0, 1]) splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits = 0 for train, test in splits: n_splits += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits, n_iter) assert_equal(len(train), splits.n_train) assert_equal(len(test), splits.n_test) assert_equal(len(set(train).intersection(test)), 0) label_counts = np.unique(labels) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(splits.n_train + splits.n_test, len(labels)) assert_equal(len(label_counts), 2) ex_test_p = float(splits.n_test) / n_samples ex_train_p = float(splits.n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. 
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target 
linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) 
with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False) assert_true(isinstance(X_train_arr, np.ndarray)) assert_true(isinstance(X_test_arr, np.ndarray)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. 
determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error") expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(mse_scores, expected_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] def 
test_shufflesplit_errors():
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)


def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    ss = cval.ShuffleSplit(10, random_state=21)
    assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))


def test_safe_split_with_precomputed_kernel():
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]

    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))

    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))


def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)


def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)


def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)


def test_check_cv_return_types():
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))


def test_cross_val_score_multilabel():
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])


def test_cross_val_predict():
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))

    est = Ridge()

    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])

    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)

    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))

    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))

    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))

    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    def bad_cv():
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())


def test_cross_val_predict_input_types():
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))

    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))

    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())

    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))


def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)


def test_sparse_fit_params():
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))


def test_check_is_partition():
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))

    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))
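# Illustrative sketch only (not part of the original test suite): a minimal
# re-implementation of the partition property that `test_check_is_partition`
# exercises above, assuming only NumPy. The helper name `is_partition` is
# hypothetical; the real check lives in
# sklearn.cross_validation._check_is_partition.
import numpy as np


def is_partition(indices, n):
    """Return True if `indices` contains each of 0..n-1 exactly once."""
    indices = np.asarray(indices)
    if indices.size == 0:
        return n == 0
    # A partition of range(n) must have exactly n entries ...
    if indices.size != n:
        return False
    # ... all of which lie inside [0, n) ...
    if indices.min() < 0 or indices.max() >= n:
        return False
    # ... and cover every position (no duplicates, given the size check).
    hit = np.zeros(n, dtype=bool)
    hit[indices] = True
    return bool(hit.all())


if __name__ == "__main__":
    p = np.arange(100)
    assert is_partition(p, 100)                     # a permutation qualifies
    assert not is_partition(np.delete(p, 23), 100)  # a missing index fails
    p[0] = 23
    assert not is_partition(p, 100)                 # a duplicated index fails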
bsd-3-clause
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/scipy/stats/morestats.py
4
94486
# Author: Travis Oliphant, 2002 # # Further updates and enhancements by many SciPy developers. # from __future__ import division, print_function, absolute_import import math import warnings from collections import namedtuple import numpy as np from numpy import (isscalar, r_, log, around, unique, asarray, zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil, floor, array, poly1d, compress, pi, exp, ravel, angle, count_nonzero) from numpy.testing.decorators import setastest from scipy._lib.six import string_types from scipy import optimize from scipy import special from . import statlib from . import stats from .stats import find_repeats from .contingency import chi2_contingency from . import distributions from ._distn_infrastructure import rv_generic __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'wilcoxon', 'median_test', 'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp' ] Mean = namedtuple('Mean', ('statistic', 'minmax')) Variance = namedtuple('Variance', ('statistic', 'minmax')) Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) def bayes_mvs(data, alpha=0.90): r""" Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. See Also -------- mvsdist Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 2006. 
Examples -------- First a basic example to demonstrate the outputs: >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.bayes_mvs(data) >>> mean Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467)) >>> var Variance(statistic=10.0, minmax=(3.1767242068607087, 24.459103821334018)) >>> std Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295)) Now we generate some normally distributed random data, and get estimates of mean and standard deviation with 95% confidence intervals for those estimates: >>> n_samples = 100000 >>> data = stats.norm.rvs(size=n_samples) >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.hist(data, bins=100, normed=True, label='Histogram of data') >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', ... alpha=0.2, label=r'Estimated mean (95% limits)') >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, ... label=r'Estimated scale (95% limits)') >>> ax.legend(fontsize=10) >>> ax.set_xlim([-4, 4]) >>> ax.set_ylim([0, 0.5]) >>> plt.show() """ m, v, s = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) m_res = Mean(m.mean(), m.interval(alpha)) v_res = Variance(v.mean(), v.interval(alpha)) s_res = Std_dev(s.mean(), s.interval(alpha)) return m_res, v_res, s_res def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data vdist : "frozen" distribution object Distribution object representing the variance of the data sdist : "frozen" distribution object Distribution object representing the standard deviation of the data See Also -------- bayes_mvs Notes ----- The return values from ``bayes_mvs(data)`` is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 2006. Examples -------- >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if n < 2: raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if n > 1000: # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) else: nm1 = n - 1 fac = n * C / 2. val = nm1 / 2. 
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) vdist = distributions.invgamma(val, scale=fac) return mdist, vdist, sdist def kstat(data, n=2): r""" Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic k_n is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- For a sample size n, the first few k-statistics are given by: .. math:: k_{1} = \mu k_{2} = \frac{n}{n-1} m_{2} k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} where ``:math:\mu`` is the sample mean, ``:math:m_2`` is the sample variance, and ``:math:m_i`` is the i-th sample central moment. References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html Examples -------- >>> from scipy import stats >>> rndm = np.random.RandomState(1234) As sample size increases, n-th moment and n-th k-statistic converge to the same number (although they aren't identical). In the case of the normal distribution, they converge to zero. >>> for n in [2, 3, 4, 5, 6, 7]: ... x = rndm.normal(size=10**n) ... m, k = stats.moment(x, 3), stats.kstat(x, 3) ... print("%.3g %.3g %.3g" % (m, k, m-k)) -0.631 -0.651 0.0194 0.0282 0.0283 -8.49e-05 -0.0454 -0.0454 1.36e-05 7.53e-05 7.53e-05 -2.26e-09 0.00166 0.00166 -4.99e-09 -2.88e-06 -2.88e-06 8.63e-13 """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = np.zeros(n + 1, np.float64) data = ravel(data) N = data.size # raise ValueError on empty input if N == 0: raise ValueError("Data input must not be empty") # on nan input, return nan without warning if np.isnan(np.sum(data)): return np.nan for k in range(1, n + 1): S[k] = np.sum(data**k, axis=0) if n == 1: return S[1] * 1.0/N elif n == 2: return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) elif n == 3: return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) elif n == 4: return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / (N*(N-1.0)*(N-2.0)*(N-3.0))) else: raise ValueError("Should not be here.") def kstatvar(data, n=2): r""" Returns an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. See Also -------- kstat: Returns the n-th k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- The variances of the first few k-statistics are given by: .. 
math:: var(k_{1}) = \frac{\kappa^2}{n} var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + \frac{9 \kappa^2_{3}}{n - 1} + \frac{6 n \kappa^3_{2}}{(n-1) (n-2)} var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + \frac{48 \kappa_{3} \kappa_5}{n - 1} + \frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} """ data = ravel(data) N = len(data) if n == 1: return kstat(data, n=2) * 1.0/N elif n == 2: k2 = kstat(data, n=2) k4 = kstat(data, n=4) return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def _calc_uniform_order_statistic_medians(n): """ Approximations of uniform order statistic medians. Parameters ---------- n : int Sample size. Returns ------- v : 1d float array Approximations of the order statistic medians. References ---------- .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. Examples -------- Order statistics of the uniform distribution on the unit interval are marginally distributed according to beta distributions. The expectations of these order statistic are evenly spaced across the interval, but the distributions are skewed in a way that pushes the medians slightly towards the endpoints of the unit interval: >>> n = 4 >>> k = np.arange(1, n+1) >>> from scipy.stats import beta >>> a = k >>> b = n-k+1 >>> beta.mean(a, b) array([ 0.2, 0.4, 0.6, 0.8]) >>> beta.median(a, b) array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642]) The Filliben approximation uses the exact medians of the smallest and greatest order statistics, and the remaining medians are approximated by points spread evenly across a sub-interval of the unit interval: >>> from scipy.morestats import _calc_uniform_order_statistic_medians >>> _calc_uniform_order_statistic_medians(n) array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642]) This plot shows the skewed distributions of the order statistics of a sample of size four from a uniform distribution on the unit interval: >>> import matplotlib.pyplot as plt >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] >>> plt.figure() >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) """ v = np.zeros(n, dtype=np.float64) v[-1] = 0.5**(1.0 / n) v[0] = 1 - v[-1] i = np.arange(2, n) v[1:-1] = (i - 0.3175) / (n + 0.365) return v def _parse_dist_kw(dist, enforce_subclass=True): """Parse `dist` keyword. Parameters ---------- dist : str or stats.distributions instance. Several functions take `dist` as a keyword, hence this utility function. enforce_subclass : bool, optional If True (default), `dist` needs to be a `_distn_infrastructure.rv_generic` instance. It can sometimes be useful to set this keyword to False, if a function wants to accept objects that just look somewhat like such an instance (for example, they have a ``ppf`` method). 
""" if isinstance(dist, rv_generic): pass elif isinstance(dist, string_types): try: dist = getattr(distributions, dist) except AttributeError: raise ValueError("%s is not a valid distribution name" % dist) elif enforce_subclass: msg = ("`dist` should be a stats.distributions instance or a string " "with the name of such a distribution.") raise ValueError(msg) return dist def _add_axis_labels_title(plot, xlabel, ylabel, title): """Helper function to add axes labels and a title to stats plots""" try: if hasattr(plot, 'set_title'): # Matplotlib Axes instance or something that looks like it plot.set_title(title) plot.set_xlabel(xlabel) plot.set_ylabel(ylabel) else: # matplotlib.pyplot module plot.title(title) plot.xlabel(xlabel) plot.ylabel(ylabel) except: # Not an MPL object or something that looks (enough) like it. # Don't crash on adding labels or title pass def probplot(x, sparams=(), dist='norm', fit=True, plot=None): """ Calculate quantiles for a probability plot, and optionally show the plot. Generates a probability plot of sample data against the quantiles of a specified theoretical distribution (the normal distribution by default). `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (shape parameters plus location and scale). dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). `osr` is simply sorted input `x`. For details on how `osm` is calculated see the Notes section. (slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. `probplot` generates a probability plot, which should not be confused with a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this type, see ``statsmodels.api.ProbPlot``. The formula used for the theoretical quantiles (horizontal axis of the probability plot) is Filliben's estimate:: quantiles = dist.ppf(val), for 0.5**(1/n), for i = n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 1 - 0.5**(1/n), for i = 1 where ``i`` indicates the i-th ordered value and ``n`` is the total number of values. 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> nsample = 100 >>> np.random.seed(7654321) A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample) >>> res = stats.probplot(x, plot=plt) A mixture of two normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample//2,2)).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) >>> res = stats.probplot(x, plot=plt) Produce a new figure with a loggamma distribution, using the ``dist`` and ``sparams`` keywords: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> x = stats.loggamma.rvs(c=2.5, size=500) >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") Show the results with Matplotlib: >>> plt.show() """ x = np.asarray(x) _perform_fit = fit or (plot is not None) if x.size == 0: if _perform_fit: return (x, x), (np.nan, np.nan, 0.0) else: return x, x osm_uniform = _calc_uniform_order_statistic_medians(len(x)) dist = _parse_dist_kw(dist, enforce_subclass=False) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) osm = dist.ppf(osm_uniform, *sparams) osr = sort(x) if _perform_fit: # perform a linear least squares fit. slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') _add_axis_labels_title(plot, xlabel='Quantiles', ylabel='Ordered Values', title='Probability Plot') # Add R^2 value to the plot as text xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "$R^2=%1.4f$" % r**2) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): """ Calculate the shape parameter that maximizes the PPCC The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. ppcc_max returns the shape parameter that would maximize the probability plot correlation coefficient for the given data to a one-parameter family of distributions. Parameters ---------- x : array_like Input array. brack : tuple, optional Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) then they are assumed to be a starting interval for a downhill bracket search (see `scipy.optimize.brent`). dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. Returns ------- shape_value : float The shape parameter at which the probability plot correlation coefficient reaches its max value. See also -------- ppcc_plot, probplot, boxcox Notes ----- The brack keyword serves as a starting point which is useful in corner cases. One can use a plot to obtain a rough visual estimate of the location for the maximum to start the search near it. References ---------- .. 
[1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, ... random_state=1234567) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(8, 6)) >>> ax = fig.add_subplot(111) >>> res = stats.ppcc_plot(x, -5, 5, plot=ax) We calculate the value where the shape should reach its maximum and a red line is drawn there. The line should coincide with the highest point in the ppcc_plot. >>> max = stats.ppcc_max(x) >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ dist = _parse_dist_kw(dist) osm_uniform = _calc_uniform_order_statistic_medians(len(x)) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): """ Calculate and optionally plot probability plot correlation coefficient. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. It cannot be used for distributions without shape parameters (like the normal distribution) or with multiple shape parameters. By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed distributions via an approximately normal one, and is therefore particularly useful in practice. Parameters ---------- x : array_like Input array. a, b: scalar Lower and upper bounds of the shape parameter to use. dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. plot : object, optional If given, plots PPCC against the shape parameter. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `a` to `b`). Returns ------- svals : ndarray The shape values for which `ppcc` was calculated. ppcc : ndarray The calculated probability plot correlation coefficient values. See also -------- ppcc_max, probplot, boxcox_normplot, tukeylambda References ---------- J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 
Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234567) >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> fig = plt.figure(figsize=(12, 4)) >>> ax1 = fig.add_subplot(131) >>> ax2 = fig.add_subplot(132) >>> ax3 = fig.add_subplot(133) >>> res = stats.probplot(x, plot=ax1) >>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2) >>> res = stats.ppcc_plot(x, -5, 5, plot=ax3) >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ if b <= a: raise ValueError("`b` has to be larger than `a`.") svals = np.linspace(a, b, num=N) ppcc = np.empty_like(svals) for k, sval in enumerate(svals): _, r2 = probplot(x, sval, dist=dist, fit=True) ppcc[k] = r2[-1] if plot is not None: plot.plot(svals, ppcc, 'x') _add_axis_labels_title(plot, xlabel='Shape Values', ylabel='Prob Plot Corr. Coef.', title='(%s) PPCC Plot' % dist) return svals, ppcc def boxcox_llf(lmb, data): r"""The boxcox log-likelihood function. Parameters ---------- lmb : scalar Parameter for Box-Cox transformation. See `boxcox` for details. data : array_like Data to calculate Box-Cox log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float or ndarray Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, an array otherwise. See Also -------- boxcox, probplot, boxcox_normplot, boxcox_normmax Notes ----- The Box-Cox log-likelihood function is defined here as .. math:: llf = (\lambda - 1) \sum_i(\log(x_i)) - N/2 \log(\sum_i (y_i - \bar{y})^2 / N), where ``y`` is the Box-Cox transformed input data ``x``. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes >>> np.random.seed(1245) Generate some random variates and calculate Box-Cox log-likelihood values for them for a range of ``lmbda`` values: >>> x = stats.loggamma.rvs(5, loc=10, size=1000) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.boxcox_llf(lmbda, x) Also find the optimal lmbda value with `boxcox`: >>> x_most_normal, lmbda_optimal = stats.boxcox(x) Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Box-Cox log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `boxcox` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.boxcox(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... 
ax_inset.set_title('$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) N = data.shape[0] if N == 0: return np.nan y = boxcox(data, lmb) y_mean = np.mean(y, axis=0) llf = (lmb - 1) * np.sum(np.log(data), axis=0) llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) return llf def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) target = boxcox_llf(lmax, x) - fac def rootfunc(lmbda, data, target): return boxcox_llf(lmbda, data) - target # Find positive endpoint of interval in which answer is to be found newlm = lmax + 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm += 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) # Now find negative interval in the same way newlm = lmax - 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm -= 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) return lmminus, lmplus def boxcox(x, lmbda=None, alpha=None): r""" Return a positive dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. Must be between 0.0 and 1.0. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. (min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and ``alpha`` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given ``alpha``. See Also -------- probplot, boxcox_normplot, boxcox_normmax, boxcox_llf Notes ----- The Box-Cox transform is given by:: y = (x**lmbda - 1) / lmbda, for lmbda > 0 log(x), for lmbda = 0 `boxcox` requires the input data to be positive. Sometimes a Box-Cox transformation provides a shift parameter to achieve this; `boxcox` does not. Such a shift parameter is equivalent to adding a positive constant to `x` before calling `boxcox`. The confidence limits returned when ``alpha`` is provided give the interval where: .. math:: llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared function. References ---------- G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964). 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if any(x <= 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation return special.boxcox(x, lmbda) # If lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = boxcox_normmax(x, method='mle') y = boxcox(x, lmax) if alpha is None: return y, lmax else: # Find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): """Compute optimal Box-Cox transform parameter for input data. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional The starting interval for a downhill bracket search with `optimize.brent`. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. method : str, optional The method to determine the optimal transform parameter (`boxcox` ``lmbda`` parameter). Options are: 'pearsonr' (default) Maximizes the Pearson correlation coefficient between ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be normally-distributed. 'mle' Minimizes the log-likelihood `boxcox_llf`. This is the method used in `boxcox`. 'all' Use all optimization methods available, and return all results. Useful to compare different methods. Returns ------- maxlog : float or ndarray The optimal transform parameter found. An array instead of a scalar for ``method='all'``. See Also -------- boxcox, boxcox_llf, boxcox_normplot Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234) # make this example reproducible Generate some data and determine optimal ``lmbda`` in various ways: >>> x = stats.loggamma.rvs(5, size=30) + 5 >>> y, lmax_mle = stats.boxcox(x) >>> lmax_pearsonr = stats.boxcox_normmax(x) >>> lmax_mle 7.177... >>> lmax_pearsonr 7.916... >>> stats.boxcox_normmax(x, method='all') array([ 7.91667384, 7.17718692]) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax_mle, color='r') >>> ax.axvline(lmax_pearsonr, color='g', ls='--') >>> plt.show() """ def _pearsonr(x, brack): osm_uniform = _calc_uniform_order_statistic_medians(len(x)) xvals = distributions.norm.ppf(osm_uniform) def _eval_pearsonr(lmbda, xvals, samps): # This function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) and # returns ``1 - r`` so that a minimization function maximizes the # correlation. 
y = boxcox(samps, lmbda) yvals = np.sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x)) def _mle(x, brack): def _eval_mle(lmb, data): # function to minimize return -boxcox_llf(lmb, data) return optimize.brent(_eval_mle, brack=brack, args=(x,)) def _all(x, brack): maxlog = np.zeros(2, dtype=float) maxlog[0] = _pearsonr(x, brack) maxlog[1] = _mle(x, brack) return maxlog methods = {'pearsonr': _pearsonr, 'mle': _mle, 'all': _all} if method not in methods.keys(): raise ValueError("Method %s not recognized." % method) optimfunc = methods[method] return optimfunc(x, brack) def boxcox_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in `boxcox` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` for Box-Cox transformations. These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Box-Cox transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Box-Cox plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if lb <= la: raise ValueError("`lb` has to be larger than `la`.") lmbdas = np.linspace(la, lb, num=N) ppcc = lmbdas * 0.0 for i, val in enumerate(lmbdas): # Determine for each lmbda the correlation coefficient of transformed x z = boxcox(x, lmbda=val) _, r2 = probplot(z, dist='norm', fit=True) ppcc[i] = r2[-1] if plot is not None: plot.plot(lmbdas, ppcc, 'x') _add_axis_labels_title(plot, xlabel='$\lambda$', ylabel='Prob Plot Corr. Coef.', title='Box-Cox Normality Plot') return lmbdas, ppcc def shapiro(x, a=None, reta=False): """ Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. a : array_like, optional Array of internal parameters used in the calculation. If these are not given, they will be computed internally. If x has length n, then a must have length n/2. 
reta : bool, optional Whether or not to return the internally computed a values. The default is False. Returns ------- W : float The test statistic. p-value : float The p-value for the hypothesis test. a : array_like, optional If `reta` is True, then these are the internally computed "a" values that may be passed into this function on future calls. See Also -------- anderson : The Anderson-Darling test for normality kstest : The Kolmogorov-Smirnov test for goodness of fit. Notes ----- The algorithm used is described in [4]_ but censoring parameters as described are not implemented. For N > 5000 the W test statistic is accurate but the p-value may not be. The chance of rejecting the null hypothesis when it is true is close to 5% regardless of sample size. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for normality (complete samples), Biometrika, Vol. 52, pp. 591-611. .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk, Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of Statistical Modeling and Analytics, Vol. 2, pp. 21-33. .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4. Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) >>> x = stats.norm.rvs(loc=5, scale=3, size=100) >>> stats.shapiro(x) (0.9772805571556091, 0.08144091814756393) """ if a is not None or reta: warnings.warn("input parameters 'a' and 'reta' are scheduled to be " "removed in version 0.18.0", FutureWarning) x = np.ravel(x) N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") if a is None: a = zeros(N, 'f') init = 0 else: if len(a) != N // 2: raise ValueError("len(a) must equal len(x)/2") init = 1 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if ifault not in [0, 2]: warnings.warn("Input data for shapiro has range zero. The results " "may not be accurate.") if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") if reta: return w, pw, a else: return w, pw # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of he American Statistical # Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) AndersonResult = namedtuple('AndersonResult', ('statistic', 'critical_values', 'significance_level')) def anderson(x, dist='norm'): """ Anderson-Darling test for data coming from a particular distribution The Anderson-Darling test is a modification of the Kolmogorov- Smirnov test `kstest` for the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. 
Parameters ---------- x : array_like array of sample data dist : {'norm','expon','logistic','gumbel','extreme1'}, optional the type of distribution to test against. The default is 'norm' and 'extreme1' is a synonym for 'gumbel' Returns ------- statistic : float The Anderson-Darling test statistic critical_values : list The critical values for this distribution significance_level : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. Notes ----- Critical values provided are for the following significance levels: normal/exponenential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If A2 is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality , Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. """ if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y - xbar) / s z = distributions.norm.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) elif dist == 'expon': w = y / xbar z = distributions.expon.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_expon / (1.0 + 0.6/N), 3) elif dist == 'logistic': def rootfunc(ab, xj, N): a, b = ab tmp = (xj - a) / b tmp2 = exp(tmp) val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] return array(val) sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) w = (y - sol[0]) / sol[1] z = distributions.logistic.cdf(w) sig = array([25, 10, 5, 2.5, 1, 0.5]) critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) else: # (dist == 'gumbel') or (dist == 'extreme1'): xbar, s = distributions.gumbel_l.fit(x) w = (y - xbar) / s z = distributions.gumbel_l.cdf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) i = arange(1, N + 1) A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0) return AndersonResult(A2, critical, sig) def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 7 of Scholz and Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. 
k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987. """ A2akN = 0. Z_ssorted_left = Z.searchsorted(Zstar, 'left') if N == Zstar.size: lj = 1. else: lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left Bj = Z_ssorted_left + lj / 2. for i in arange(0, k): s = np.sort(samples[i]) s_ssorted_right = s.searchsorted(Zstar, side='right') Mij = s_ssorted_right.astype(float) fij = s_ssorted_right - s.searchsorted(Zstar, 'left') Mij -= fij / 2. inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) A2akN += inner.sum() / n[i] A2akN *= (N - 1.) / N return A2akN def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987. """ A2kN = 0. lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left') Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:-1], side='right') inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) A2kN += inner.sum() / n[i] return A2kN Anderson_ksampResult = namedtuple('Anderson_ksampResult', ('statistic', 'critical_values', 'significance_level')) def anderson_ksamp(samples, midrank=True): """The Anderson-Darling test for k-samples. The k-sample Anderson-Darling test is a modification of the one-sample Anderson-Darling test. It tests the null hypothesis that k-samples are drawn from the same population without having to specify the distribution function of that population. The critical values depend on the number of samples. Parameters ---------- samples : sequence of 1-D array_like Array of sample data in arrays. midrank : bool, optional Type of Anderson-Darling test which is computed. Default (True) is the midrank test applicable to continuous and discrete populations. If False, the right side empirical distribution is used. Returns ------- statistic : float Normalized k-sample Anderson-Darling test statistic. critical_values : array The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. significance_level : float An approximate significance level at which the null hypothesis for the provided samples can be rejected. Raises ------ ValueError If less than 2 samples are provided, a sample is empty, or no distinct observations are in the samples. See Also -------- ks_2samp : 2 sample Kolmogorov-Smirnov test anderson : 1 sample Anderson-Darling test Notes ----- [1]_ Defines three versions of the k-sample Anderson-Darling test: one for continuous distributions and two for discrete distributions, in which ties between samples may occur. The default of this routine is to compute the version based on the midrank empirical distribution function. This test is applicable to continuous and discrete data. If midrank is set to False, the right side empirical distribution is used for a test for discrete data. According to [1]_, the two discrete test statistics differ only slightly if a few collisions due to round-off errors occur in the test not adjusted for ties between samples. .. versionadded:: 0.14.0 References ---------- .. 
[1] Scholz, F. W and Stephens, M. A. (1987), K-Sample Anderson-Darling Tests, Journal of the American Statistical Association, Vol. 82, pp. 918-924. Examples -------- >>> from scipy import stats >>> np.random.seed(314159) The null hypothesis that the two random samples come from the same distribution can be rejected at the 5% level because the returned test value is greater than the critical value for 5% (1.961) but not at the 2.5% level. The interpolation gives an approximate significance level of 3.1%: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(loc=0.5, size=30)]) (2.4615796189876105, array([ 0.325, 1.226, 1.961, 2.718, 3.752]), 0.03134990135800783) The null hypothesis cannot be rejected for three samples from an identical distribution. The approximate p-value (87%) has to be computed by extrapolation and may not be very accurate: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(size=30), np.random.normal(size=20)]) (-0.73091722665244196, array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]), 0.8789283903979661) """ k = len(samples) if (k < 2): raise ValueError("anderson_ksamp needs at least two samples") samples = list(map(np.asarray, samples)) Z = np.sort(np.hstack(samples)) N = Z.size Zstar = np.unique(Z) if Zstar.size < 2: raise ValueError("anderson_ksamp needs more than one distinct " "observation") n = np.array([sample.size for sample in samples]) if any(n == 0): raise ValueError("anderson_ksamp encountered sample without " "observations") if midrank: A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) else: A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) H = (1. / n).sum() hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() h = hs_cs[-1] + 1 g = (hs_cs / arange(2, N)).sum() a = (4*g - 6) * (k - 1) + (10 - 6*g)*H b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h d = (2*h + 6)*k**2 - 4*h*k sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) m = k - 1 A2 = (A2kN - m) / math.sqrt(sigmasq) # The b_i values are the interpolation coefficients from Table 2 # of Scholz and Stephens 1987 b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326]) b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822]) b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396]) critical = b0 + b1 / math.sqrt(m) + b2 / m pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2) if A2 < critical.min() or A2 > critical.max(): warnings.warn("approximate p-value will be computed by extrapolation") p = math.exp(np.polyval(pf, A2)) return Anderson_ksampResult(A2, critical, p) AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) def ansari(x, y): """ Perform the Ansari-Bradley test for equal scale parameters The Ansari-Bradley test is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. Parameters ---------- x, y : array_like arrays of sample data Returns ------- statistic : float The Ansari-Bradley test statistic pvalue : float The p-value of the hypothesis test See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Sprent, Peter and N.C. Smeeton. 
Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. """ x, y = asarray(x), asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") N = m + n xy = r_[x, y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank, N - rank + 1)), 0) AB = np.sum(symrank[:n], axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m < 55) and (n < 55) and not repeats) if repeats and (m < 55 or n < 55): warnings.warn("Ties preclude use of exact statistic.") if exact: astart, a1, ifault = statlib.gscale(n, m) ind = AB - astart total = np.sum(a1, axis=0) if ind < len(a1)/2.0: cind = int(ceil(ind)) if ind == cind: pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total else: pval = 2.0 * np.sum(a1[:cind], axis=0) / total else: find = int(floor(ind)) if ind == floor(ind): pval = 2.0 * np.sum(a1[find:], axis=0) / total else: pval = 2.0 * np.sum(a1[find+1:], axis=0) / total return AnsariResult(AB, min(1.0, pval)) # otherwise compute normal approximation if N % 2: # N odd mnAB = n * (N+1.0)**2 / 4.0 / N varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) else: mnAB = n * (N+2.0) / 4.0 varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) if repeats: # adjust variance estimates # compute np.sum(tj * rj**2,axis=0) fac = np.sum(symrank**2, axis=0) if N % 2: # N odd varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) else: # N even varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) z = (AB - mnAB) / sqrt(varAB) pval = distributions.norm.sf(abs(z)) * 2.0 return AnsariResult(AB, pval) BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) def bartlett(*args): """ Perform Bartlett's test for equal variances Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene` is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. May be different lengths. Returns ------- statistic : float The test statistic. pvalue : float The p-value of the test. See Also -------- fligner : A non-parametric test for the equality of k variances levene : A robust parametric test for equality of k variances Notes ----- Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. 
""" # Handle empty input for a in args: if np.asanyarray(a).size == 0: return BartlettResult(np.nan, np.nan) k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) ssq = zeros(k, 'd') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = np.sum(Ni, axis=0) spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0) denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) - 1.0/(Ntot - k)) T = numer / denom pval = distributions.chi2.sf(T, k - 1) # 1 - cdf return BartlettResult(T, pval) LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) def levene(*args, **kwds): """ Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 """ # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("levene() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) Yci = zeros(k, 'd') if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [None] * k for i in range(k): Zij[i] = abs(asarray(args[i]) - Yci[i]) # compute Zbari Zbari = zeros(k, 'd') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i] * Ni[i] Zbar /= Ntot numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) denom = (k - 1.0) * dvar W = numer / denom pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf return LeveneResult(W, pval) @setastest(False) def binom_test(x, n=None, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : integer or array_like the number of successes, or if x has length 2, it is the number of successes and the number of failures. n : integer the number of trials. This is ignored if x gives both the number of successes and failures p : float, optional The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5 Returns ------- p-value : float The p-value of the hypothesis test References ---------- .. [1] http://en.wikipedia.org/wiki/Binomial_test """ x = atleast_1d(x).astype(np.integer) if len(x) == 2: n = x[1] + x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") if alternative == 'less': pval = distributions.binom.cdf(x, n, p) return pval if alternative == 'greater': pval = distributions.binom.sf(x-1, n, p) return pval # if alternative was neither 'less' nor 'greater', then it's 'two-sided' d = distributions.binom.pmf(x, n, p) rerr = 1 + 1e-7 if x == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. 
elif x < p * n: i = np.arange(np.ceil(p * n), n+1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)) else: i = np.arange(np.floor(p*n) + 1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(y-1, n, p) + distributions.binom.sf(x-1, n, p)) return min(1.0, pval) def _apply_func(x, g, func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0, g, len(x)]) output = [] for k in range(len(g) - 1): output.append(func(x[g[k]:g[k+1]])) return asarray(output) FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) def fligner(*args, **kwds): """ Perform Fligner-Killeen test for equality of variance. Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner-Killeen's test is distribution free when populations are identical [2]_. Parameters ---------- sample1, sample2, ... : array_like Arrays of sample data. Need not be the same length. center : {'mean', 'median', 'trimmed'}, optional Keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the hypothesis test. See Also -------- bartlett : A parametric test for equality of k variances in normal samples levene : A robust parametric test for equality of k variances Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A comparative study of tests for homogeneity of variances, with applications to the outer continental shelf biding data. Technometrics, 23(4), 351-361. """ # Handle empty input for a in args: if np.asanyarray(a).size == 0: return FlignerResult(np.nan, np.nan) # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("fligner() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median' " + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a, g, np.sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a, axis=0, ddof=1) Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf return FlignerResult(Xsq, pval) def mood(x, y, axis=0): """ Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. axis : int, optional The axis along which the samples are tested. `x` and `y` can be of different length along `axis`. If `axis` is None, `x` and `y` are flattened and the test is done on all values in the flattened arrays. Returns ------- z : scalar or ndarray The z-score for the hypothesis test. For 1-D inputs a scalar is returned. p-value : scalar or ndarray The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density function f. The null hypothesis is that ``s == 1``. For multi-dimensional arrays, if the inputs are of shapes ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the resulting z and p values will have shape ``(n0, n2, n3)``. Note that ``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples -------- >>> from scipy import stats >>> np.random.seed(1234) >>> x2 = np.random.randn(2, 45, 6, 7) >>> x1 = np.random.randn(2, 30, 6, 7) >>> z, p = stats.mood(x1, x2, axis=1) >>> p.shape (2, 6, 7) Find the number of points where the difference in scale is not significant: >>> (p > 0.1).sum() 74 Perform the test with different scales: >>> x1 = np.random.randn(2, 30) >>> x2 = np.random.randn(2, 35) * 10.0 >>> stats.mood(x1, x2, axis=1) (array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07])) """ x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if axis is None: x = x.flatten() y = y.flatten() axis = 0 # Determine shape of the result arrays res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if ax != axis])): raise ValueError("Dimensions of x and y on all axes except `axis` " "should match") n = x.shape[axis] m = y.shape[axis] N = m + n if N < 3: raise ValueError("Not enough observations.") xy = np.concatenate((x, y), axis=axis) if axis != 0: xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], -1) # Generalized to the n-dimensional case by adding the axis argument, and # using for loops, since rankdata is not vectorized. For improving # performance consider vectorizing rankdata function. all_ranks = np.zeros_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0) # Approx stat. mnM = n * (N * N - 1.0) / 12 varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 z = (M - mnM) / sqrt(varM) # sf for right tail, cdf for left tail. Factor 2 for two-sidedness z_pos = z > 0 pval = np.zeros_like(z) pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) if res_shape == (): # Return scalars, not 0-D arrays z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return z, pval WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) def wilcoxon(x, y=None, zero_method="wilcox", correction=False): """ Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like The first set of measurements. y : array_like, optional The second set of measurements. If `y` is not given, then the `x` array is considered to be the differences between the two sets of measurements. zero_method : string, {"pratt", "wilcox", "zsplit"}, optional "pratt": Pratt treatment: includes zero-differences in the ranking process (more conservative) "wilcox": Wilcox treatment: discards all zero-differences "zsplit": Zero rank split: just like Pratt, but splitting the zero rank between positive and negative ones correction : bool, optional If True, apply continuity correction by adjusting the Wilcoxon rank statistic by 0.5 towards the mean value when computing the z-statistic. Default is False. Returns ------- statistic : float The sum of the ranks of the differences above or below zero, whichever is smaller. pvalue : float The two-sided p-value for the test. Notes ----- Because the normal approximation is used for the calculations, the samples used should be large. A typical rule is to require that n > 20. References ---------- ..
[1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test """ if zero_method not in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method should be either 'wilcox' " "or 'pratt' or 'zsplit'") if y is None: d = x else: x, y = map(asarray, (x, y)) if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x - y if zero_method == "wilcox": # Keep all non-zero differences d = compress(np.not_equal(d, 0), d, axis=-1) count = len(d) if count < 10: warnings.warn("Warning: sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = np.sum((d > 0) * r, axis=0) r_minus = np.sum((d < 0) * r, axis=0) if zero_method == "zsplit": r_zero = np.sum((d == 0) * r, axis=0) r_plus += r_zero / 2. r_minus += r_zero / 2. T = min(r_plus, r_minus) mn = count * (count + 1.) * 0.25 se = count * (count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] replist, repnum = find_repeats(r) if repnum.size != 0: # Correction for repeated elements. se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() se = sqrt(se / 24) correction = 0.5 * int(bool(correction)) * np.sign(T - mn) z = (T - mn - correction) / se prob = 2. * distributions.norm.sf(abs(z)) return WilcoxonResult(T, prob) @setastest(False) def median_test(*args, **kwds): """ Mood's median test. Test that two or more samples come from populations with the same median. Let ``n = len(args)`` be the number of samples. The "grand median" of all the data is computed, and a contingency table is formed by classifying the values in each sample as being above or below the grand median. The contingency table, along with `correction` and `lambda_`, are passed to `scipy.stats.chi2_contingency` to compute the test statistic and p-value. Parameters ---------- sample1, sample2, ... : array_like The set of samples. There must be at least two samples. Each sample must be a one-dimensional sequence containing at least one value. The samples are not required to have the same length. ties : str, optional Determines how values equal to the grand median are classified in the contingency table. The string must be one of:: "below": Values equal to the grand median are counted as "below". "above": Values equal to the grand median are counted as "above". "ignore": Values equal to the grand median are not counted. The default is "below". correction : bool, optional If True, *and* there are just two samples, apply Yates' correction for continuity when computing the test statistic associated with the contingency table. Default is True. lambda_ : float or str, optional. By default, the statistic computed in this test is Pearson's chi-squared statistic. `lambda_` allows a statistic from the Cressie-Read power divergence family to be used instead. See `power_divergence` for details. Default is 1 (Pearson's chi-squared statistic). Returns ------- stat : float The test statistic. The statistic that is returned is determined by `lambda_`. The default is Pearson's chi-squared statistic. p : float The p-value of the test. m : float The grand median. table : ndarray The contingency table. The shape of the table is (2, n), where n is the number of samples. The first row holds the counts of the values above the grand median, and the second row holds the counts of the values below the grand median. The table allows further analysis with, for example, `scipy.stats.chi2_contingency`, or with `scipy.stats.fisher_exact` if there are two samples, without having to recompute the table. 
See Also -------- kruskal : Compute the Kruskal-Wallis H-test for independent samples. mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. Notes ----- .. versionadded:: 0.15.0 References ---------- .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill (1950), pp. 394-399. .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). See Sections 8.12 and 10.15. Examples -------- A biologist runs an experiment in which there are three groups of plants. Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. Each plant produces a number of seeds. The seed counts for each group are:: Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 The following code applies Mood's median test to these samples. >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] >>> from scipy.stats import median_test >>> stat, p, med, tbl = median_test(g1, g2, g3) The median is >>> med 34.0 and the contingency table is >>> tbl array([[ 5, 10, 7], [11, 5, 10]]) `p` is too large to conclude that the medians are not the same: >>> p 0.12609082774093244 The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to `median_test`. >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") >>> p 0.12224779737117837 The median occurs several times in the data, so we'll get a different result if, for example, ``ties="above"`` is used: >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") >>> p 0.063873276069553273 >>> tbl array([[ 5, 11, 9], [11, 4, 8]]) This example demonstrates that if the data set is not large and there are values equal to the median, the p-value can be sensitive to the choice of `ties`. """ ties = kwds.pop('ties', 'below') correction = kwds.pop('correction', True) lambda_ = kwds.pop('lambda_', None) if len(kwds) > 0: bad_kwd = kwds.keys()[0] raise TypeError("median_test() got an unexpected keyword " "argument %r" % bad_kwd) if len(args) < 2: raise ValueError('median_test requires two or more samples.') ties_options = ['below', 'above', 'ignore'] if ties not in ties_options: raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " "of: %s" % (ties, str(ties_options)[1:-1])) data = [np.asarray(arg) for arg in args] # Validate the sizes and shapes of the arguments. for k, d in enumerate(data): if d.size == 0: raise ValueError("Sample %d is empty. All samples must " "contain at least one value." % (k + 1)) if d.ndim != 1: raise ValueError("Sample %d has %d dimensions. All " "samples must be one-dimensional sequences." % (k + 1, d.ndim)) grand_median = np.median(np.concatenate(data)) # Create the contingency table. table = np.zeros((2, len(data)), dtype=np.int64) for k, sample in enumerate(data): nabove = count_nonzero(sample > grand_median) nbelow = count_nonzero(sample < grand_median) nequal = sample.size - (nabove + nbelow) table[0, k] += nabove table[1, k] += nbelow if ties == "below": table[1, k] += nequal elif ties == "above": table[0, k] += nequal # Check that no row or column of the table is all zero. # Such a table can not be given to chi2_contingency, because it would have # a zero in the table of expected frequencies. 
rowsums = table.sum(axis=1) if rowsums[0] == 0: raise ValueError("All values are below the grand median (%r)." % grand_median) if rowsums[1] == 0: raise ValueError("All values are above the grand median (%r)." % grand_median) if ties == "ignore": # We already checked that each sample has at least one value, but it # is possible that all those values equal the grand median. If `ties` # is "ignore", that would result in a column of zeros in `table`. We # check for that case here. zero_cols = np.where((table == 0).all(axis=0))[0] if len(zero_cols) > 0: msg = ("All values in sample %d are equal to the grand " "median (%r), so they are ignored, resulting in an " "empty sample." % (zero_cols[0] + 1, grand_median)) raise ValueError(msg) stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, correction=correction) return stat, p, grand_median, table def _hermnorm(N): # return the negatively normalized hermite polynomials up to order N-1 # (inclusive) # using the recursive relationship # p_n+1 = p_n(x)' - x*p_n(x) # and p_0(x) = 1 plist = [None] * N plist[0] = poly1d(1) for n in range(1, N): plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1] return plist # Note: when removing pdf_fromgamma, also remove the _hermnorm support function @np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 " "in favour of statsmodels.distributions.ExpandedNormal.") def pdf_fromgamma(g1, g2, g3=0.0, g4=None): if g4 is None: g4 = 3 * g2**2 sigsq = 1.0 / g2 sig = sqrt(sigsq) mu = g1 * sig**3.0 p12 = _hermnorm(13) for k in range(13): p12[k] /= sig**k # Add all of the terms to polynomial totp = (p12[0] - g1/6.0*p12[3] + g2/24.0*p12[4] + g1**2/72.0 * p12[6] - g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] + g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] + g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12]) # Final normalization totp = totp / sqrt(2*pi) / sig def thefunc(x): xn = (x - mu) / sig return totp(xn) * exp(-xn**2 / 2.) return thefunc def _circfuncs_common(samples, high, low): samples = np.asarray(samples) if samples.size == 0: return np.nan, np.nan ang = (samples - low)*2*pi / (high - low) return samples, ang def circmean(samples, high=2*pi, low=0, axis=None): """ Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. Returns ------- circmean : float Circular mean. """ samples, ang = _circfuncs_common(samples, high, low) res = angle(np.mean(exp(1j * ang), axis=axis)) mask = res < 0 if mask.ndim > 0: res[mask] += 2*pi elif mask: res += 2*pi return res*(high - low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None): """ Compute the circular variance for samples assumed to be in a range Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular variance range. Default is 0. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. Returns ------- circvar : float Circular variance. 
Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi)**2 * 2 * log(1/R) def circstd(samples, high=2*pi, low=0, axis=None): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi) * sqrt(-2*log(R)) # Tests to include (from R) -- some of these already in stats. ######## # X Ansari-Bradley # X Bartlett (and Levene) # X Binomial # Y Pearson's Chi-squared (stats.chisquare) # Y Association Between Paired samples (stats.pearsonr, stats.spearmanr) # stats.kendalltau) -- these need work though # Fisher's exact test # X Fligner-Killeen Test # Y Friedman Rank Sum (stats.friedmanchisquare?) # Y Kruskal-Wallis # Y Kolmogorov-Smirnov # Cochran-Mantel-Haenszel Chi-Squared for Count # McNemar's Chi-squared for Count # X Mood Two-Sample # X Test For Equal Means in One-Way Layout (see stats.ttest also) # Pairwise Comparisons of proportions # Pairwise t tests # Tabulate p values for pairwise comparisons # Pairwise Wilcoxon rank sum tests # Power calculations two sample test of prop. # Power calculations for one and two sample t tests # Equal or Given Proportions # Trend in Proportions # Quade Test # Y Student's T Test # Y F Test to compare two variances # XY Wilcoxon Rank Sum and Signed Rank Tests
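# --- Editorial illustration (not part of the original module) --------------
# A minimal sketch of how the scale tests and circular statistics defined
# above are typically used. It relies only on the public scipy.stats entry
# points for these functions (bartlett, levene, fligner, circmean); the
# sample data are invented purely for the demonstration.
if __name__ == '__main__':
    import numpy as np
    from scipy import stats

    rng = np.random.RandomState(0)
    a = rng.normal(0, 1, size=50)
    b = rng.normal(0, 3, size=60)   # noticeably larger spread than `a`

    # Bartlett assumes normality; Levene and Fligner-Killeen are more robust
    # to departures from it.  All three should reject equal variances here.
    for name, test in [('bartlett', stats.bartlett),
                       ('levene', stats.levene),
                       ('fligner', stats.fligner)]:
        stat, p = test(a, b)
        print('%-8s statistic=%8.3f p-value=%.3g' % (name, stat, p))

    # Circular mean vs. arithmetic mean for angles that straddle 0 / 2*pi.
    angles = np.array([0.1, 6.2, 0.05, 6.25])   # all close to 0 (mod 2*pi)
    print('arithmetic mean: %.3f' % np.mean(angles))       # ~3.15, misleading
    print('circular mean:   %.3f' % stats.circmean(angles))  # close to 0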
mit
SophieIPP/ipp-macro-series-parser
ipp_macro_series_parser/denombrements_fiscaux/agregats_ipp.py
1
36244
# -*- coding: utf-8 -*- # TAXIPP -- A French microsimulation model # By: IPP <taxipp@ipp.eu> # # Copyright (C) 2012, 2013, 2014, 2015 IPP # https://github.com/taxipp # # This file is part of TAXIPP. # # TAXIPP is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # TAXIPP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import collections import numpy import os import pandas import pkg_resources from py_expression_eval import Parser from ipp_macro_series_parser.config import Config from ipp_macro_series_parser.denombrements_fiscaux.parsers import ( get_denombrements_fiscaux_data_frame) from ipp_macro_series_parser.data_extraction import get_or_construct_value config_parser = Config( config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location) ) def update_index_by_variable_name_appearing_in_formula(index_by_variable_name, formula): parser_formula = Parser() try: expr = parser_formula.parse(formula) except Exception, e: print formula raise(e) formula_variables = expr.variables() components = dict( (formula_variable, {'code': formula_variable}) for formula_variable in formula_variables ) index_by_variable_name.update(components) return index_by_variable_name def create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name = None): index_by_variable_name = dict() for variable_name, formula in formula_by_variable_name.iteritems(): if not formula: continue index_by_variable_name[variable_name] = { 'code': None, 'formula': formula, } if isinstance(formula, list): for single_formula in formula: index_by_variable_name = update_index_by_variable_name_appearing_in_formula( index_by_variable_name, single_formula['formula']) else: index_by_variable_name = update_index_by_variable_name_appearing_in_formula(index_by_variable_name, formula) if level_2_formula_by_variable_name is not None: level_2_index_by_variable_name = dict() for variable_name, formula in level_2_formula_by_variable_name.iteritems(): level_2_index_by_variable_name[variable_name] = dict( formula = formula, ) index_by_variable_name.update(level_2_index_by_variable_name) return index_by_variable_name def build_aggregates(raw_data, formula_by_variable_name, level_2_formula_by_variable_name = None, years = None, fill_value = numpy.NaN): assert years is not None aggregates = None index_by_variable_name = create_index_by_variable_name(formula_by_variable_name, level_2_formula_by_variable_name) for variable_name in formula_by_variable_name.keys() + level_2_formula_by_variable_name.keys(): serie, formula = get_or_construct_value( raw_data, variable_name, index_by_variable_name, years = years, fill_value = fill_value) serie = serie.reset_index().drop_duplicates().set_index('year') assert not numpy.any(serie.index.duplicated()), 'Duplicated index for {} : {}'.format( variable_name, serie) if aggregates is None: aggregates = serie else: try: aggregates = pandas.concat([aggregates, serie], axis = 1, verify_integrity = True) except Exception, e: print "aggregates", 
aggregates print "serie", serie raise(e) return aggregates formula_by_variable_name = dict( ## Salaires salaires_imposables = [ dict( start = 1990, end = 2004, formula = 'f1aj + f1bj + f1cj + f1dj + f1ej + + f1fj', ), dict( start = 2005, end = 2006, formula = 'f1aj + f1bj + f1cj + f1dj + f1ej', ), dict( start = 2007, end = 2013, formula = 'f1aj + f1bj + f1cj + f1dj', ), dict( start = 2014, end = 2015, formula = 'f1aj + f1bj + f1cj + f1dj', ), ], heures_supplementaires = [ dict( start = 2007, end = 2013, formula = ' + f1au + f1bu + f1cu + f1du', # les heures sup effectuées en 2012 payées en 2013 ... ), ], ## Bénéfices agricoles benefices_agricoles_forfait_exoneres = 'f5hn + f5in + f5jn', # frag_exon benefices_agricoles_forfait_imposables = 'f5ho + f5io + f5jo', # frag_impo benefices_agricoles_reels_exoneres = 'f5hb + f5ib + f5jb', # arag_exon benefices_agricoles_reels_imposables = [ dict( start = 1990, end = 2005, formula = 'f5hc + f5ic + f5jc + f5hd + f5id + f5jd' ), dict( start = 2006, end = 2013, formula = 'f5hc + f5ic + f5jc' ), ], # arag_impg TODO: check last values in openfisca benefices_agricoles_reels_deficits = 'f5hf + f5if + f5jf', # arag_defi benefices_agricoles_reels_sans_cga_exoneres = 'f5hh + f5ih + f5jh', # nrag_exon benefices_agricoles_reels_sans_cga_imposables = [ dict( start = 1990, end = 2005, formula = 'f5hi + f5ii + f5ji + f5hj + f5ij + f5jj', ), dict( start = 2006, end = 2013, formula = 'f5hi + f5ii + f5ji', ), ], # nrag_impg TODO: check last values in openfisca # TODO voir années antérieures à 2006 benefices_agricoles_reels_sans_cga_deficits = 'f5hl + f5il + f5jl', # nrag_defi # TODO: benefices_agricoles_ = 'f5hm + f5im + f5jm', # nrag_ajag ## Bénéfices industriels et commerciaux professionnels (déclaration complémentaire, cadres 5B) benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'f5ko + f5lo + f5mo', # mbic_impv # TODO erreur car 2 fois la mm ligne benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'f5kp + f5lp + f5mp', # mbic_imps benefices_industriels_commerciaux_professionnels_reels_exoneres = 'f5kb + f5lb + f5mb', # mbic_imps benefices_industriels_commerciaux_professionnels_reels_imposables_normal = [ dict( start = 2003, end = 2009, formula = 'f5kc + f5lc + f5mc', # abic_impn ), ], benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = [ dict( start = 2003, end = 2009, formula = 'f5kd + f5ld + f5md', # abic_imps ), ], benefices_industriels_commerciaux_professionnels_reels_imposables_normal_et_simplifie = [ dict( start = 2010, end = 2014, formula = 'f5kc + f5lc + f5mc', # abic_impn ), ], benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'f5kh + f5lh + f5mh', # nbic_exon benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'f5ki + f5li + f5mi', # nbic_impn benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'f5kj + f5lj + f5mj', # nbic_mvct deficits_industriels_commerciaux_professionnels_normal = [ dict( start = 2006, # au moins end = 2009, formula = 'f5kf + f5lf + f5mf', # abic_defn ), ], deficits_industriels_commerciaux_professionnels_simplifie = [ dict( start = 2006, # au moins end = 2009, formula = 'f5kg + f5lg + f5mg', # abic_defs ), ], deficits_industriels_commerciaux_professionnels_normal_et_simplifie = [ dict( start = 2010, end = 2014, # au moins formula = 'f5kf + f5lf + f5mf', ) ], deficits_industriels_commerciaux_professionnels_normal_sans_cga = [ dict( start = 2006, # au moins end = 2009, 
formula = 'f5kl + f5ll + f5ml', # nbic_defn ), ], deficits_industriels_commerciaux_professionnels_simplifie_sans_cga = [ dict( start = 2006, # au moins end = 2009, formula = 'f5km + f5lm + f5mm', ), ], deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga = [ dict( start = 2010, end = 2014, # au moins formula = 'f5kl + f5ll + f5ml', ), ], # deficits_industriels_commerciaux_professionnels_locations = [ ## dict( ## start = 1990, ## end = 2008, ## formula = 'f5km + f5lm + f5mm', ## ), # dict( # start = 2009, # end = 2014, # formula = 'f5qa + f5ra + f5sa', # ), # ], # nbic_defs ## Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C) benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'f5nn + f5on + f5pn', # macc_exon benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'f5no + f5oo + f5po', # macc_impv benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'f5np + f5op + f5op', # macc_impS benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'f5nb + f5ob + f5pb', # aacc_exon # benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'f5nc + f5nc + f5nc', # aacc_impn # TODO: normal si 3 fois la même chose ? benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = [ dict( start = 2003, end = 2009, formula = 'f5nc + f5oc + f5pc', # aacc_impn ), ], # benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'f5nd + f5nd + f5nd', # aacc_imps TODO: ceci avant 2010 mais après locations meublées pro, # TODO: normal si 3 fois la même chose ? benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = [ dict( start = 2003, end = 2009, formula = 'f5nd + f5od + f5pd', # aacc_imps TODO: ceci avant 2010 mais après locations meublées pro, ), ], benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie = [ dict( start = 2010, end = 2014, formula = 'f5nc + f5oc + f5pc', ), ], benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'f5nh + f5oh + f5ph', # nacc_exon benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'f5ni + f5ni + f5ni', # nacc_impn benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'f5nj + f5nj + f5nj', # nacc_meup TODO: ceci avant 2012 mais après locations déjà soumises aux prélèvements sociaux, deficits_industriels_commerciaux_non_professionnels_normal = [ #'f5nf + f5of + f5pf', # aacc_defn dict( start = 2002, # au moins end = 2009, formula = 'f5nf + f5of + f5pf', ), ], deficits_industriels_commerciaux_non_professionnels_simplifie = [ #'f5ng + f5og + f5pg', # aacc_gits dict( start = 2002, # au moins end = 2009, formula = 'f5ng + f5og + f5pg', ), ], deficits_industriels_commerciaux_non_professionnels_normal_sans_cga = [ dict( start = 2002, # au moins end = 2009, formula = 'f5nl + f5ol + f5pl', ), ], deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga = [ dict( start = 2002, # au moins end = 2009, formula = 'f5nm + f5om + f5pm', ), ], deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie = [ dict( start = 2010, end = 2014, # au moins formula = 'f5nf + f5of + f5pf', ), ], deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga = [ dict( start = 2010, end = 2014, # au moins formula = 'f5nl + f5ol + f5pl', ), ], # 
deficits_industriels_commerciaux_non_professionnels_sans_cga = 'f5nl + f5ol + f5pl', # nacc_defn # TODO: Locations déjà soumises aux prélèvements sociaux sans CGA (régime du bénéfice réel) # deficits_industriels_commerciaux_non_professionnels_locations = 'f5ny + f5oy + f5py', # - Détails Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D) benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'f5hq + f5iq + f5jq', # mbnc_impo benefices_non_commerciaux_professionnels_declaration_controlee = 'f5qc + f5rc + f5sc', # benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qi + f5ri + f5si', # deficits_non_commerciaux_professionnels_declaration_controlee = 'f5qe + f5re + f5se', # deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = 'f5qk + f5rk + f5sk', # # - Détails Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E) benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = 'f5ku + f5lu + f5mu', benefices_non_commerciaux_non_professionnels_declaration_controlee = 'f5jg + f5rf + f5sf', benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sn + f5ns + f5os', deficits_non_commerciaux_non_professionnels_declaration_controlee = 'f5jj + f5rg + f5sg', deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = 'f5sp + f5nu + f5ou', # - Revenus fonciers revenus_fonciers_regime_normal = 'f4ba', # f4ba revenus_fonciers_micro_foncier = 'f4be', # f4be # Missing financier (tout est par case dans of) # - Déficits des années antérieures non encore déduits # Missing foncier # - rentes viagères : 'f1aw', 'f1bw', 'f1cw', 'f1dw' # - Déficits : 'f4bb', et suivantes... 'f4bd' # Missing plus value # - Gains de levée d'options sur titres 'f1tv' 'f1uv' 'f1tw' 'f1uw' 'f1tx' 'f1ux' # Missing revenus_de_remplacement # - chomeur_longue_duree: 'f1ai', 'f1bi', 'f1ci', 'f1di' # Missing salarie # - sal_pen_exo_etr (start = 2013, 1ac, 1bc, 1cc, 1cd) frais_reels = [ dict( end = 2014, start = 2005, formula = 'f1ak + f1bk + f1ck + f1dk', ), dict( end = 2004, start = 2004, formula = 'f1ak + f1bk + f1ck + f1dk + f1ek', ), dict( start = 2003, end = 2003, formula = 'f1ak + f1bk + f1ck + f1dk + f1ek + f1fk', ), ], # - hsup (f1au, f1bu, f1cu, f1du, f1eu) start 2007 # - allocations_chomage = [ dict( start = 2007, end = 2013, formula = 'f1ap + f1bp + f1cp + f1dp', ), dict( start = 2005, end = 2006, formula = 'f1ap + f1bp + f1cp + f1dp + f1ep', ), dict( start = 2000, end = 2004, formula = 'f1ap + f1bp + f1cp + f1dp + f1ep + f1fp', ), ], # choi # pensions_de_retraite = [ dict( start = 2007, end = 2013, formula = 'f1as + f1bs + f1cs + f1ds', ), dict( start = 2005, end = 2006, formula = 'f1as + f1bs + f1cs + f1ds + f1es', ), dict( start = 2000, end = 2004, formula = 'f1as + f1bs + f1cs + f1ds + f1es + f1fs', ), ], # rsti dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés assurances_vie_imposees_au_bareme = 'f2ch', # non agrégés dividendes_imposes_au_prelevement_liberatoire = 'f2da', interets_imposes_au_prelevement_liberatoire = 'f2ee', assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh', plus_values_mobilieres_regime_normal = 'f3vg', plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ? plus_values_mobilieres_retraite_dirigeant = 'f3va', # TODO f3vb ? 
plus_values_professionnelles_regime_normal = [ dict( start = 2007, # au moins end = 2009, formula = 'f5hz + f5iz + f5jz', # TODO: ceci n'est valable qu'avant 2010 ), dict( start = 2010, end = 2013, # DONE formula = 'f5hx + f5ix + f5jx + f5he + f5ie + f5je + f5kq + f5lq + f5ke + f5le + f5me + f5nq + f5oq + f5pq + f5ne + f5oe + f5pe + f5hr + f5ir + f5jr + f5qd + f5rd + f5sd + f5kv + f5lv + f5mv + f5so + f5nt', # + f5mq + f5ot ), ], plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig', revenus_distribues_pea_exoneres = [ dict( start = 2009, end = 2009, formula = 'f2gr', ), ], pensions_alimentaires_percues = 'f1ao + f1bo + f1co + f1do + f1eo + f1fo', # pensions_alimentaires_percues pensions_alimentaires_verses = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd', ) level_2_formula_by_variable_name = dict( salaires = 'salaires_imposables + heures_supplementaires', revenus_d_activite_non_salariee = 'benefices_agricoles + benefices_industriels_commerciaux + benefices_non_commerciaux', # + revenus_activite_non_salariee_exoneres', # TODO get parameters form openfisca legislation benefices_agricoles = 'benefices_agricoles_bruts - 0.5 * deficits_agricoles', benefices_agricoles_bruts = 'benefices_agricoles_forfait_imposables + benefices_agricoles_reels_imposables + 1.25 * benefices_agricoles_reels_sans_cga_imposables', # TODO get parameters form openfisca legislation deficits_agricoles = 'benefices_agricoles_reels_deficits + benefices_agricoles_reels_sans_cga_deficits', # Bénéfices industriels et commerciaux benefices_industriels_commerciaux = 'benefices_industriels_commerciaux_professionnels + benefices_industriels_commerciaux_non_professionnels', benefices_industriels_commerciaux_bruts = 'benefices_industriels_commerciaux_professionnels_bruts + benefices_industriels_commerciaux_non_professionnels_bruts', deficits_industriels_commerciaux = 'deficits_industriels_commerciaux_professionnels + deficits_industriels_commerciaux_non_professionnels', # - Bénéfices industriels et commerciaux professionnels benefices_industriels_commerciaux_professionnels = 'benefices_industriels_commerciaux_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_professionnels', benefices_industriels_commerciaux_professionnels_bruts = 'benefices_industriels_commerciaux_professionnels_micro_entreprise + benefices_industriels_commerciaux_professionnels_reels', benefices_industriels_commerciaux_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_professionnels_micro_entreprise_services', # TODO check and use legislation parameters benefices_industriels_commerciaux_professionnels_reels = 'benefices_industriels_commerciaux_professionnels_reels_avec_cga + benefices_industriels_commerciaux_professionnels_reels_sans_cga', benefices_industriels_commerciaux_professionnels_reels_avec_cga = 'benefices_industriels_commerciaux_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie', benefices_industriels_commerciaux_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga)', # TODO check and use legislation deficits_industriels_commerciaux_professionnels = [ dict( start = 2006, end = 2009, formula = 'deficits_industriels_commerciaux_professionnels_normal + 
deficits_industriels_commerciaux_professionnels_simplifie + deficits_industriels_commerciaux_professionnels_normal_sans_cga + deficits_industriels_commerciaux_professionnels_simplifie_sans_cga', ), dict( start = 2010, end = 2014, formula = 'deficits_industriels_commerciaux_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_professionnels_normal_et_simplifie_sans_cga' ), ], # - Bénéfices industriels et commerciaux non professionnels (déclaration complémentaire, cadres 5C) benefices_industriels_commerciaux_non_professionnels = 'benefices_industriels_commerciaux_non_professionnels_bruts - 0.5 * deficits_industriels_commerciaux_non_professionnels', benefices_industriels_commerciaux_non_professionnels_bruts = 'benefices_industriels_commerciaux_non_professionnels_micro_entreprise + benefices_industriels_commerciaux_non_professionnels_reels', benefices_industriels_commerciaux_non_professionnels_micro_entreprise = '(1 - 0.71) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente + (1 - 0.5) * benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services', # TODO check and use legislation parameters benefices_industriels_commerciaux_non_professionnels_reels = 'benefices_industriels_commerciaux_non_professionnels_reels_avec_cga + benefices_industriels_commerciaux_non_professionnels_reels_sans_cga', benefices_industriels_commerciaux_non_professionnels_reels_avec_cga = [ dict( start = 2003, end = 2009, formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie', ), dict( start = 2010, end = 2014, formula = 'benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_et_simplifie', ), ], benefices_industriels_commerciaux_non_professionnels_reels_sans_cga = '1.25 * (benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga + benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga)', # TODO check and use legislation # Bénéfices non commerciaux benefices_non_commerciaux = 'benefices_non_commerciaux_professionnels + benefices_non_commerciaux_non_professionnels', benefices_non_commerciaux_bruts = 'benefices_non_commerciaux_professionnels_bruts + benefices_non_commerciaux_non_professionnels_bruts', deficits_non_commerciaux = 'deficits_non_commerciaux_professionnels + deficits_non_commerciaux_non_professionnels', deficits_industriels_commerciaux_non_professionnels = [ dict( start = 2002, # au moins end = 2009, formula = 'deficits_industriels_commerciaux_non_professionnels_normal + deficits_industriels_commerciaux_non_professionnels_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_sans_cga + deficits_industriels_commerciaux_non_professionnels_simplifie_sans_cga' ), dict( start = 2010, end = 2014, # au moins formula = 'deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie + deficits_industriels_commerciaux_non_professionnels_normal_et_simplifie_sans_cga', ), ], # - Bénéfices non commerciaux professionnels (déclaration complémentaire, cadres 5D) benefices_non_commerciaux_professionnels = 'benefices_non_commerciaux_professionnels_bruts - 0.5 * deficits_non_commerciaux_professionnels', benefices_non_commerciaux_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_professionnels_declaration_controlee + 1.25 * 
benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga', deficits_non_commerciaux_professionnels = 'deficits_non_commerciaux_professionnels_declaration_controlee + deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga', # - Bénéfices non commerciaux non professionnels (déclaration complémentaire, cadres 5E) benefices_non_commerciaux_non_professionnels = 'benefices_non_commerciaux_non_professionnels_bruts - 0.5 * deficits_non_commerciaux_non_professionnels', benefices_non_commerciaux_non_professionnels_bruts = '(1 - 0.34) * benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables + benefices_non_commerciaux_non_professionnels_declaration_controlee + 1.25 * benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga', deficits_non_commerciaux_non_professionnels = 'deficits_non_commerciaux_non_professionnels_declaration_controlee + deficits_non_commerciaux_non_professionnels_declaration_controlee_sans_cga', # Revenus Fonciers revenus_fonciers = 'revenus_fonciers_regime_normal + revenus_fonciers_micro_foncier', revenus_de_remplacement = 'pensions_de_retraite + allocations_chomage', revenus_financiers_hors_plus_values = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire', revenus_financiers = 'revenus_imposes_au_bareme + revenus_imposes_au_prelevement_liberatoire + plus_values', plus_values = 'plus_values_mobilieres + plus_values_professionnelles', plus_values_mobilieres = 'plus_values_mobilieres_regime_normal + plus_values_mobilieres_stock_options + plus_values_mobilieres_retraite_dirigeant', # analysis:ignore plus_values_professionnelles = 'plus_values_professionnelles_regime_normal + plus_values_professionnelles_retraite_dirigeant', # analysis:ignore revenus_imposes_au_bareme = 'dividendes_imposes_au_bareme + interet_imposes_au_bareme + assurances_vie_imposees_au_bareme', # analysis:ignore revenus_imposes_au_prelevement_liberatoire = 'dividendes_imposes_au_prelevement_liberatoire + interets_imposes_au_prelevement_liberatoire + assurances_vie_imposees_au_prelevement_liberatoire', #analysis:ignore ) # #raw_data = get_denombrements_fiscaux_data_frame(years = [2010], fill_value = 0) #aggregates = build_aggregates( # raw_data, # formula_by_variable_name, # level_2_formula_by_variable_name = level_2_formula_by_variable_name, # years = [2010], # fill_value = numpy.NaN, # ) def build_irpp_tables(years = None, fill_value = numpy.NaN): assert years is not None assert isinstance(years, list) raw_data = get_denombrements_fiscaux_data_frame(years = years, fill_value = 0) aggregates = build_aggregates( raw_data, formula_by_variable_name, level_2_formula_by_variable_name = level_2_formula_by_variable_name, years = years, fill_value = fill_value, ) data_frame_by_irpp_table_name = collections.OrderedDict([ # 1. Tableau IRPP1: Les revenus figurant dans les déclarations de revenus ('irpp_1', aggregates[[ 'salaires', 'salaires_imposables', 'heures_supplementaires', # TODO # 'revenus_d_activite_non_salariee' # 'ba', # 'bic', # 'bnc', # 'revenus_activite_non_salariee_exoneres', 'revenus_de_remplacement', 'pensions_de_retraite', 'allocations_chomage', 'revenus_fonciers', 'revenus_fonciers_regime_normal', 'revenus_fonciers_micro_foncier', 'revenus_financiers', 'frais_reels', 'pensions_alimentaires_percues', ]]), # 2. 
Tableau IRPP2: Détails des revenus financiers (intérêts, dividendes, plus-values) figurant dans les # déclations de revenus (imposition au barème, imposition au prélèvement forfaitaire libératoire (PL) et # plus-values) ('irpp_2', aggregates[[ 'revenus_imposes_au_bareme', 'dividendes_imposes_au_bareme', 'interet_imposes_au_bareme', 'assurances_vie_imposees_au_bareme', 'revenus_imposes_au_prelevement_liberatoire', 'dividendes_imposes_au_prelevement_liberatoire', 'interets_imposes_au_prelevement_liberatoire', 'assurances_vie_imposees_au_prelevement_liberatoire', 'plus_values', 'revenus_financiers', 'revenus_financiers_hors_plus_values' ]]), # 3. Tableau IRPP3: Plus-values mobilières et professionnelles ('irpp_3', aggregates[[ 'plus_values', 'plus_values_mobilieres', 'plus_values_mobilieres_regime_normal', 'plus_values_mobilieres_stock_options', 'plus_values_mobilieres_retraite_dirigeant', 'plus_values_professionnelles', 'plus_values_professionnelles_regime_normal', 'plus_values_professionnelles_retraite_dirigeant', ]]), ('irpp_4', aggregates[[ 'revenus_d_activite_non_salariee', 'benefices_agricoles', 'benefices_agricoles_bruts', 'deficits_agricoles', 'benefices_industriels_commerciaux', 'benefices_industriels_commerciaux_bruts', 'deficits_industriels_commerciaux', # 'bnc', # 'revenus_activite_non_salariee_exoneres', ]]), # ('irpp_5_a', aggregates[[ # 'benefices_agricoles', # 'benefices_agricoles_forfait_exoneres', # 'benefices_agricoles_forfait_imposables', # 'benefices_agricoles_reels_exoneres', # 'benefices_agricoles_reels_imposables', # 'benefices_agricoles_reels_deficits', # 'benefices_agricoles_reels_sans_cga_exoneres', # 'benefices_agricoles_reels_sans_cga_imposables', # 'benefices_agricoles_reels_sans_cga_deficits', # ]]) ]) return data_frame_by_irpp_table_name of_name_by_irpp_table_name = dict( salaires_imposables = 'salaire_imposable', heures_supplementaires = 'hsup', benefices_agricoles_forfait_exoneres = 'frag_exon', benefices_agricoles_forfait_imposables = 'frag_impo', benefices_agricoles_reels_exoneres = 'arag_exon', benefices_agricoles_reels_sans_cga_deficits = 'nrag_defi', benefices_agricoles_reels_imposables = 'arag_impg', benefices_agricoles_reels_deficits = 'arag_defi', benefices_agricoles_reels_sans_cga_exoneres = 'nrag_exon', benefices_agricoles_reels_sans_cga_imposables = 'arag_defi', benefices_industriels_commerciaux_professionnels_micro_entreprise_vente = 'mbic_impv', benefices_industriels_commerciaux_professionnels_micro_entreprise_services = 'mbic_imps', benefices_industriels_commerciaux_professionnels_reels_exoneres = 'mbic_imps', benefices_industriels_commerciaux_professionnels_reels_imposables_normal = 'abic_impn', benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie = 'abic_imps', benefices_industriels_commerciaux_professionnels_reels_exoneres_sans_cga = 'nbic_exon', benefices_industriels_commerciaux_professionnels_reels_imposables_normal_sans_cga = 'nbic_impn', benefices_industriels_commerciaux_professionnels_reels_imposables_simplifie_sans_cga = 'nbic_mvct', deficits_industriels_commerciaux_professionnels_normal = 'abic_defn', deficits_industriels_commerciaux_professionnels_simplifie = 'abic_defs', deficits_industriels_commerciaux_professionnels_sans_cga = 'nbic_defn', deficits_industriels_commerciaux_professionnels_locations = 'nbic_defs', benefices_industriels_commerciaux_non_professionnels_micro_entreprise_exoneres = 'macc_exon', benefices_industriels_commerciaux_non_professionnels_micro_entreprise_vente = 'macc_impv', 
benefices_industriels_commerciaux_non_professionnels_micro_entreprise_services = 'macc_impS', benefices_industriels_commerciaux_non_professionnels_reels_exoneres = 'aacc_exon', benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal = 'aacc_impn', benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie = 'aacc_imps', benefices_industriels_commerciaux_non_professionnels_reels_exoneres_sans_cga = 'nacc_exon', benefices_industriels_commerciaux_non_professionnels_reels_imposables_normal_sans_cga = 'nacc_impn', benefices_industriels_commerciaux_non_professionnels_reels_imposables_simplifie_sans_cga = 'nacc_meup', deficits_industriels_commerciaux_non_professionnels_normal = 'aacc_defn', deficits_industriels_commerciaux_non_professionnels_simplifie = 'aacc_gits', deficits_industriels_commerciaux_non_professionnels_sans_cga = 'nacc_defn', benefices_non_commerciaux_professionnels_micro_entreprise_imposables = 'mbnc_impo', benefices_non_commerciaux_professionnels_declaration_controlee = '', benefices_non_commerciaux_professionnels_declaration_controlee_sans_cga = '', deficits_non_commerciaux_professionnels_declaration_controlee = '', deficits_non_commerciaux_professionnels_declaration_controlee_sans_cga = '', benefices_non_commerciaux_non_professionnels_micro_entreprise_imposables = '', benefices_non_commerciaux_non_professionnels_declaration_controlee = '', benefices_non_commerciaux_non_professionnels_declaration_controlee_sans_cga = '', revenus_fonciers_regime_normal = 'f4ba', # f4ba revenus_fonciers_micro_foncier = 'f4be', # f4be allocations_chomage = 'cho', pensions_de_retraite = 'rst', # dividendes_imposes_au_bareme = 'f2dc + f2fu', # 'f2dc + f2fu' non agrégés # interet_imposes_au_bareme = 'f2ts + f2go + f2tr', # non agrégés assurances_vie_imposees_au_bareme = 'f2ch', # non agrégés dividendes_imposes_au_prelevement_liberatoire = 'f2da', interets_imposes_au_prelevement_liberatoire = 'f2ee', assurances_vie_imposees_au_prelevement_liberatoire = 'f2dh', plus_values_mobilieres_regime_normal = 'f3vg', # plus_values_mobilieres_stock_options = 'f3vf + f3vi', # PV stock options 1, stock options 2, TODO Différencier ? plus_values_mobilieres_retraite_dirigeant = 'f3va', # TODO f3vb ? # plus_values_professionnelles_regime_normal = 'f5hz + f5iz + f5jz', # TODO: ceci n'est valable qu'avant 2010 # plus_values_professionnelles_retraite_dirigeant = 'f5hg + f5ig', revenus_distribues_pea_exoneres = 'f2gr', pensions_alimentaires_percues = 'pensions_alimentaires_percues', # pensions_alimentaires_percues # pensions_alimentaires_versess = 'f6gi + f6gj + f6el + f6em + f6gp + f6gu + f6dd', ) if __name__ == '__main__': data_frame_by_irpp_table_name = build_irpp_tables(years = range(2008, 2013), fill_value = 0)
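# --- Editorial illustration (not part of the original module) --------------
# A minimal sketch of the mechanism behind build_aggregates: a formula string
# is parsed with py_expression_eval, its variables correspond to dénombrement
# fiscal box codes, and the expression is evaluated for one year.  The box
# amounts below are invented for the example, and Expression.evaluate() is
# assumed to behave as documented by py_expression_eval.
def _demo_formula_evaluation():
    from py_expression_eval import Parser

    formula = 'f1aj + f1bj + f1cj + f1dj'  # salaires imposables (post-2007 form)
    expr = Parser().parse(formula)
    print(expr.variables())   # -> ['f1aj', 'f1bj', 'f1cj', 'f1dj']

    # Invented per-box amounts standing in for the parsed raw data of one year.
    boxes_2010 = {'f1aj': 350.0, 'f1bj': 210.0, 'f1cj': 12.0, 'f1dj': 3.0}
    print(expr.evaluate(boxes_2010))   # -> 575.0
# (call _demo_formula_evaluation() by hand to run the sketch)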
gpl-3.0
Titan-C/scikit-learn
sklearn/linear_model/omp.py
8
31640
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause import warnings import numpy as np from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel, _pre_fit from ..base import RegressorMixin from ..utils import as_float_array, check_array, check_X_y from ..model_selection import check_cv from ..externals.joblib import Parallel, delayed solve_triangular_args = {'check_finite': False} premature = """ Orthogonal matching pursuit ended prematurely due to linear dependence in the dictionary. The requested precision might not have been met. """ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : array, shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : array, shape (n_samples,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coef : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy('F') else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) potrs, = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=X.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=X.dtype) L[0, 0] = 1. 
if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = np.sqrt(1 - v) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the Cholesky decomposition method. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data matrix Xy : array, shape (n_features,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol_0 : float Squared norm of y, required if tol is not None. tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coefs : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. 
""" Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram) if copy_Xy: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,)) potrs, = get_lapack_funcs(('potrs',), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=Gram.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = np.sqrt(1 - v) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array, shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : array, shape (n_samples,) or (n_samples, n_targets) Input targets n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : {True, False, 'auto'}, Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. 
Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp_gram lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError("The number of atoms cannot be more than the number " "of features") if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y ** 2), axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False): """Gram Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. 
Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data: X.T * X Xy : array, shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. norms_squared : array-like, shape (n_targets,) Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order='F', copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError('Gram OMP needs the precomputed norms in order ' 'to evaluate the error sum of squares.') if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError("The number of atoms cannot be more than the number " "of features") if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram))) else: coef = np.zeros((len(Gram), Xy.shape[1])) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=copy_Xy, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) class OrthogonalMatchingPursuit(LinearModel, RegressorMixin): """Orthogonal Matching Pursuit model (OMP) Parameters ---------- n_nonzero_coefs : int, optional Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, optional Maximum norm of the residual. If not None, overrides n_nonzero_coefs. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : {True, False, 'auto'}, default 'auto' Whether to use a precomputed Gram and Xy matrix to speed up calculations. Improves performance when `n_targets` or `n_samples` is very large. Note that if you already have such matrices, you can pass them directly to the fit method. Read more in the :ref:`User Guide <omp>`. Attributes ---------- coef_ : array, shape (n_features,) or (n_targets, n_features) parameter vector (w in the formula) intercept_ : float or array, shape (n_targets,) independent term in decision function. n_iter_ : int or array-like Number of active features across every target. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. 
and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars decomposition.sparse_encode """ def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto'): self.n_nonzero_coefs = n_nonzero_coefs self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_offset, y_offset, X_scale, Gram, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, self.n_nonzero_coefs_, self.tol, precompute=False, copy_X=True, return_n_iter=True) else: norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True) self.coef_ = coef_.T self._set_intercept(X_offset, y_offset, X_scale) return self def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, normalize=True, max_iter=100): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. 
Returns ------- residues : array, shape (n_samples, max_features) Residues of the prediction on the test data """ if copy: X_train = X_train.copy() y_train = y_train.copy() X_test = X_test.copy() y_test = y_test.copy() if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True) if coefs.ndim == 1: coefs = coefs[:, np.newaxis] if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] return np.dot(coefs.T, X_test.T) - y_test class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin): """Cross-validated Orthogonal Matching Pursuit model (OMP) Parameters ---------- copy : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 10% of ``n_features`` but at least 5 if available. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Read more in the :ref:`User Guide <omp>`. Attributes ---------- intercept_ : float or array, shape (n_targets,) Independent term in decision function. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the problem formulation). n_nonzero_coefs_ : int Estimated number of non-zero coefficients giving the best mean squared error over the cross-validation folds. n_iter_ : int or array-like Number of active features across every target for the model refit with the best hyperparameters got by cross-validating across all folds. 
See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars OrthogonalMatchingPursuit LarsCV LassoLarsCV decomposition.sparse_encode """ def __init__(self, copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=1, verbose=False): self.copy = copy self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.cv = cv self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) cv = check_cv(self.cv, classifier=False) max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, self.normalize, max_iter) for train, test in cv.split(X)) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, normalize=self.normalize) omp.fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
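# ----------------------------------------------------------------------------
# Minimal usage sketch for the estimator and functional interfaces defined
# above. Illustrative only: it assumes this module is importable through the
# public scikit-learn namespace (sklearn.linear_model), and the synthetic
# data sizes and variable names below are examples, not part of this module.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    from sklearn.linear_model import OrthogonalMatchingPursuit, orthogonal_mp

    rng = np.random.RandomState(0)
    n_samples, n_features, n_nonzero = 100, 40, 5

    # Dictionary with unit-norm columns, as the docstrings above assume.
    X = rng.randn(n_samples, n_features)
    X /= np.sqrt(np.sum(X ** 2, axis=0))

    # Sparse ground-truth coefficients and the resulting targets.
    w_true = np.zeros(n_features)
    support = rng.choice(n_features, n_nonzero, replace=False)
    w_true[support] = rng.randn(n_nonzero)
    y = np.dot(X, w_true)

    # Estimator interface: stop after exactly `n_nonzero` selected atoms.
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero)
    omp.fit(X, y)
    recovered = np.flatnonzero(omp.coef_)   # typically equals sorted(support)

    # Functional interface: same solution without building an estimator.
    coef = orthogonal_mp(X, y, n_nonzero_coefs=n_nonzero)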
bsd-3-clause
dhimmel/networkx
networkx/convert_matrix.py
13
33243
"""Functions to convert NetworkX graphs to and from numpy/scipy matrices. The preferred way of converting data to a NetworkX graph is through the graph constuctor. The constructor calls the to_networkx_graph() function which attempts to guess the input type and convert it automatically. Examples -------- Create a 10 node random graph from a numpy matrix >>> import numpy >>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10)) >>> D = nx.DiGraph(a) or equivalently >>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph()) See Also -------- nx_pygraphviz, nx_pydot """ # Copyright (C) 2006-2014 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import warnings import itertools import networkx as nx from networkx.convert import _prep_create_using from networkx.utils import not_implemented_for __author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>', 'Pieter Swart (swart@lanl.gov)', 'Dan Schult(dschult@colgate.edu)']) __all__ = ['from_numpy_matrix', 'to_numpy_matrix', 'from_pandas_dataframe', 'to_pandas_dataframe', 'to_numpy_recarray', 'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix'] def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0): """Return the graph adjacency matrix as a Pandas DataFrame. Parameters ---------- G : graph The NetworkX graph used to construct the Pandas DataFrame. nodelist : list, optional The rows and columns are ordered according to the nodes in `nodelist`. If `nodelist` is None, then the ordering is produced by G.nodes(). multigraph_weight : {sum, min, max}, optional An operator that determines how weights in multigraphs are handled. The default is to sum the weights of the multiple edges. weight : string or None, optional The edge attribute that holds the numerical value used for the edge weight. If an edge does not have that attribute, then the value 1 is used instead. nonedge : float, optional The matrix values corresponding to nonedges are typically set to zero. However, this could be undesirable if there are matrix values corresponding to actual edges that also have the value zero. If so, one might prefer nonedges to have some other value, such as nan. Returns ------- df : Pandas DataFrame Graph adjacency matrix Notes ----- The DataFrame entries are assigned to the weight edge attribute. When an edge does not have a weight attribute, the value of the entry is set to the number 1. For multiple (parallel) edges, the values of the entries are determined by the 'multigraph_weight' parameter. The default is to sum the weight attributes for each of the parallel edges. When `nodelist` does not contain every node in `G`, the matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. The convention used for self-loop edges in graphs is to assign the diagonal matrix entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). 
If the alternate convention of doubling the edge weight is desired the resulting Pandas DataFrame can be modified as follows: >>> import pandas as pd >>> import numpy as np >>> G = nx.Graph([(1,1)]) >>> df = nx.to_pandas_dataframe(G) >>> df 1 1 1 >>> df.values[np.diag_indices_from(df)] *= 2 >>> df 1 1 2 Examples -------- >>> G = nx.MultiDiGraph() >>> G.add_edge(0,1,weight=2) >>> G.add_edge(1,0) >>> G.add_edge(2,2,weight=3) >>> G.add_edge(2,2) >>> nx.to_pandas_dataframe(G, nodelist=[0,1,2]) 0 1 2 0 0 2 0 1 1 0 0 2 0 0 4 """ import pandas as pd M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge) if nodelist is None: nodelist = G.nodes() nodeset = set(nodelist) df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist) return df def from_pandas_dataframe(df, source, target, edge_attr=None, create_using=None): """Return a graph from Pandas DataFrame. The Pandas DataFrame should contain at least two columns of node names and zero or more columns of node attributes. Each row will be processed as one edge instance. Note: This function iterates over DataFrame.values, which is not guaranteed to retain the data type across columns in the row. This is only a problem if your row is entirely numeric and a mix of ints and floats. In that case, all values will be returned as floats. See the DataFrame.iterrows documentation for an example. Parameters ---------- df : Pandas DataFrame An edge list representation of a graph source : str or int A valid column name (string or iteger) for the source nodes (for the directed case). target : str or int A valid column name (string or iteger) for the target nodes (for the directed case). edge_attr : str or int, iterable, True A valid column name (str or integer) or list of column names that will be used to retrieve items from the row and add them to the graph as edge attributes. If `True`, all of the remaining columns will be added. create_using : NetworkX graph Use specified graph for result. The default is Graph() See Also -------- to_pandas_dataframe Examples -------- Simple integer weights on edges: >>> import pandas as pd >>> import numpy as np >>> r = np.random.RandomState(seed=5) >>> ints = r.random_integers(1, 10, size=(3,2)) >>> a = ['A', 'B', 'C'] >>> b = ['D', 'A', 'E'] >>> df = pd.DataFrame(ints, columns=['weight', 'cost']) >>> df[0] = a >>> df['b'] = b >>> df weight cost 0 b 0 4 7 A D 1 7 1 B A 2 10 9 C E >>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost']) >>> G['E']['C']['weight'] 10 >>> G['E']['C']['cost'] 9 """ g = _prep_create_using(create_using) # Index of source and target src_i = df.columns.get_loc(source) tar_i = df.columns.get_loc(target) if edge_attr: # If all additional columns requested, build up a list of tuples # [(name, index),...] if edge_attr is True: # Create a list of all columns indices, ignore nodes edge_i = [] for i, col in enumerate(df.columns): if col is not source and col is not target: edge_i.append((col, i)) # If a list or tuple of name is requested elif isinstance(edge_attr, (list, tuple)): edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr] # If a string or int is passed else: edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),] # Iteration on values returns the rows as Numpy arrays for row in df.values: g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i}) # If no column names are given, then just return the edges. 
else: for row in df.values: g.add_edge(row[src_i], row[tar_i]) return g def to_numpy_matrix(G, nodelist=None, dtype=None, order=None, multigraph_weight=sum, weight='weight', nonedge=0.0): """Return the graph adjacency matrix as a NumPy matrix. Parameters ---------- G : graph The NetworkX graph used to construct the NumPy matrix. nodelist : list, optional The rows and columns are ordered according to the nodes in ``nodelist``. If ``nodelist`` is None, then the ordering is produced by G.nodes(). dtype : NumPy data type, optional A valid single NumPy data type used to initialize the array. This must be a simple type such as int or numpy.float64 and not a compound data type (see to_numpy_recarray) If None, then the NumPy default is used. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. If None, then the NumPy default is used. multigraph_weight : {sum, min, max}, optional An operator that determines how weights in multigraphs are handled. The default is to sum the weights of the multiple edges. weight : string or None optional (default = 'weight') The edge attribute that holds the numerical value used for the edge weight. If an edge does not have that attribute, then the value 1 is used instead. nonedge : float (default = 0.0) The matrix values corresponding to nonedges are typically set to zero. However, this could be undesirable if there are matrix values corresponding to actual edges that also have the value zero. If so, one might prefer nonedges to have some other value, such as nan. Returns ------- M : NumPy matrix Graph adjacency matrix See Also -------- to_numpy_recarray, from_numpy_matrix Notes ----- The matrix entries are assigned to the weight edge attribute. When an edge does not have a weight attribute, the value of the entry is set to the number 1. For multiple (parallel) edges, the values of the entries are determined by the ``multigraph_weight`` parameter. The default is to sum the weight attributes for each of the parallel edges. When ``nodelist`` does not contain every node in ``G``, the matrix is built from the subgraph of ``G`` that is induced by the nodes in ``nodelist``. The convention used for self-loop edges in graphs is to assign the diagonal matrix entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). If the alternate convention of doubling the edge weight is desired the resulting Numpy matrix can be modified as follows: >>> import numpy as np >>> G = nx.Graph([(1, 1)]) >>> A = nx.to_numpy_matrix(G) >>> A matrix([[ 1.]]) >>> A.A[np.diag_indices_from(A)] *= 2 >>> A matrix([[ 2.]]) Examples -------- >>> G = nx.MultiDiGraph() >>> G.add_edge(0,1,weight=2) >>> G.add_edge(1,0) >>> G.add_edge(2,2,weight=3) >>> G.add_edge(2,2) >>> nx.to_numpy_matrix(G, nodelist=[0,1,2]) matrix([[ 0., 2., 0.], [ 1., 0., 0.], [ 0., 0., 4.]]) """ import numpy as np if nodelist is None: nodelist = G.nodes() nodeset = set(nodelist) if len(nodelist) != len(nodeset): msg = "Ambiguous ordering: `nodelist` contained duplicates." raise nx.NetworkXError(msg) nlen=len(nodelist) undirected = not G.is_directed() index=dict(zip(nodelist,range(nlen))) # Initially, we start with an array of nans. Then we populate the matrix # using data from the graph. Afterwards, any leftover nans will be # converted to the value of `nonedge`. 
Note, we use nans initially, # instead of zero, for two reasons: # # 1) It can be important to distinguish a real edge with the value 0 # from a nonedge with the value 0. # # 2) When working with multi(di)graphs, we must combine the values of all # edges between any two nodes in some manner. This often takes the # form of a sum, min, or max. Using the value 0 for a nonedge would # have undesirable effects with min and max, but using nanmin and # nanmax with initially nan values is not problematic at all. # # That said, there are still some drawbacks to this approach. Namely, if # a real edge is nan, then that value is a) not distinguishable from # nonedges and b) is ignored by the default combinator (nansum, nanmin, # nanmax) functions used for multi(di)graphs. If this becomes an issue, # an alternative approach is to use masked arrays. Initially, every # element is masked and set to some `initial` value. As we populate the # graph, elements are unmasked (automatically) when we combine the initial # value with the values given by real edges. At the end, we convert all # masked values to `nonedge`. Using masked arrays fully addresses reason 1, # but for reason 2, we would still have the issue with min and max if the # initial values were 0.0. Note: an initial value of +inf is appropriate # for min, while an initial value of -inf is appropriate for max. When # working with sum, an initial value of zero is appropriate. Ideally then, # we'd want to allow users to specify both a value for nonedges and also # an initial value. For multi(di)graphs, the choice of the initial value # will, in general, depend on the combinator function---sensible defaults # can be provided. if G.is_multigraph(): # Handle MultiGraphs and MultiDiGraphs M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan # use numpy nan-aware operations operator={sum:np.nansum, min:np.nanmin, max:np.nanmax} try: op=operator[multigraph_weight] except: raise ValueError('multigraph_weight must be sum, min, or max') for u,v,attrs in G.edges_iter(data=True): if (u in nodeset) and (v in nodeset): i, j = index[u], index[v] e_weight = attrs.get(weight, 1) M[i,j] = op([e_weight, M[i,j]]) if undirected: M[j,i] = M[i,j] else: # Graph or DiGraph, this is much faster than above M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan for u,nbrdict in G.adjacency_iter(): for v,d in nbrdict.items(): try: M[index[u],index[v]] = d.get(weight,1) except KeyError: # This occurs when there are fewer desired nodes than # there are nodes in the graph: len(nodelist) < len(G) pass M[np.isnan(M)] = nonedge M = np.asmatrix(M) return M def from_numpy_matrix(A, parallel_edges=False, create_using=None): """Return a graph from numpy matrix. The numpy matrix is interpreted as an adjacency matrix for the graph. Parameters ---------- A : numpy matrix An adjacency matrix representation of a graph parallel_edges : Boolean If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an integer matrix, then entry *(i, j)* in the matrix is interpreted as the number of parallel edges joining vertices *i* and *j* in the graph. If it is ``False``, then the entries in the adjacency matrix are interpreted as the weight of a single edge joining the vertices. create_using : NetworkX graph Use specified graph for result. 
The default is Graph() Notes ----- If ``create_using`` is an instance of :class:`networkx.MultiGraph` or :class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the entries of ``A`` are of type ``int``, then this function returns a multigraph (of the same type as ``create_using``) with parallel edges. If ``create_using`` is an undirected multigraph, then only the edges indicated by the upper triangle of the matrix `A` will be added to the graph. If the numpy matrix has a single data type for each matrix entry it will be converted to an appropriate Python data type. If the numpy matrix has a user-specified compound data type the names of the data fields will be used as attribute keys in the resulting NetworkX graph. See Also -------- to_numpy_matrix, to_numpy_recarray Examples -------- Simple integer weights on edges: >>> import numpy >>> A=numpy.matrix([[1, 1], [2, 1]]) >>> G=nx.from_numpy_matrix(A) If ``create_using`` is a multigraph and the matrix has only integer entries, the entries will be interpreted as weighted edges joining the vertices (without creating parallel edges): >>> import numpy >>> A = numpy.matrix([[1, 1], [1, 2]]) >>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph()) >>> G[1][1] {0: {'weight': 2}} If ``create_using`` is a multigraph and the matrix has only integer entries but ``parallel_edges`` is ``True``, then the entries will be interpreted as the number of parallel edges joining those two vertices: >>> import numpy >>> A = numpy.matrix([[1, 1], [1, 2]]) >>> temp = nx.MultiGraph() >>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp) >>> G[1][1] {0: {'weight': 1}, 1: {'weight': 1}} User defined compound data type on edges: >>> import numpy >>> dt = [('weight', float), ('cost', int)] >>> A = numpy.matrix([[(1.0, 2)]], dtype = dt) >>> G = nx.from_numpy_matrix(A) >>> G.edges() [(0, 0)] >>> G[0][0]['cost'] 2 >>> G[0][0]['weight'] 1.0 """ # This should never fail if you have created a numpy matrix with numpy... import numpy as np kind_to_python_type={'f':float, 'i':int, 'u':int, 'b':bool, 'c':complex, 'S':str, 'V':'void'} try: # Python 3.x blurb = chr(1245) # just to trigger the exception kind_to_python_type['U']=str except ValueError: # Python 2.6+ kind_to_python_type['U']=unicode G=_prep_create_using(create_using) n,m=A.shape if n!=m: raise nx.NetworkXError("Adjacency matrix is not square.", "nx,ny=%s"%(A.shape,)) dt=A.dtype try: python_type=kind_to_python_type[dt.kind] except: raise TypeError("Unknown numpy data type: %s"%dt) # Make sure we get even the isolated nodes of the graph. G.add_nodes_from(range(n)) # Get a list of all the entries in the matrix with nonzero entries. These # coordinates will become the edges in the graph. edges = zip(*(np.asarray(A).nonzero())) # handle numpy constructed data type if python_type is 'void': # Sort the fields by their offset, then by dtype, then by name. fields = sorted((offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items()) triples = ((u, v, {name: kind_to_python_type[dtype.kind](val) for (_, dtype, name), val in zip(fields, A[u, v])}) for u, v in edges) # If the entries in the adjacency matrix are integers, the graph is a # multigraph, and parallel_edges is True, then create parallel edges, each # with weight 1, for each entry in the adjacency matrix. Otherwise, create # one edge for each positive entry in the adjacency matrix and set the # weight of that edge to be the entry in the matrix. 
elif python_type is int and G.is_multigraph() and parallel_edges: chain = itertools.chain.from_iterable # The following line is equivalent to: # # for (u, v) in edges: # for d in range(A[u, v]): # G.add_edge(u, v, weight=1) # triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v])) for (u, v) in edges) else: # basic data type triples = ((u, v, dict(weight=python_type(A[u, v]))) for u, v in edges) # If we are creating an undirected multigraph, only add the edges from the # upper triangle of the matrix. Otherwise, add all the edges. This relies # on the fact that the vertices created in the # ``_generated_weighted_edges()`` function are actually the row/column # indices for the matrix ``A``. # # Without this check, we run into a problem where each edge is added twice # when ``G.add_edges_from()`` is invoked below. if G.is_multigraph() and not G.is_directed(): triples = ((u, v, d) for u, v, d in triples if u <= v) G.add_edges_from(triples) return G @not_implemented_for('multigraph') def to_numpy_recarray(G,nodelist=None, dtype=[('weight',float)], order=None): """Return the graph adjacency matrix as a NumPy recarray. Parameters ---------- G : graph The NetworkX graph used to construct the NumPy matrix. nodelist : list, optional The rows and columns are ordered according to the nodes in `nodelist`. If `nodelist` is None, then the ordering is produced by G.nodes(). dtype : NumPy data-type, optional A valid NumPy named dtype used to initialize the NumPy recarray. The data type names are assumed to be keys in the graph edge attribute dictionary. order : {'C', 'F'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. If None, then the NumPy default is used. Returns ------- M : NumPy recarray The graph with specified edge data as a Numpy recarray Notes ----- When `nodelist` does not contain every node in `G`, the matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. Examples -------- >>> G = nx.Graph() >>> G.add_edge(1,2,weight=7.0,cost=5) >>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)]) >>> print(A.weight) [[ 0. 7.] [ 7. 0.]] >>> print(A.cost) [[0 5] [5 0]] """ import numpy as np if nodelist is None: nodelist = G.nodes() nodeset = set(nodelist) if len(nodelist) != len(nodeset): msg = "Ambiguous ordering: `nodelist` contained duplicates." raise nx.NetworkXError(msg) nlen=len(nodelist) undirected = not G.is_directed() index=dict(zip(nodelist,range(nlen))) M = np.zeros((nlen,nlen), dtype=dtype, order=order) names=M.dtype.names for u,v,attrs in G.edges_iter(data=True): if (u in nodeset) and (v in nodeset): i,j = index[u],index[v] values=tuple([attrs[n] for n in names]) M[i,j] = values if undirected: M[j,i] = M[i,j] return M.view(np.recarray) def to_scipy_sparse_matrix(G, nodelist=None, dtype=None, weight='weight', format='csr'): """Return the graph adjacency matrix as a SciPy sparse matrix. Parameters ---------- G : graph The NetworkX graph used to construct the NumPy matrix. nodelist : list, optional The rows and columns are ordered according to the nodes in `nodelist`. If `nodelist` is None, then the ordering is produced by G.nodes(). dtype : NumPy data-type, optional A valid NumPy dtype used to initialize the array. If None, then the NumPy default is used. weight : string or None optional (default='weight') The edge attribute that holds the numerical value used for the edge weight. If None then all edge weights are 1. 
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} The type of the matrix to be returned (default 'csr'). For some algorithms different implementations of sparse matrices can perform better. See [1]_ for details. Returns ------- M : SciPy sparse matrix Graph adjacency matrix. Notes ----- The matrix entries are populated using the edge attribute held in parameter weight. When an edge does not have that attribute, the value of the entry is 1. For multiple edges the matrix values are the sums of the edge weights. When `nodelist` does not contain every node in `G`, the matrix is built from the subgraph of `G` that is induced by the nodes in `nodelist`. Uses coo_matrix format. To convert to other formats specify the format= keyword. The convention used for self-loop edges in graphs is to assign the diagonal matrix entry value to the weight attribute of the edge (or the number 1 if the edge has no weight attribute). If the alternate convention of doubling the edge weight is desired the resulting Scipy sparse matrix can be modified as follows: >>> import scipy as sp >>> G = nx.Graph([(1,1)]) >>> A = nx.to_scipy_sparse_matrix(G) >>> print(A.todense()) [[1]] >>> A.setdiag(A.diagonal()*2) >>> print(A.todense()) [[2]] Examples -------- >>> G = nx.MultiDiGraph() >>> G.add_edge(0,1,weight=2) >>> G.add_edge(1,0) >>> G.add_edge(2,2,weight=3) >>> G.add_edge(2,2) >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2]) >>> print(S.todense()) [[0 2 0] [1 0 0] [0 0 4]] References ---------- .. [1] Scipy Dev. References, "Sparse Matrices", http://docs.scipy.org/doc/scipy/reference/sparse.html """ from scipy import sparse if nodelist is None: nodelist = G nlen = len(nodelist) if nlen == 0: raise nx.NetworkXError("Graph has no nodes or edges") if len(nodelist) != len(set(nodelist)): msg = "Ambiguous ordering: `nodelist` contained duplicates." raise nx.NetworkXError(msg) index = dict(zip(nodelist,range(nlen))) if G.number_of_edges() == 0: row,col,data=[],[],[] else: row,col,data = zip(*((index[u],index[v],d.get(weight,1)) for u,v,d in G.edges_iter(nodelist, data=True) if u in index and v in index)) if G.is_directed(): M = sparse.coo_matrix((data,(row,col)), shape=(nlen,nlen), dtype=dtype) else: # symmetrize matrix d = data + data r = row + col c = col + row # selfloop entries get double counted when symmetrizing # so we subtract the data on the diagonal selfloops = G.selfloop_edges(data=True) if selfloops: diag_index,diag_data = zip(*((index[u],-d.get(weight,1)) for u,v,d in selfloops if u in index and v in index)) d += diag_data r += diag_index c += diag_index M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype) try: return M.asformat(format) except AttributeError: raise nx.NetworkXError("Unknown sparse matrix format: %s"%format) def _csr_gen_triples(A): """Converts a SciPy sparse matrix in **Compressed Sparse Row** format to an iterable of weighted edge triples. """ nrows = A.shape[0] data, indices, indptr = A.data, A.indices, A.indptr for i in range(nrows): for j in range(indptr[i], indptr[i+1]): yield i, indices[j], data[j] def _csc_gen_triples(A): """Converts a SciPy sparse matrix in **Compressed Sparse Column** format to an iterable of weighted edge triples. """ ncols = A.shape[1] data, indices, indptr = A.data, A.indices, A.indptr for i in range(ncols): for j in range(indptr[i], indptr[i+1]): yield indices[j], i, data[j] def _coo_gen_triples(A): """Converts a SciPy sparse matrix in **Coordinate** format to an iterable of weighted edge triples. 
""" row, col, data = A.row, A.col, A.data return zip(row, col, data) def _dok_gen_triples(A): """Converts a SciPy sparse matrix in **Dictionary of Keys** format to an iterable of weighted edge triples. """ for (r, c), v in A.items(): yield r, c, v def _generate_weighted_edges(A): """Returns an iterable over (u, v, w) triples, where u and v are adjacent vertices and w is the weight of the edge joining u and v. `A` is a SciPy sparse matrix (in any format). """ if A.format == 'csr': return _csr_gen_triples(A) if A.format == 'csc': return _csc_gen_triples(A) if A.format == 'dok': return _dok_gen_triples(A) # If A is in any other format (including COO), convert it to COO format. return _coo_gen_triples(A.tocoo()) def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None, edge_attribute='weight'): """Creates a new graph from an adjacency matrix given as a SciPy sparse matrix. Parameters ---------- A: scipy sparse matrix An adjacency matrix representation of a graph parallel_edges : Boolean If this is ``True``, `create_using` is a multigraph, and `A` is an integer matrix, then entry *(i, j)* in the matrix is interpreted as the number of parallel edges joining vertices *i* and *j* in the graph. If it is ``False``, then the entries in the adjacency matrix are interpreted as the weight of a single edge joining the vertices. create_using: NetworkX graph Use specified graph for result. The default is Graph() edge_attribute: string Name of edge attribute to store matrix numeric value. The data will have the same type as the matrix entry (int, float, (real,imag)). Notes ----- If `create_using` is an instance of :class:`networkx.MultiGraph` or :class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the entries of `A` are of type ``int``, then this function returns a multigraph (of the same type as `create_using`) with parallel edges. In this case, `edge_attribute` will be ignored. If `create_using` is an undirected multigraph, then only the edges indicated by the upper triangle of the matrix `A` will be added to the graph. Examples -------- >>> import scipy.sparse >>> A = scipy.sparse.eye(2,2,1) >>> G = nx.from_scipy_sparse_matrix(A) If `create_using` is a multigraph and the matrix has only integer entries, the entries will be interpreted as weighted edges joining the vertices (without creating parallel edges): >>> import scipy >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]]) >>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph()) >>> G[1][1] {0: {'weight': 2}} If `create_using` is a multigraph and the matrix has only integer entries but `parallel_edges` is ``True``, then the entries will be interpreted as the number of parallel edges joining those two vertices: >>> import scipy >>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]]) >>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True, ... create_using=nx.MultiGraph()) >>> G[1][1] {0: {'weight': 1}, 1: {'weight': 1}} """ G = _prep_create_using(create_using) n,m = A.shape if n != m: raise nx.NetworkXError(\ "Adjacency matrix is not square. nx,ny=%s"%(A.shape,)) # Make sure we get even the isolated nodes of the graph. G.add_nodes_from(range(n)) # Create an iterable over (u, v, w) triples and for each triple, add an # edge from u to v with weight w. triples = _generate_weighted_edges(A) # If the entries in the adjacency matrix are integers, the graph is a # multigraph, and parallel_edges is True, then create parallel edges, each # with weight 1, for each entry in the adjacency matrix. 
Otherwise, create # one edge for each positive entry in the adjacency matrix and set the # weight of that edge to be the entry in the matrix. if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges: chain = itertools.chain.from_iterable # The following line is equivalent to: # # for (u, v) in edges: # for d in range(A[u, v]): # G.add_edge(u, v, weight=1) # triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) # If we are creating an undirected multigraph, only add the edges from the # upper triangle of the matrix. Otherwise, add all the edges. This relies # on the fact that the vertices created in the # ``_generated_weighted_edges()`` function are actually the row/column # indices for the matrix ``A``. # # Without this check, we run into a problem where each edge is added twice # when `G.add_weighted_edges_from()` is invoked below. if G.is_multigraph() and not G.is_directed(): triples = ((u, v, d) for u, v, d in triples if u <= v) G.add_weighted_edges_from(triples, weight=edge_attribute) return G # fixture for nose tests def setup_module(module): from nose import SkipTest try: import numpy except: raise SkipTest("NumPy not available") try: import scipy except: raise SkipTest("SciPy not available")
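# ----------------------------------------------------------------------------
# Round-trip usage sketch for the converters defined above. Illustrative only:
# it assumes the NetworkX 1.x style API exposed by this module
# (to_numpy_matrix / from_numpy_matrix / to_scipy_sparse_matrix /
# to_pandas_dataframe) and that scipy and pandas are installed; the small
# example graph is not part of the module.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import networkx as nx

    G = nx.Graph()
    G.add_edge('a', 'b', weight=2.0)
    G.add_edge('b', 'c', weight=0.5)

    # Dense adjacency matrix; rows/columns follow the explicit nodelist.
    A = nx.to_numpy_matrix(G, nodelist=['a', 'b', 'c'])

    # Rebuilding from the matrix relabels nodes as 0..n-1 and stores the
    # matrix entries in the 'weight' edge attribute.
    H = nx.from_numpy_matrix(A)
    assert H[0][1]['weight'] == 2.0

    # Sparse and DataFrame flavours of the same adjacency structure.
    S = nx.to_scipy_sparse_matrix(G, nodelist=['a', 'b', 'c'], format='csr')
    df = nx.to_pandas_dataframe(G, nodelist=['a', 'b', 'c'])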
bsd-3-clause
aloverso/SoftwareSystems
hw04/wave3/thinkdsp.py
23
31996
"""This file contains code used in "Think DSP", by Allen B. Downey, available from greenteapress.com Copyright 2013 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ import array import math import numpy import random import scipy import scipy.stats import struct import subprocess import thinkplot from fractions import gcd from wave import open as open_wave import matplotlib.pyplot as pyplot PI2 = math.pi * 2 def random_seed(x): """Initialize the random and numpy.random generators. x: int seed """ random.seed(x) numpy.random.seed(x) class UnimplementedMethodException(Exception): """Exception if someone calls a method that should be overridden.""" class WavFileWriter(object): """Writes wav files.""" def __init__(self, filename='sound.wav', framerate=11025): """Opens the file and sets parameters. filename: string framerate: samples per second """ self.filename = filename self.framerate = framerate self.nchannels = 1 self.sampwidth = 2 self.bits = self.sampwidth * 8 self.bound = 2**(self.bits-1) - 1 self.fmt = 'h' self.dtype = numpy.int16 self.fp = open_wave(self.filename, 'w') self.fp.setnchannels(self.nchannels) self.fp.setsampwidth(self.sampwidth) self.fp.setframerate(self.framerate) def write(self, wave): """Writes a wave. wave: Wave """ zs = wave.quantize(self.bound, self.dtype) self.fp.writeframes(zs.tostring()) def close(self, duration=0): """Closes the file. duration: how many seconds of silence to append """ if duration: self.write(rest(duration)) self.fp.close() def read_wave(filename='sound.wav'): """Reads a wave file. filename: string returns: Wave """ fp = open_wave(filename, 'r') nchannels = fp.getnchannels() nframes = fp.getnframes() sampwidth = fp.getsampwidth() framerate = fp.getframerate() z_str = fp.readframes(nframes) fp.close() dtype_map = {1:numpy.int8, 2:numpy.int16} assert sampwidth in dtype_map ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth]) wave = Wave(ys, framerate) return wave def play_wave(filename='sound.wav', player='aplay'): """Plays a wave file. filename: string player: string name of executable that plays wav files """ cmd = '%s %s' % (player, filename) popen = subprocess.Popen(cmd, shell=True) popen.communicate() class _SpectrumParent(object): """Contains code common to Spectrum and DCT. """ @property def max_freq(self): return self.framerate / 2.0 @property def freq_res(self): return self.max_freq / (len(self.fs) - 1) def plot(self, low=0, high=None, **options): """Plots amplitude vs frequency. low: int index to start at high: int index to end at """ thinkplot.plot(self.fs[low:high], self.amps[low:high], **options) def plot_power(self, low=0, high=None, **options): """Plots power vs frequency. low: int index to start at high: int index to end at """ thinkplot.plot(self.fs[low:high], self.power[low:high], **options) def estimate_slope(self): """Runs linear regression on log power vs log frequency. returns: slope, inter, r2, p, stderr """ x = numpy.log(self.fs[1:]) y = numpy.log(self.power[1:]) t = scipy.stats.linregress(x,y) return t def peaks(self): """Finds the highest peaks and their frequencies. 
returns: sorted list of (amplitude, frequency) pairs """ t = zip(self.amps, self.fs) t.sort(reverse=True) return t class Spectrum(_SpectrumParent): """Represents the spectrum of a signal.""" def __init__(self, hs, framerate): self.hs = hs self.framerate = framerate n = len(hs) self.fs = numpy.linspace(0, self.max_freq, n) def __add__(self, other): if other == 0: return self assert self.framerate == other.framerate hs = self.hs + other.hs return Spectrum(hs, self.framerate) __radd__ = __add__ @property def real(self): """Returns the real part of the hs (read-only property).""" return numpy.real(self.hs) @property def imag(self): """Returns the imaginary part of the hs (read-only property).""" return numpy.imag(self.hs) @property def amps(self): """Returns a sequence of amplitudes (read-only property).""" return numpy.absolute(self.hs) @property def power(self): """Returns a sequence of powers (read-only property).""" return self.amps ** 2 def low_pass(self, cutoff, factor=0): """Attenuate frequencies above the cutoff. cutoff: frequency in Hz factor: what to multiply the magnitude by """ for i in xrange(len(self.hs)): if self.fs[i] > cutoff: self.hs[i] *= factor def high_pass(self, cutoff, factor=0): """Attenuate frequencies below the cutoff. cutoff: frequency in Hz factor: what to multiply the magnitude by """ for i in xrange(len(self.hs)): if self.fs[i] < cutoff: self.hs[i] *= factor def band_stop(self, low_cutoff, high_cutoff, factor=0): """Attenuate frequencies between the cutoffs. low_cutoff: frequency in Hz high_cutoff: frequency in Hz factor: what to multiply the magnitude by """ for i in xrange(len(self.hs)): if low_cutoff < self.fs[i] < high_cutoff: self.hs[i] = 0 def pink_filter(self, beta=1): """Apply a filter that would make white noise pink. beta: exponent of the pink noise """ denom = self.fs ** (beta/2.0) denom[0] = 1 self.hs /= denom def angles(self, i): """Computes phase angles in radians. returns: list of phase angles """ return numpy.angle(self.hs) def make_integrated_spectrum(self): """Makes an integrated spectrum. """ cs = numpy.cumsum(self.power) cs /= cs[-1] return IntegratedSpectrum(cs, self.fs) def make_wave(self): """Transforms to the time domain. returns: Wave """ ys = numpy.fft.irfft(self.hs) return Wave(ys, self.framerate) class IntegratedSpectrum(object): """Represents the integral of a spectrum.""" def __init__(self, cs, fs): """Initializes an integrated spectrum: cs: sequence of cumulative amplitudes fs: sequence of frequences """ self.cs = cs self.fs = fs def plot_power(self, low=0, high=None, expo=False, **options): """Plots the integrated spectrum. low: int index to start at high: int index to end at """ cs = self.cs[low:high] fs = self.fs[low:high] if expo: cs = numpy.exp(cs) thinkplot.Plot(fs, cs, **options) def estimate_slope(self, low=1, high=-12000): """Runs linear regression on log cumulative power vs log frequency. returns: slope, inter, r2, p, stderr """ #print self.fs[low:high] #print self.cs[low:high] x = numpy.log(self.fs[low:high]) y = numpy.log(self.cs[low:high]) t = scipy.stats.linregress(x,y) return t class Dct(_SpectrumParent): """Represents the spectrum of a signal.""" def __init__(self, amps, framerate): self.amps = amps self.framerate = framerate n = len(amps) self.fs = numpy.arange(n) / float(n) * self.max_freq def make_wave(self): """Transforms to the time domain. 
returns: Wave """ ys = scipy.fftpack.dct(self.amps, type=3) / 2 return Wave(ys, self.framerate) class Spectrogram(object): """Represents the spectrum of a signal.""" def __init__(self, spec_map, seg_length, window_func=None): """Initialize the spectrogram. spec_map: map from float time to Spectrum seg_length: number of samples in each segment window_func: function that computes the window """ self.spec_map = spec_map self.seg_length = seg_length self.window_func = window_func def any_spectrum(self): """Returns an arbitrary spectrum from the spectrogram.""" return self.spec_map.itervalues().next() @property def time_res(self): """Time resolution in seconds.""" spectrum = self.any_spectrum() return float(self.seg_length) / spectrum.framerate @property def freq_res(self): """Frequency resolution in Hz.""" return self.any_spectrum().freq_res def times(self): """Sorted sequence of times. returns: sequence of float times in seconds """ ts = sorted(self.spec_map.iterkeys()) return ts def frequencies(self): """Sequence of frequencies. returns: sequence of float freqencies in Hz. """ fs = self.any_spectrum().fs return fs def plot(self, low=0, high=None, **options): """Make a pseudocolor plot. low: index of the lowest frequency component to plot high: index of the highest frequency component to plot """ ts = self.times() fs = self.frequencies()[low:high] # make the array size = len(fs), len(ts) array = numpy.zeros(size, dtype=numpy.float) # copy amplitude from each spectrum into a column of the array for i, t in enumerate(ts): spectrum = self.spec_map[t] array[:,i] = spectrum.amps[low:high] thinkplot.pcolor(ts, fs, array, **options) def make_wave(self): """Inverts the spectrogram and returns a Wave. returns: Wave """ res = [] for t, spectrum in sorted(self.spec_map.iteritems()): wave = spectrum.make_wave() n = len(wave) if self.window_func: window = 1 / self.window_func(n) wave.window(window) i = int(round(t * wave.framerate)) start = i - n / 2 end = start + n res.append((start, end, wave)) starts, ends, waves = zip(*res) low = min(starts) high = max(ends) ys = numpy.zeros(high-low, numpy.float) for start, end, wave in res: ys[start:end] = wave.ys return Wave(ys, wave.framerate) class Wave(object): """Represents a discrete-time waveform. Note: the ys attribute is a "wave array" which is a numpy array of floats. """ def __init__(self, ys, framerate, start=0): """Initializes the wave. ys: wave array framerate: samples per second """ self.ys = ys self.framerate = framerate self.start = start def __len__(self): return len(self.ys) @property def duration(self): """Duration (property). returns: float duration in seconds """ return len(self.ys) / float(self.framerate) def __or__(self, other): """Concatenates two waves. other: Wave returns: Wave """ if self.framerate != other.framerate: raise ValueError('Wave.__or__: framerates do not agree') ys = numpy.concatenate((self.ys, other.ys)) return Wave(ys, self.framerate) def quantize(self, bound, dtype): """Maps the waveform to quanta. bound: maximum amplitude dtype: numpy data type or string returns: quantized signal """ return quantize(self.ys, bound, dtype) def apodize(self, denom=20, duration=0.1): """Tapers the amplitude at the beginning and end of the signal. Tapers either the given duration of time or the given fraction of the total duration, whichever is less. 
denom: float fraction of the segment to taper duration: float duration of the taper in seconds """ self.ys = apodize(self.ys, self.framerate, denom, duration) def hamming(self): """Apply a Hamming window to the wave. """ self.ys *= numpy.hamming(len(self.ys)) def window(self, window): """Apply a window to the wave. window: sequence of multipliers, same length as self.ys """ self.ys *= window def normalize(self, amp=1.0): """Normalizes the signal to the given amplitude. amp: float amplitude """ self.ys = normalize(self.ys, amp=amp) def unbias(self): """Unbiases the signal. """ self.ys = unbias(self.ys) def segment(self, start=0, duration=None): """Extracts a segment. start: float start time in seconds duration: float duration in seconds returns: Wave """ i = start * self.framerate if duration is None: j = None else: j = i + duration * self.framerate ys = self.ys[i:j] return Wave(ys, self.framerate) def make_spectrum(self): """Computes the spectrum using FFT. returns: Spectrum """ hs = numpy.fft.rfft(self.ys) return Spectrum(hs, self.framerate) def make_dct(self): amps = scipy.fftpack.dct(self.ys, type=2) return Dct(amps, self.framerate) def make_spectrogram(self, seg_length, window_func=numpy.hamming): """Computes the spectrogram of the wave. seg_length: number of samples in each segment window_func: function used to compute the window returns: Spectrogram """ n = len(self.ys) window = window_func(seg_length) start, end, step = 0, seg_length, seg_length / 2 spec_map = {} while end < n: ys = self.ys[start:end] * window hs = numpy.fft.rfft(ys) t = (start + end) / 2.0 / self.framerate spec_map[t] = Spectrum(hs, self.framerate) start += step end += step return Spectrogram(spec_map, seg_length, window_func) def plot(self, **options): """Plots the wave. """ n = len(self.ys) ts = numpy.linspace(0, self.duration, n) thinkplot.plot(ts, self.ys, **options) def corr(self, other): """Correlation coefficient two waves. other: Wave returns: 2x2 covariance matrix """ mat = self.cov_mat(other) corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1]) return corr def cov_mat(self, other): """Covariance matrix of two waves. other: Wave returns: 2x2 covariance matrix """ return numpy.cov(self.ys, other.ys) def cov(self, other): """Covariance of two unbiased waves. other: Wave returns: float """ total = sum(self.ys * other.ys) / len(self.ys) return total def cos_cov(self, k): """Covariance with a cosine signal. freq: freq of the cosine signal in Hz returns: float covariance """ n = len(self.ys) factor = math.pi * k / n ys = [math.cos(factor * (i+0.5)) for i in range(n)] total = 2 * sum(self.ys * ys) return total def cos_transform(self): """Discrete cosine transform. returns: list of frequency, cov pairs """ n = len(self.ys) res = [] for k in range(n): cov = self.cos_cov(k) res.append((k, cov)) return res def write(self, filename='sound.wav'): """Write a wave file. filename: string """ print 'Writing', filename wfile = WavFileWriter(filename, self.framerate) wfile.write(self) wfile.close() def play(self, filename='sound.wav'): """Plays a wave file. filename: string """ self.write(filename) play_wave(filename) def unbias(ys): """Shifts a wave array so it has mean 0. ys: wave array returns: wave array """ return ys - ys.mean() def normalize(ys, amp=1.0): """Normalizes a wave array so the maximum amplitude is +amp or -amp. 
ys: wave array amp: max amplitude (pos or neg) in result returns: wave array """ high, low = abs(max(ys)), abs(min(ys)) return amp * ys / max(high, low) def quantize(ys, bound, dtype): """Maps the waveform to quanta. ys: wave array bound: maximum amplitude dtype: numpy data type of the result returns: quantized signal """ if max(ys) > 1 or min(ys) < -1: print 'Warning: normalizing before quantizing.' ys = normalize(ys) zs = (ys * bound).astype(dtype) return zs def apodize(ys, framerate, denom=20, duration=0.1): """Tapers the amplitude at the beginning and end of the signal. Tapers either the given duration of time or the given fraction of the total duration, whichever is less. ys: wave array framerate: int frames per second denom: float fraction of the segment to taper duration: float duration of the taper in seconds returns: wave array """ # a fixed fraction of the segment n = len(ys) k1 = n / denom # a fixed duration of time k2 = int(duration * framerate) k = min(k1, k2) w1 = numpy.linspace(0, 1, k) w2 = numpy.ones(n - 2*k) w3 = numpy.linspace(1, 0, k) window = numpy.concatenate((w1, w2, w3)) return ys * window class Signal(object): """Represents a time-varying signal.""" def __add__(self, other): """Adds two signals. other: Signal returns: Signal """ if other == 0: return self return SumSignal(self, other) __radd__ = __add__ @property def period(self): """Period of the signal in seconds (property). For non-periodic signals, use the default, 0.1 seconds returns: float seconds """ return 0.1 def plot(self, framerate=11025): """Plots the signal. framerate: samples per second """ duration = self.period * 3 wave = self.make_wave(duration, start=0, framerate=framerate) wave.plot() def make_wave(self, duration=1, start=0, framerate=11025): """Makes a Wave object. duration: float seconds start: float seconds framerate: int frames per second returns: Wave """ dt = 1.0 / framerate ts = numpy.arange(start, duration, dt) ys = self.evaluate(ts) return Wave(ys, framerate=framerate, start=start) def infer_framerate(ts): """Given ts, find the framerate. Assumes that the ts are equally spaced. ts: sequence of times in seconds returns: frames per second """ dt = ts[1] - ts[0] framerate = 1.0 / dt return framerate class SumSignal(Signal): """Represents the sum of signals.""" def __init__(self, *args): """Initializes the sum. args: tuple of signals """ self.signals = args @property def period(self): """Period of the signal in seconds. Note: this is not correct; it's mostly a placekeeper. But it is correct for a harmonic sequence where all component frequencies are multiples of the fundamental. returns: float seconds """ return max(sig.period for sig in self.signals) def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ return sum(sig.evaluate(ts) for sig in self.signals) class Sinusoid(Signal): """Represents a sinusoidal signal.""" def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin): """Initializes a sinusoidal signal. freq: float frequency in Hz amp: float amplitude, 1.0 is nominal max offset: float phase offset in radians func: function that maps phase to amplitude """ self.freq = freq self.amp = amp self.offset = offset self.func = func @property def period(self): """Period of the signal in seconds. returns: float seconds """ return 1.0 / self.freq def evaluate(self, ts): """Evaluates the signal at the given times. 
ts: float array of times returns: float wave array """ phases = PI2 * self.freq * ts + self.offset ys = self.amp * self.func(phases) return ys def CosSignal(freq=440, amp=1.0, offset=0): """Makes a consine Sinusoid. freq: float frequency in Hz amp: float amplitude, 1.0 is nominal max offset: float phase offset in radians returns: Sinusoid object """ return Sinusoid(freq, amp, offset, func=numpy.cos) def SinSignal(freq=440, amp=1.0, offset=0): """Makes a sine Sinusoid. freq: float frequency in Hz amp: float amplitude, 1.0 is nominal max offset: float phase offset in radians returns: Sinusoid object """ return Sinusoid(freq, amp, offset, func=numpy.sin) class SquareSignal(Sinusoid): """Represents a square signal.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ cycles = self.freq * ts + self.offset / PI2 frac, _ = numpy.modf(cycles) ys = self.amp * numpy.sign(unbias(frac)) return ys class SawtoothSignal(Sinusoid): """Represents a sawtooth signal.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ cycles = self.freq * ts + self.offset / PI2 frac, _ = numpy.modf(cycles) ys = normalize(unbias(frac), self.amp) return ys class ParabolicSignal(Sinusoid): """Represents a parabolic signal.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ cycles = self.freq * ts + self.offset / PI2 frac, _ = numpy.modf(cycles) ys = frac**2 ys = normalize(unbias(ys), self.amp) return ys class GlottalSignal(Sinusoid): """Represents a periodic signal that resembles a glottal signal.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ cycles = self.freq * ts + self.offset / PI2 frac, _ = numpy.modf(cycles) ys = frac**4 * (1-frac) ys = normalize(unbias(ys), self.amp) return ys class TriangleSignal(Sinusoid): """Represents a triangle signal.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ cycles = self.freq * ts + self.offset / PI2 frac, _ = numpy.modf(cycles) ys = numpy.abs(frac - 0.5) ys = normalize(unbias(ys), self.amp) return ys class Chirp(Signal): """Represents a signal with variable frequency.""" def __init__(self, start=440, end=880, amp=1.0): """Initializes a linear chirp. start: float frequency in Hz end: float frequency in Hz amp: float amplitude, 1.0 is nominal max """ self.start = start self.end = end self.amp = amp @property def period(self): """Period of the signal in seconds. returns: float seconds """ return ValueError('Non-periodic signal.') def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ freqs = numpy.linspace(self.start, self.end, len(ts)-1) return self._evaluate(ts, freqs) def _evaluate(self, ts, freqs): """Helper function that evaluates the signal. ts: float array of times freqs: float array of frequencies during each interval """ #n = len(freqs) #print freqs[::n/2] dts = numpy.diff(ts) dps = PI2 * freqs * dts phases = numpy.cumsum(dps) phases = numpy.insert(phases, 0, 0) ys = self.amp * numpy.cos(phases) return ys class ExpoChirp(Chirp): """Represents a signal with varying frequency.""" def evaluate(self, ts): """Evaluates the signal at the given times. 
ts: float array of times returns: float wave array """ start, end = math.log10(self.start), math.log10(self.end) freqs = numpy.logspace(start, end, len(ts)-1) return self._evaluate(ts, freqs) class SilentSignal(Signal): """Represents silence.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ return numpy.zeros(len(ts)) class _Noise(Signal): """Represents a noise signal (abstract parent class).""" def __init__(self, amp=1.0): """Initializes a white noise signal. amp: float amplitude, 1.0 is nominal max """ self.amp = amp @property def period(self): """Period of the signal in seconds. returns: float seconds """ return ValueError('Non-periodic signal.') class UncorrelatedUniformNoise(_Noise): """Represents uncorrelated uniform noise.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ ys = numpy.random.uniform(-self.amp, self.amp, len(ts)) return ys class UncorrelatedGaussianNoise(_Noise): """Represents uncorrelated gaussian noise.""" def evaluate(self, ts): """Evaluates the signal at the given times. ts: float array of times returns: float wave array """ ys = numpy.random.normal(0, 1, len(ts)) ys = normalize(ys, self.amp) return ys class BrownianNoise(_Noise): """Represents Brownian noise, aka red noise.""" def evaluate(self, ts): """Evaluates the signal at the given times. Computes Brownian noise by taking the cumulative sum of a uniform random series. ts: float array of times returns: float wave array """ #dys = numpy.random.normal(0, 1, len(ts)) dys = numpy.random.uniform(-1, 1, len(ts)) #ys = numpy.cumsum(dys) ys = scipy.integrate.cumtrapz(dys, ts) ys = normalize(unbias(ys), self.amp) return ys class PinkNoise(_Noise): """Represents Brownian noise, aka red noise.""" def __init__(self, amp=1.0, beta=1.0): """Initializes a pink noise signal. amp: float amplitude, 1.0 is nominal max """ self.amp = amp self.beta = beta def make_wave(self, duration=1, start=0, framerate=11025): """Makes a Wave object. duration: float seconds start: float seconds framerate: int frames per second returns: Wave """ signal = UncorrelatedUniformNoise() wave = signal.make_wave(duration, start, framerate) spectrum = wave.make_spectrum() spectrum.pink_filter(beta=self.beta) wave2 = spectrum.make_wave() wave2.unbias() wave2.normalize(self.amp) return wave2 def rest(duration): """Makes a rest of the given duration. duration: float seconds returns: Wave """ signal = SilentSignal() wave = signal.make_wave(duration) return wave def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025): """Make a MIDI note with the given duration. midi_num: int MIDI note number duration: float seconds sig_cons: Signal constructor function framerate: int frames per second returns: Wave """ freq = midi_to_freq(midi_num) signal = sig_cons(freq) wave = signal.make_wave(duration, framerate=framerate) wave.apodize() return wave def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025): """Make a chord with the given duration. midi_nums: sequence of int MIDI note numbers duration: float seconds sig_cons: Signal constructor function framerate: int frames per second returns: Wave """ freqs = [midi_to_freq(num) for num in midi_nums] signal = sum(sig_cons(freq) for freq in freqs) wave = signal.make_wave(duration, framerate=framerate) wave.apodize() return wave def midi_to_freq(midi_num): """Converts MIDI note number to frequency. 
midi_num: int MIDI note number returns: float frequency in Hz """ x = (midi_num - 69) / 12.0 freq = 440.0 * 2**x return freq def sin_wave(freq, duration=1, offset=0): """Makes a sine wave with the given parameters. freq: float cycles per second duration: float seconds offset: float radians returns: Wave """ signal = SinSignal(freq, offset=offset) wave = signal.make_wave(duration) return wave def cos_wave(freq, duration=1, offset=0): """Makes a cosine wave with the given parameters. freq: float cycles per second duration: float seconds offset: float radians returns: Wave """ signal = CosSignal(freq, offset=offset) wave = signal.make_wave(duration) return wave def mag(a): """Computes the magnitude of a numpy array. a: numpy array returns: float """ return numpy.sqrt(numpy.dot(a, a)) def main(): cos_basis = cos_wave(440) sin_basis = sin_wave(440) wave = cos_wave(440, offset=math.pi/2) cos_cov = cos_basis.cov(wave) sin_cov = sin_basis.cov(wave) print cos_cov, sin_cov, mag((cos_cov, sin_cov)) return wfile = WavFileWriter() for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal, GlottalSignal, ParabolicSignal, SquareSignal]: print sig_cons sig = sig_cons(440) wave = sig.make_wave(1) wave.apodize() wfile.write(wave) wfile.close() return signal = GlottalSignal(440) signal.plot() pyplot.show() return wfile = WavFileWriter() for m in range(60, 0, -1): wfile.write(make_note(m, 0.25)) wfile.close() return wave1 = make_note(69, 1) wave2 = make_chord([69, 72, 76], 1) wave = wave1 | wave2 wfile = WavFileWriter() wfile.write(wave) wfile.close() return sig1 = CosSignal(freq=440) sig2 = CosSignal(freq=523.25) sig3 = CosSignal(freq=660) sig4 = CosSignal(freq=880) sig5 = CosSignal(freq=987) sig = sig1 + sig2 + sig3 + sig4 #wave = Wave(sig, duration=0.02) #wave.plot() wave = sig.make_wave(duration=1) #wave.normalize() wfile = WavFileWriter(wave) wfile.write() wfile.close() if __name__ == '__main__': main()
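The module above composes into a short synthesis-and-filtering pipeline: a Signal produces a Wave, a Wave produces a Spectrum, and a filtered Spectrum can be inverted back into a Wave. The sketch below is illustrative only and is not part of the file; it assumes the module has been saved as thinkdsp.py and is run under Python 2 (matching its print syntax), and the frequencies, cutoff, and output filename are arbitrary example values.

import thinkdsp

# midi_to_freq implements freq = 440 * 2 ** ((midi_num - 69) / 12.0), so
# MIDI 69 is concert A (440 Hz) and MIDI 81, one octave higher, is 880 Hz.
assert thinkdsp.midi_to_freq(69) == 440.0
assert thinkdsp.midi_to_freq(81) == 880.0

# Synthesize half a second of a 440 Hz cosine and taper its ends.
signal = thinkdsp.CosSignal(freq=440, amp=0.5)
wave = signal.make_wave(duration=0.5, framerate=11025)
wave.apodize()

# Round-trip through the frequency domain: attenuate everything above 1 kHz,
# then invert, normalize, and write the result as a WAV file.
spectrum = wave.make_spectrum()
spectrum.low_pass(cutoff=1000, factor=0.1)
filtered = spectrum.make_wave()
filtered.normalize()
filtered.write(filename='filtered.wav')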
gpl-3.0
arborh/tensorflow
tensorflow/python/keras/engine/data_adapter_test.py
3
30720
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DataAdapter tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.utils import data_utils from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class DummyArrayLike(object): """Dummy array-like object.""" def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] @property def shape(self): return self.data.shape @property def dtype(self): return self.data.dtype def fail_on_convert(x, **kwargs): _ = x _ = kwargs raise TypeError('Cannot convert DummyArrayLike to a tensor') ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert) class DataAdapterTestBase(keras_parameterized.TestCase): def setUp(self): super(DataAdapterTestBase, self).setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = constant_op.constant(2.0, shape=(50, 10)) self.tensor_target = array_ops.ones((50,)) self.arraylike_input = DummyArrayLike(self.numpy_input) self.arraylike_target = DummyArrayLike(self.numpy_target) self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices( (self.numpy_input, self.numpy_target)).shuffle(50).batch( self.batch_size) def generator(): while True: yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size)) self.generator_input = generator() self.iterator_input = data_utils.threadsafe_generator(generator)() self.sequence_input = TestSequence(batch_size=self.batch_size, feature_shape=10) self.model = keras.models.Sequential( [keras.layers.Dense(8, input_shape=(10,), activation='softmax')]) class TestSequence(data_utils.Sequence): def __init__(self, batch_size, feature_shape): self.batch_size = batch_size self.feature_shape = feature_shape def __getitem__(self, item): return (np.zeros((self.batch_size, self.feature_shape)), np.ones((self.batch_size,))) def __len__(self): return 10 class TensorLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(TensorLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.TensorLikeDataAdapter def test_can_handle_numpy(self): self.assertTrue(self.adapter_cls.can_handle(self.numpy_input)) self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) 
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_iterator_expect_batch_size_numpy(self): with self.assertRaisesRegexp( ValueError, r'`batch_size` or `steps` is required'): self.adapter_cls(self.numpy_input, self.numpy_target) def test_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_batch_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=4) self.assertEqual(adapter.get_size(), 13) # 50/4 self.assertTrue(adapter.has_partial_batch()) self.assertEqual(adapter.partial_batch_size(), 2) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.numpy_input, self.numpy_target, batch_size=5) def test_can_handle_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input))) self.assertTrue( self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0])) self.assertTrue( self.adapter_cls.can_handle( pd.DataFrame(self.numpy_input), pd.DataFrame(self.numpy_input)[0])) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') input_a = keras.Input(shape=(3,), name='input_a') input_b = keras.Input(shape=(3,), name='input_b') input_c = keras.Input(shape=(1,), name='input_b') x = keras.layers.Dense(4, name='dense_1')(input_a) y = keras.layers.Dense(3, name='dense_2')(input_b) z = keras.layers.Dense(1, name='dense_3')(input_c) model_1 = keras.Model(inputs=input_a, outputs=x) model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y]) model_3 = keras.Model(inputs=input_c, outputs=z) model_1.compile(optimizer='rmsprop', loss='mse') model_2.compile(optimizer='rmsprop', loss='mse') input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) input_a_df = pd.DataFrame(input_a_np) input_b_df = pd.DataFrame(input_b_np) output_a_df = pd.DataFrame(np.random.random((10, 4))) output_b_df = pd.DataFrame(np.random.random((10, 3))) model_1.fit(input_a_df, output_a_df) model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df]) model_1.fit([input_a_df], [output_a_df]) model_1.fit({'input_a': input_a_df}, output_a_df) model_2.fit({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) model_1.evaluate(input_a_df, output_a_df) model_2.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df]) 
model_1.evaluate([input_a_df], [output_a_df]) model_1.evaluate({'input_a': input_a_df}, output_a_df) model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) # Verify predicting on pandas vs numpy returns the same result predict_1_pandas = model_1.predict(input_a_df) predict_2_pandas = model_2.predict([input_a_df, input_b_df]) predict_3_pandas = model_3.predict(input_a_df[0]) predict_1_numpy = model_1.predict(input_a_np) predict_2_numpy = model_2.predict([input_a_np, input_b_np]) predict_3_numpy = model_3.predict(np.asarray(input_a_df[0])) self.assertAllClose(predict_1_numpy, predict_1_pandas) self.assertAllClose(predict_2_numpy, predict_2_pandas) self.assertAllClose(predict_3_numpy, predict_3_pandas) # Extra ways to pass in dataframes model_1.predict([input_a_df]) model_1.predict({'input_a': input_a_df}) model_2.predict({'input_a': input_a_df, 'input_b': input_b_df}) def test_can_handle(self): self.assertTrue(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input)) self.assertFalse( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.tensor_input, self.tensor_target, batch_size=5) def test_size(self): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. 
epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, and that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch remains contiguous for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class GenericArrayLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GenericArrayLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter def test_can_handle_some_numpy(self): self.assertTrue(self.adapter_cls.can_handle( self.arraylike_input)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) # Because adapters are mutually exclusive, don't handle cases # where all the data is numpy or an eagertensor self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) # But do handle mixes that include generic arraylike data self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.arraylike_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.numpy_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.tensor_target)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) 
self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_iterator_expect_batch_size_generic_arraylike(self): with self.assertRaisesRegexp( ValueError, r'`batch_size` or `steps` is required'): self.adapter_cls(self.arraylike_input, self.arraylike_target) def test_size(self): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.arraylike_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): # First verify that DummyArrayLike can't be converted to a Tensor with self.assertRaises(TypeError): ops.convert_to_tensor(self.arraylike_input) # Then train on the array like. # It should not be converted to a tensor directly (which would force it into # memory), only the sliced data should be converted. self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.predict(self.arraylike_input, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.numpy_target, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.numpy_target, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_tensor_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.tensor_target, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.tensor_target, batch_size=5) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, but that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch is shuffled contiguous data for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class DatasetAdapterTest(DataAdapterTestBase): def setUp(self): super(DatasetAdapterTest, self).setUp() self.adapter_cls = data_adapter.DatasetAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): dataset = self.adapter_cls(self.dataset_input).get_dataset() self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(dataset) def test_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.batch_size()) def test_partial_batch(self): adapter = self.adapter_cls(self.dataset_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.dataset_input, y=self.dataset_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input) class GeneratorDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GeneratorDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GeneratorDataAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertTrue(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.generator_input, steps_per_epoch=10) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool 
def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.generator_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.generator_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.generator_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.generator_input, y=self.generator_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls( self.generator_input, sample_weights=self.generator_input) class KerasSequenceAdapterTest(DataAdapterTestBase): def setUp(self): super(KerasSequenceAdapterTest, self).setUp() self.adapter_cls = data_adapter.KerasSequenceAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertTrue(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. 
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.get_size(), 10) def test_batch_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.sequence_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.sequence_input, y=self.sequence_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input) if __name__ == '__main__': ops.enable_eager_execution() test.main()
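The adapter tests above all exercise the same contract: each adapter class reports via can_handle() whether it accepts a given input, and an instantiated adapter exposes get_size(), batch_size(), and partial-batch information. The sketch below is illustrative only and is not part of the test file; select_data_adapter is a simplified, hypothetical stand-in for the selection logic in data_adapter.py, and the adapter ordering is an assumption, while the can_handle, get_size, and has_partial_batch calls mirror the ones exercised in the tests above.

import numpy as np

from tensorflow.python.keras.engine import data_adapter

# Candidate adapters, most specific first; the real module maintains its own
# list, so this ordering is only an assumption for the sketch.
_ADAPTERS = (
    data_adapter.DatasetAdapter,
    data_adapter.GeneratorDataAdapter,
    data_adapter.KerasSequenceAdapter,
    data_adapter.TensorLikeDataAdapter,
    data_adapter.GenericArrayLikeDataAdapter,
)


def select_data_adapter(x, y=None):
  """Returns the first adapter class whose can_handle() accepts the input."""
  for adapter_cls in _ADAPTERS:
    if adapter_cls.can_handle(x, y):
      return adapter_cls
  raise ValueError('No data adapter can handle input of type {}'.format(type(x)))


x = np.zeros((50, 10))
y = np.ones(50)
adapter_cls = select_data_adapter(x, y)    # TensorLikeDataAdapter for numpy data
adapter = adapter_cls(x, y, batch_size=5)  # batch_size (or steps) is required
print(adapter.get_size())                  # 10 full batches of 5 samples
print(adapter.has_partial_batch())         # False: 50 divides evenly by 5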
apache-2.0
tswast/google-cloud-python
bigquery/tests/unit/test_job.py
1
231908
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import concurrent import copy import json import textwrap import unittest import freezegun import mock import pytest import requests from six.moves import http_client try: import pandas except (ImportError, AttributeError): # pragma: NO COVER pandas = None try: import pyarrow except ImportError: # pragma: NO COVER pyarrow = None try: from google.cloud import bigquery_storage_v1beta1 except (ImportError, AttributeError): # pragma: NO COVER bigquery_storage_v1beta1 = None try: from tqdm import tqdm except (ImportError, AttributeError): # pragma: NO COVER tqdm = None def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) def _make_client(project="test-project", connection=None): from google.cloud.bigquery.client import Client if connection is None: connection = _make_connection() client = Client(project=project, credentials=_make_credentials(), _http=object()) client._connection = connection return client def _make_connection(*responses): import google.cloud.bigquery._http from google.cloud.exceptions import NotFound mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection) mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")] return mock_conn def _make_job_resource( creation_time_ms=1437767599006, started_time_ms=1437767600007, ended_time_ms=1437767601008, started=False, ended=False, etag="abc-def-hjk", endpoint="https://bigquery.googleapis.com", job_type="load", job_id="a-random-id", project_id="some-project", user_email="bq-user@example.com", ): resource = { "configuration": {job_type: {}}, "statistics": {"creationTime": creation_time_ms, job_type: {}}, "etag": etag, "id": "{}:{}".format(project_id, job_id), "jobReference": {"projectId": project_id, "jobId": job_id}, "selfLink": "{}/bigquery/v2/projects/{}/jobs/{}".format( endpoint, project_id, job_id ), "user_email": user_email, } if started or ended: resource["statistics"]["startTime"] = started_time_ms if ended: resource["statistics"]["endTime"] = ended_time_ms if job_type == "query": resource["configuration"]["query"]["destinationTable"] = { "projectId": project_id, "datasetId": "_temp_dataset", "tableId": "_temp_table", } return resource class Test__error_result_to_exception(unittest.TestCase): def _call_fut(self, *args, **kwargs): from google.cloud.bigquery import job return job._error_result_to_exception(*args, **kwargs) def test_simple(self): error_result = {"reason": "invalid", "message": "bad request"} exception = self._call_fut(error_result) self.assertEqual(exception.code, http_client.BAD_REQUEST) self.assertTrue(exception.message.startswith("bad request")) self.assertIn(error_result, exception.errors) def test_missing_reason(self): error_result = {} exception = self._call_fut(error_result) self.assertEqual(exception.code, http_client.INTERNAL_SERVER_ERROR) class Test_JobReference(unittest.TestCase): JOB_ID = "job-id" PROJECT = "test-project-123" LOCATION = "us-central" 
@staticmethod def _get_target_class(): from google.cloud.bigquery import job return job._JobReference def _make_one(self, job_id, project, location): return self._get_target_class()(job_id, project, location) def test_ctor(self): job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION) self.assertEqual(job_ref.job_id, self.JOB_ID) self.assertEqual(job_ref.project, self.PROJECT) self.assertEqual(job_ref.location, self.LOCATION) def test__to_api_repr(self): job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION) self.assertEqual( job_ref._to_api_repr(), { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": self.LOCATION, }, ) def test_from_api_repr(self): api_repr = { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": self.LOCATION, } job_ref = self._get_target_class()._from_api_repr(api_repr) self.assertEqual(job_ref.job_id, self.JOB_ID) self.assertEqual(job_ref.project, self.PROJECT) self.assertEqual(job_ref.location, self.LOCATION) class Test_AsyncJob(unittest.TestCase): JOB_ID = "job-id" PROJECT = "test-project-123" LOCATION = "us-central" @staticmethod def _get_target_class(): from google.cloud.bigquery import job return job._AsyncJob def _make_one(self, job_id, client): return self._get_target_class()(job_id, client) def _make_derived_class(self): class Derived(self._get_target_class()): _JOB_TYPE = "derived" return Derived def _make_derived(self, job_id, client): return self._make_derived_class()(job_id, client) @staticmethod def _job_reference(job_id, project, location): from google.cloud.bigquery import job return job._JobReference(job_id, project, location) def test_ctor_w_bare_job_id(self): import threading client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertEqual(job.job_id, self.JOB_ID) self.assertEqual(job.project, self.PROJECT) self.assertIsNone(job.location) self.assertIs(job._client, client) self.assertEqual( job._properties, {"jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}}, ) self.assertIsInstance(job._completion_lock, type(threading.Lock())) self.assertEqual( job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID) ) def test_ctor_w_job_ref(self): import threading other_project = "other-project-234" client = _make_client(project=other_project) job_ref = self._job_reference(self.JOB_ID, self.PROJECT, self.LOCATION) job = self._make_one(job_ref, client) self.assertEqual(job.job_id, self.JOB_ID) self.assertEqual(job.project, self.PROJECT) self.assertEqual(job.location, self.LOCATION) self.assertIs(job._client, client) self.assertEqual( job._properties, { "jobReference": { "projectId": self.PROJECT, "location": self.LOCATION, "jobId": self.JOB_ID, } }, ) self.assertFalse(job._result_set) self.assertIsInstance(job._completion_lock, type(threading.Lock())) self.assertEqual( job.path, "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID) ) def test__require_client_w_none(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIs(job._require_client(None), client) def test__require_client_w_other(self): client = _make_client(project=self.PROJECT) other = object() job = self._make_one(self.JOB_ID, client) self.assertIs(job._require_client(other), other) def test_job_type(self): client = _make_client(project=self.PROJECT) derived = self._make_derived(self.JOB_ID, client) self.assertEqual(derived.job_type, "derived") def test_parent_job_id(self): client = _make_client(project=self.PROJECT) job = 
self._make_one(self.JOB_ID, client) self.assertIsNone(job.parent_job_id) job._properties["statistics"] = {"parentJobId": "parent-job-123"} self.assertEqual(job.parent_job_id, "parent-job-123") def test_script_statistics(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.script_statistics) job._properties["statistics"] = { "scriptStatistics": { "evaluationKind": "EXPRESSION", "stackFrames": [ { "startLine": 5, "startColumn": 29, "endLine": 9, "endColumn": 14, "text": "QUERY TEXT", } ], } } script_stats = job.script_statistics self.assertEqual(script_stats.evaluation_kind, "EXPRESSION") stack_frames = script_stats.stack_frames self.assertEqual(len(stack_frames), 1) stack_frame = stack_frames[0] self.assertIsNone(stack_frame.procedure_id) self.assertEqual(stack_frame.start_line, 5) self.assertEqual(stack_frame.start_column, 29) self.assertEqual(stack_frame.end_line, 9) self.assertEqual(stack_frame.end_column, 14) self.assertEqual(stack_frame.text, "QUERY TEXT") def test_num_child_jobs(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertEqual(job.num_child_jobs, 0) job._properties["statistics"] = {"numChildJobs": "17"} self.assertEqual(job.num_child_jobs, 17) def test_labels_miss(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertEqual(job.labels, {}) def test_labels_update_in_place(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) labels = job.labels labels["foo"] = "bar" # update in place self.assertEqual(job.labels, {"foo": "bar"}) def test_labels_hit(self): labels = {"foo": "bar"} client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) job._properties["labels"] = labels self.assertEqual(job.labels, labels) def test_etag(self): etag = "ETAG-123" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.etag) job._properties["etag"] = etag self.assertEqual(job.etag, etag) def test_self_link(self): self_link = "https://api.example.com/123" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.self_link) job._properties["selfLink"] = self_link self.assertEqual(job.self_link, self_link) def test_user_email(self): user_email = "user@example.com" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.user_email) job._properties["user_email"] = user_email self.assertEqual(job.user_email, user_email) @staticmethod def _datetime_and_millis(): import datetime import pytz from google.cloud._helpers import _millis now = datetime.datetime.utcnow().replace( microsecond=123000, tzinfo=pytz.UTC # stats timestamps have ms precision ) return now, _millis(now) def test_created(self): now, millis = self._datetime_and_millis() client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.created) stats = job._properties["statistics"] = {} self.assertIsNone(job.created) stats["creationTime"] = millis self.assertEqual(job.created, now) def test_started(self): now, millis = self._datetime_and_millis() client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.started) stats = job._properties["statistics"] = {} self.assertIsNone(job.started) stats["startTime"] = millis self.assertEqual(job.started, now) def test_ended(self): now, 
millis = self._datetime_and_millis() client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.ended) stats = job._properties["statistics"] = {} self.assertIsNone(job.ended) stats["endTime"] = millis self.assertEqual(job.ended, now) def test__job_statistics(self): statistics = {"foo": "bar"} client = _make_client(project=self.PROJECT) derived = self._make_derived(self.JOB_ID, client) self.assertEqual(derived._job_statistics(), {}) stats = derived._properties["statistics"] = {} self.assertEqual(derived._job_statistics(), {}) stats["derived"] = statistics self.assertEqual(derived._job_statistics(), statistics) def test_error_result(self): error_result = { "debugInfo": "DEBUG INFO", "location": "LOCATION", "message": "MESSAGE", "reason": "REASON", } client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.error_result) status = job._properties["status"] = {} self.assertIsNone(job.error_result) status["errorResult"] = error_result self.assertEqual(job.error_result, error_result) def test_errors(self): errors = [ { "debugInfo": "DEBUG INFO", "location": "LOCATION", "message": "MESSAGE", "reason": "REASON", } ] client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.errors) status = job._properties["status"] = {} self.assertIsNone(job.errors) status["errors"] = errors self.assertEqual(job.errors, errors) def test_state(self): state = "STATE" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) self.assertIsNone(job.state) status = job._properties["status"] = {} self.assertIsNone(job.state) status["state"] = state self.assertEqual(job.state, state) def test__scrub_local_properties(self): before = {"foo": "bar"} resource = before.copy() client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) job._scrub_local_properties(resource) # no raise self.assertEqual(resource, before) def test__copy_configuration_properties(self): before = {"foo": "bar"} resource = before.copy() client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) with self.assertRaises(NotImplementedError): job._copy_configuration_properties(resource) self.assertEqual(resource, before) def _set_properties_job(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) job._scrub_local_properties = mock.Mock() job._copy_configuration_properties = mock.Mock() job._set_future_result = mock.Mock() job._properties = { "jobReference": job._properties["jobReference"], "foo": "bar", } return job def test__set_properties_no_stats(self): config = {"test": True} resource = {"configuration": config} job = self._set_properties_job() job._set_properties(resource) self.assertEqual(job._properties, resource) job._scrub_local_properties.assert_called_once_with(resource) job._copy_configuration_properties.assert_called_once_with(config) def test__set_properties_w_creation_time(self): now, millis = self._datetime_and_millis() config = {"test": True} stats = {"creationTime": str(millis)} resource = {"configuration": config, "statistics": stats} job = self._set_properties_job() job._set_properties(resource) cleaned = copy.deepcopy(resource) cleaned["statistics"]["creationTime"] = float(millis) self.assertEqual(job._properties, cleaned) job._scrub_local_properties.assert_called_once_with(resource) job._copy_configuration_properties.assert_called_once_with(config) def 
test__set_properties_w_start_time(self): now, millis = self._datetime_and_millis() config = {"test": True} stats = {"startTime": str(millis)} resource = {"configuration": config, "statistics": stats} job = self._set_properties_job() job._set_properties(resource) cleaned = copy.deepcopy(resource) cleaned["statistics"]["startTime"] = float(millis) self.assertEqual(job._properties, cleaned) job._scrub_local_properties.assert_called_once_with(resource) job._copy_configuration_properties.assert_called_once_with(config) def test__set_properties_w_end_time(self): now, millis = self._datetime_and_millis() config = {"test": True} stats = {"endTime": str(millis)} resource = {"configuration": config, "statistics": stats} job = self._set_properties_job() job._set_properties(resource) cleaned = copy.deepcopy(resource) cleaned["statistics"]["endTime"] = float(millis) self.assertEqual(job._properties, cleaned) job._scrub_local_properties.assert_called_once_with(resource) job._copy_configuration_properties.assert_called_once_with(config) def test__get_resource_config_missing_job_ref(self): resource = {} klass = self._make_derived_class() with self.assertRaises(KeyError): klass._get_resource_config(resource) def test__get_resource_config_missing_job_id(self): resource = {"jobReference": {}} klass = self._make_derived_class() with self.assertRaises(KeyError): klass._get_resource_config(resource) def test__get_resource_config_missing_configuration(self): resource = {"jobReference": {"jobId": self.JOB_ID}} klass = self._make_derived_class() with self.assertRaises(KeyError): klass._get_resource_config(resource) def test__get_resource_config_missing_config_type(self): resource = {"jobReference": {"jobId": self.JOB_ID}, "configuration": {}} klass = self._make_derived_class() with self.assertRaises(KeyError): klass._get_resource_config(resource) def test__get_resource_config_ok(self): derived_config = {"foo": "bar"} resource = { "jobReference": {"jobId": self.JOB_ID}, "configuration": {"derived": derived_config}, } klass = self._make_derived_class() job_id, config = klass._get_resource_config(resource) self.assertEqual(job_id, self.JOB_ID) self.assertEqual(config, {"derived": derived_config}) def test__build_resource(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) with self.assertRaises(NotImplementedError): job._build_resource() def test_to_api_repr(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) with self.assertRaises(NotImplementedError): job.to_api_repr() def test__begin_already(self): job = self._set_properties_job() job._properties["status"] = {"state": "WHATEVER"} with self.assertRaises(ValueError): job._begin() def test__begin_defaults(self): from google.cloud.bigquery.retry import DEFAULT_RETRY resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } job = self._set_properties_job() builder = job.to_api_repr = mock.Mock() builder.return_value = resource call_api = job._client._call_api = mock.Mock() call_api.return_value = resource job._begin() call_api.assert_called_once_with( DEFAULT_RETRY, method="POST", path="/projects/{}/jobs".format(self.PROJECT), data=resource, timeout=None, ) self.assertEqual(job._properties, resource) def test__begin_explicit(self): from google.cloud.bigquery.retry import DEFAULT_RETRY other_project = "other-project-234" resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": 
None, }, "configuration": {"test": True}, } job = self._set_properties_job() builder = job.to_api_repr = mock.Mock() builder.return_value = resource client = _make_client(project=other_project) call_api = client._call_api = mock.Mock() call_api.return_value = resource retry = DEFAULT_RETRY.with_deadline(1) job._begin(client=client, retry=retry, timeout=7.5) call_api.assert_called_once_with( retry, method="POST", path="/projects/{}/jobs".format(self.PROJECT), data=resource, timeout=7.5, ) self.assertEqual(job._properties, resource) def test_exists_defaults_miss(self): from google.cloud.exceptions import NotFound from google.cloud.bigquery.retry import DEFAULT_RETRY job = self._set_properties_job() job._properties["jobReference"]["location"] = self.LOCATION call_api = job._client._call_api = mock.Mock() call_api.side_effect = NotFound("testing") self.assertFalse(job.exists()) call_api.assert_called_once_with( DEFAULT_RETRY, method="GET", path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID), query_params={"fields": "id", "location": self.LOCATION}, timeout=None, ) def test_exists_explicit_hit(self): from google.cloud.bigquery.retry import DEFAULT_RETRY other_project = "other-project-234" resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } job = self._set_properties_job() client = _make_client(project=other_project) call_api = client._call_api = mock.Mock() call_api.return_value = resource retry = DEFAULT_RETRY.with_deadline(1) self.assertTrue(job.exists(client=client, retry=retry)) call_api.assert_called_once_with( retry, method="GET", path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID), query_params={"fields": "id"}, timeout=None, ) def test_exists_w_timeout(self): from google.cloud.bigquery.retry import DEFAULT_RETRY PATH = "/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID) job = self._set_properties_job() call_api = job._client._call_api = mock.Mock() job.exists(timeout=7.5) call_api.assert_called_once_with( DEFAULT_RETRY, method="GET", path=PATH, query_params={"fields": "id"}, timeout=7.5, ) def test_reload_defaults(self): from google.cloud.bigquery.retry import DEFAULT_RETRY resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } job = self._set_properties_job() job._properties["jobReference"]["location"] = self.LOCATION call_api = job._client._call_api = mock.Mock() call_api.return_value = resource job.reload() call_api.assert_called_once_with( DEFAULT_RETRY, method="GET", path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID), query_params={"location": self.LOCATION}, timeout=None, ) self.assertEqual(job._properties, resource) def test_reload_explicit(self): from google.cloud.bigquery.retry import DEFAULT_RETRY other_project = "other-project-234" resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } job = self._set_properties_job() client = _make_client(project=other_project) call_api = client._call_api = mock.Mock() call_api.return_value = resource retry = DEFAULT_RETRY.with_deadline(1) job.reload(client=client, retry=retry, timeout=4.2) call_api.assert_called_once_with( retry, method="GET", path="/projects/{}/jobs/{}".format(self.PROJECT, self.JOB_ID), query_params={}, timeout=4.2, ) self.assertEqual(job._properties, resource) def test_cancel_defaults(self): resource = { "jobReference": { "jobId": self.JOB_ID, 
"projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } response = {"job": resource} job = self._set_properties_job() job._properties["jobReference"]["location"] = self.LOCATION connection = job._client._connection = _make_connection(response) self.assertTrue(job.cancel()) connection.api_request.assert_called_once_with( method="POST", path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID), query_params={"location": self.LOCATION}, timeout=None, ) self.assertEqual(job._properties, resource) def test_cancel_explicit(self): other_project = "other-project-234" resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } response = {"job": resource} job = self._set_properties_job() client = _make_client(project=other_project) connection = client._connection = _make_connection(response) self.assertTrue(job.cancel(client=client, timeout=7.5)) connection.api_request.assert_called_once_with( method="POST", path="/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID), query_params={}, timeout=7.5, ) self.assertEqual(job._properties, resource) def test_cancel_w_custom_retry(self): from google.cloud.bigquery.retry import DEFAULT_RETRY api_path = "/projects/{}/jobs/{}/cancel".format(self.PROJECT, self.JOB_ID) resource = { "jobReference": { "jobId": self.JOB_ID, "projectId": self.PROJECT, "location": None, }, "configuration": {"test": True}, } response = {"job": resource} job = self._set_properties_job() api_request_patcher = mock.patch.object( job._client._connection, "api_request", side_effect=[ValueError, response], ) retry = DEFAULT_RETRY.with_deadline(1).with_predicate( lambda exc: isinstance(exc, ValueError) ) with api_request_patcher as fake_api_request: result = job.cancel(retry=retry, timeout=7.5) self.assertTrue(result) self.assertEqual(job._properties, resource) self.assertEqual( fake_api_request.call_args_list, [ mock.call(method="POST", path=api_path, query_params={}, timeout=7.5), mock.call( method="POST", path=api_path, query_params={}, timeout=7.5, ), # was retried once ], ) def test__set_future_result_wo_done(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) set_exception = job.set_exception = mock.Mock() set_result = job.set_result = mock.Mock() job._set_future_result() set_exception.assert_not_called() set_result.assert_not_called() def test__set_future_result_w_result_set(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) job._properties["status"] = {"state": "DONE"} job._result_set = True set_exception = job.set_exception = mock.Mock() set_result = job.set_result = mock.Mock() job._set_future_result() set_exception.assert_not_called() set_result.assert_not_called() def test__set_future_result_w_done_wo_result_set_w_error(self): from google.cloud.exceptions import NotFound client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, client) job._properties["status"] = { "state": "DONE", "errorResult": {"reason": "notFound", "message": "testing"}, } set_exception = job.set_exception = mock.Mock() set_result = job.set_result = mock.Mock() job._set_future_result() set_exception.assert_called_once() args, kw = set_exception.call_args (exception,) = args self.assertIsInstance(exception, NotFound) self.assertEqual(exception.message, "testing") self.assertEqual(kw, {}) set_result.assert_not_called() def test__set_future_result_w_done_wo_result_set_wo_error(self): 
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        job._properties["status"] = {"state": "DONE"}
        set_exception = job.set_exception = mock.Mock()
        set_result = job.set_result = mock.Mock()

        job._set_future_result()

        set_exception.assert_not_called()
        set_result.assert_called_once_with(job)

    def test_done_defaults_wo_state(self):
        from google.cloud.bigquery.retry import DEFAULT_RETRY

        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        reload_ = job.reload = mock.Mock()

        self.assertFalse(job.done())

        reload_.assert_called_once_with(retry=DEFAULT_RETRY, timeout=None)

    def test_done_explicit_wo_state(self):
        from google.cloud.bigquery.retry import DEFAULT_RETRY

        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        reload_ = job.reload = mock.Mock()
        retry = DEFAULT_RETRY.with_deadline(1)

        self.assertFalse(job.done(retry=retry, timeout=7.5))

        reload_.assert_called_once_with(retry=retry, timeout=7.5)

    def test_done_already(self):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        job._properties["status"] = {"state": "DONE"}

        self.assertTrue(job.done())

    @mock.patch("google.api_core.future.polling.PollingFuture.result")
    def test_result_default_wo_state(self, result):
        from google.cloud.bigquery.retry import DEFAULT_RETRY

        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        begin = job._begin = mock.Mock()

        self.assertIs(job.result(), result.return_value)

        begin.assert_called_once_with(retry=DEFAULT_RETRY, timeout=None)
        result.assert_called_once_with(timeout=None)

    @mock.patch("google.api_core.future.polling.PollingFuture.result")
    def test_result_w_retry_wo_state(self, result):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        begin = job._begin = mock.Mock()
        retry = mock.Mock()

        self.assertIs(job.result(retry=retry), result.return_value)

        begin.assert_called_once_with(retry=retry, timeout=None)
        result.assert_called_once_with(timeout=None)

    @mock.patch("google.api_core.future.polling.PollingFuture.result")
    def test_result_explicit_w_state(self, result):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        job._properties["status"] = {"state": "DONE"}
        begin = job._begin = mock.Mock()
        timeout = 1

        self.assertIs(job.result(timeout=timeout), result.return_value)

        begin.assert_not_called()
        result.assert_called_once_with(timeout=timeout)

    @mock.patch("google.api_core.future.polling.PollingFuture.result")
    def test_result_splitting_timeout_between_requests(self, result):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        begin = job._begin = mock.Mock()
        retry = mock.Mock()

        with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen_time:

            def delayed_begin(*args, **kwargs):
                frozen_time.tick(delta=0.3)

            begin.side_effect = delayed_begin
            job.result(retry=retry, timeout=1.0)

        begin.assert_called_once_with(retry=retry, timeout=1.0)
        result.assert_called_once_with(timeout=0.7)

    def test_cancelled_wo_error_result(self):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)

        self.assertFalse(job.cancelled())

    def test_cancelled_w_error_result_not_stopped(self):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        job._properties["status"] = {"errorResult": {"reason": "other"}}

        self.assertFalse(job.cancelled())

    def test_cancelled_w_error_result_w_stopped(self):
        client = _make_client(project=self.PROJECT)
        job = self._make_one(self.JOB_ID, client)
        job._properties["status"] = {"errorResult": {"reason": "stopped"}}

        self.assertTrue(job.cancelled())


class Test_JobConfig(unittest.TestCase):
    JOB_TYPE = "testing"

    @staticmethod
    def _get_target_class():
        from google.cloud.bigquery import job

        return job._JobConfig

    def _make_one(self, job_type=JOB_TYPE):
        return self._get_target_class()(job_type)

    def test_ctor(self):
        job_config = self._make_one()
        self.assertEqual(job_config._job_type, self.JOB_TYPE)
        self.assertEqual(job_config._properties, {self.JOB_TYPE: {}})

    def test_fill_from_default(self):
        from google.cloud.bigquery import QueryJobConfig

        job_config = QueryJobConfig()
        job_config.dry_run = True
        job_config.maximum_bytes_billed = 1000

        default_job_config = QueryJobConfig()
        default_job_config.use_query_cache = True
        default_job_config.maximum_bytes_billed = 2000

        final_job_config = job_config._fill_from_default(default_job_config)
        self.assertTrue(final_job_config.dry_run)
        self.assertTrue(final_job_config.use_query_cache)
        self.assertEqual(final_job_config.maximum_bytes_billed, 1000)

    def test_fill_from_default_conflict(self):
        from google.cloud.bigquery import QueryJobConfig

        basic_job_config = QueryJobConfig()
        conflicting_job_config = self._make_one("conflicting_job_type")
        self.assertNotEqual(
            basic_job_config._job_type, conflicting_job_config._job_type
        )

        with self.assertRaises(TypeError):
            basic_job_config._fill_from_default(conflicting_job_config)

    @mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
    def test__get_sub_prop_wo_default(self, _get_sub_prop):
        job_config = self._make_one()
        key = "key"
        self.assertIs(job_config._get_sub_prop(key), _get_sub_prop.return_value)
        _get_sub_prop.assert_called_once_with(
            job_config._properties, [self.JOB_TYPE, key], default=None
        )

    @mock.patch("google.cloud.bigquery._helpers._get_sub_prop")
    def test__get_sub_prop_w_default(self, _get_sub_prop):
        job_config = self._make_one()
        key = "key"
        default = "default"
        self.assertIs(
            job_config._get_sub_prop(key, default=default), _get_sub_prop.return_value
        )
        _get_sub_prop.assert_called_once_with(
            job_config._properties, [self.JOB_TYPE, key], default=default
        )

    @mock.patch("google.cloud.bigquery._helpers._set_sub_prop")
    def test__set_sub_prop(self, _set_sub_prop):
        job_config = self._make_one()
        key = "key"
        value = "value"
        job_config._set_sub_prop(key, value)
        _set_sub_prop.assert_called_once_with(
            job_config._properties, [self.JOB_TYPE, key], value
        )

    def test_to_api_repr(self):
        job_config = self._make_one()
        expected = job_config._properties = {self.JOB_TYPE: {"foo": "bar"}}
        found = job_config.to_api_repr()
        self.assertEqual(found, expected)
        self.assertIsNot(found, expected)  # copied

    # 'from_api_repr' cannot be tested on '_JobConfig', because it presumes
    # the ctor can be called w/o arguments

    def test_labels_miss(self):
        job_config = self._make_one()
        self.assertEqual(job_config.labels, {})

    def test_labels_update_in_place(self):
        job_config = self._make_one()
        labels = job_config.labels
        labels["foo"] = "bar"  # update in place
        self.assertEqual(job_config.labels, {"foo": "bar"})

    def test_labels_hit(self):
        labels = {"foo": "bar"}
        job_config = self._make_one()
        job_config._properties["labels"] = labels
        self.assertEqual(job_config.labels, labels)

    def test_labels_setter_invalid(self):
        labels = object()
        job_config = self._make_one()
        with self.assertRaises(ValueError):
            job_config.labels = labels

    def test_labels_setter(self):
        labels = {"foo": "bar"}
        job_config = self._make_one()
        job_config.labels = labels
        self.assertEqual(job_config._properties["labels"], labels)


class _Base(object):
    from google.cloud.bigquery.dataset import DatasetReference
    from google.cloud.bigquery.table import TableReference

    ENDPOINT = "https://bigquery.googleapis.com"
    PROJECT = "project"
    SOURCE1 = "http://example.com/source1.csv"
    DS_ID = "dataset_id"
    DS_REF = DatasetReference(PROJECT, DS_ID)
    TABLE_ID = "table_id"
    TABLE_REF = TableReference(DS_REF, TABLE_ID)
    JOB_ID = "JOB_ID"
    KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"

    def _make_one(self, *args, **kw):
        return self._get_target_class()(*args, **kw)

    def _setUpConstants(self):
        import datetime
        from google.cloud._helpers import UTC

        self.WHEN_TS = 1437767599.006
        self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
            tzinfo=UTC
        )
        self.ETAG = "ETAG"
        self.FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
        self.RESOURCE_URL = "{}/bigquery/v2/projects/{}/jobs/{}".format(
            self.ENDPOINT, self.PROJECT, self.JOB_ID
        )
        self.USER_EMAIL = "phred@example.com"

    def _table_ref(self, table_id):
        from google.cloud.bigquery.table import TableReference

        return TableReference(self.DS_REF, table_id)

    def _make_resource(self, started=False, ended=False):
        self._setUpConstants()
        return _make_job_resource(
            creation_time_ms=int(self.WHEN_TS * 1000),
            started_time_ms=int(self.WHEN_TS * 1000),
            ended_time_ms=int(self.WHEN_TS * 1000) + 1000000,
            started=started,
            ended=ended,
            etag=self.ETAG,
            endpoint=self.ENDPOINT,
            job_type=self.JOB_TYPE,
            job_id=self.JOB_ID,
            project_id=self.PROJECT,
            user_email=self.USER_EMAIL,
        )

    def _verifyInitialReadonlyProperties(self, job):
        # root elements of resource
        self.assertIsNone(job.etag)
        self.assertIsNone(job.self_link)
        self.assertIsNone(job.user_email)

        # derived from resource['statistics']
        self.assertIsNone(job.created)
        self.assertIsNone(job.started)
        self.assertIsNone(job.ended)

        # derived from resource['status']
        self.assertIsNone(job.error_result)
        self.assertIsNone(job.errors)
        self.assertIsNone(job.state)

    def _verifyReadonlyResourceProperties(self, job, resource):
        from datetime import timedelta

        statistics = resource.get("statistics", {})

        if "creationTime" in statistics:
            self.assertEqual(job.created, self.WHEN)
        else:
            self.assertIsNone(job.created)

        if "startTime" in statistics:
            self.assertEqual(job.started, self.WHEN)
        else:
            self.assertIsNone(job.started)

        if "endTime" in statistics:
            self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
        else:
            self.assertIsNone(job.ended)

        if "etag" in resource:
            self.assertEqual(job.etag, self.ETAG)
        else:
            self.assertIsNone(job.etag)

        if "selfLink" in resource:
            self.assertEqual(job.self_link, self.RESOURCE_URL)
        else:
            self.assertIsNone(job.self_link)

        if "user_email" in resource:
            self.assertEqual(job.user_email, self.USER_EMAIL)
        else:
            self.assertIsNone(job.user_email)


class TestLoadJobConfig(unittest.TestCase, _Base):
    JOB_TYPE = "load"

    @staticmethod
    def _get_target_class():
        from google.cloud.bigquery.job import LoadJobConfig

        return LoadJobConfig

    def test_ctor_w_properties(self):
        config = self._get_target_class()(
            allow_jagged_rows=True, allow_quoted_newlines=True
        )
        self.assertTrue(config.allow_jagged_rows)
        self.assertTrue(config.allow_quoted_newlines)

    def test_allow_jagged_rows_missing(self):
        config = self._get_target_class()()
        self.assertIsNone(config.allow_jagged_rows)

    def test_allow_jagged_rows_hit(self):
        config = self._get_target_class()()
        config._properties["load"]["allowJaggedRows"] = True
        self.assertTrue(config.allow_jagged_rows)

    def test_allow_jagged_rows_setter(self):
        config = self._get_target_class()()
config.allow_jagged_rows = True self.assertTrue(config._properties["load"]["allowJaggedRows"]) def test_allow_quoted_newlines_missing(self): config = self._get_target_class()() self.assertIsNone(config.allow_quoted_newlines) def test_allow_quoted_newlines_hit(self): config = self._get_target_class()() config._properties["load"]["allowQuotedNewlines"] = True self.assertTrue(config.allow_quoted_newlines) def test_allow_quoted_newlines_setter(self): config = self._get_target_class()() config.allow_quoted_newlines = True self.assertTrue(config._properties["load"]["allowQuotedNewlines"]) def test_autodetect_missing(self): config = self._get_target_class()() self.assertIsNone(config.autodetect) def test_autodetect_hit(self): config = self._get_target_class()() config._properties["load"]["autodetect"] = True self.assertTrue(config.autodetect) def test_autodetect_setter(self): config = self._get_target_class()() config.autodetect = True self.assertTrue(config._properties["load"]["autodetect"]) def test_clustering_fields_miss(self): config = self._get_target_class()() self.assertIsNone(config.clustering_fields) def test_clustering_fields_hit(self): config = self._get_target_class()() fields = ["email", "postal_code"] config._properties["load"]["clustering"] = {"fields": fields} self.assertEqual(config.clustering_fields, fields) def test_clustering_fields_setter(self): fields = ["email", "postal_code"] config = self._get_target_class()() config.clustering_fields = fields self.assertEqual(config._properties["load"]["clustering"], {"fields": fields}) def test_clustering_fields_setter_w_none(self): config = self._get_target_class()() fields = ["email", "postal_code"] config._properties["load"]["clustering"] = {"fields": fields} config.clustering_fields = None self.assertIsNone(config.clustering_fields) self.assertNotIn("clustering", config._properties["load"]) def test_create_disposition_missing(self): config = self._get_target_class()() self.assertIsNone(config.create_disposition) def test_create_disposition_hit(self): from google.cloud.bigquery.job import CreateDisposition disposition = CreateDisposition.CREATE_IF_NEEDED config = self._get_target_class()() config._properties["load"]["createDisposition"] = disposition self.assertEqual(config.create_disposition, disposition) def test_create_disposition_setter(self): from google.cloud.bigquery.job import CreateDisposition disposition = CreateDisposition.CREATE_IF_NEEDED config = self._get_target_class()() config.create_disposition = disposition self.assertEqual(config._properties["load"]["createDisposition"], disposition) def test_destination_encryption_configuration_missing(self): config = self._get_target_class()() self.assertIsNone(config.destination_encryption_configuration) def test_destination_encryption_configuration_hit(self): from google.cloud.bigquery.encryption_configuration import ( EncryptionConfiguration, ) kms_key_name = "kms-key-name" encryption_configuration = EncryptionConfiguration(kms_key_name) config = self._get_target_class()() config._properties["load"]["destinationEncryptionConfiguration"] = { "kmsKeyName": kms_key_name } self.assertEqual( config.destination_encryption_configuration, encryption_configuration ) def test_destination_encryption_configuration_setter(self): from google.cloud.bigquery.encryption_configuration import ( EncryptionConfiguration, ) kms_key_name = "kms-key-name" encryption_configuration = EncryptionConfiguration(kms_key_name) config = self._get_target_class()() config.destination_encryption_configuration = 
encryption_configuration expected = {"kmsKeyName": kms_key_name} self.assertEqual( config._properties["load"]["destinationEncryptionConfiguration"], expected ) def test_destination_encryption_configuration_setter_w_none(self): kms_key_name = "kms-key-name" config = self._get_target_class()() config._properties["load"]["destinationEncryptionConfiguration"] = { "kmsKeyName": kms_key_name } config.destination_encryption_configuration = None self.assertIsNone(config.destination_encryption_configuration) self.assertNotIn( "destinationEncryptionConfiguration", config._properties["load"] ) def test_destination_table_description_missing(self): config = self._get_target_class()() self.assertIsNone(config.destination_table_description) def test_destination_table_description_hit(self): description = "Description" config = self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "description": description } self.assertEqual(config.destination_table_description, description) def test_destination_table_description_setter(self): description = "Description" config = self._get_target_class()() config.destination_table_description = description expected = {"description": description} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_destination_table_description_setter_w_fn_already(self): description = "Description" friendly_name = "Friendly Name" config = self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "friendlyName": friendly_name } config.destination_table_description = description expected = {"friendlyName": friendly_name, "description": description} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_destination_table_description_w_none(self): description = "Description" friendly_name = "Friendly Name" config = self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "description": description, "friendlyName": friendly_name, } config.destination_table_description = None expected = {"friendlyName": friendly_name} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_destination_table_friendly_name_missing(self): config = self._get_target_class()() self.assertIsNone(config.destination_table_friendly_name) def test_destination_table_friendly_name_hit(self): friendly_name = "Friendly Name" config = self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "friendlyName": friendly_name } self.assertEqual(config.destination_table_friendly_name, friendly_name) def test_destination_table_friendly_name_setter(self): friendly_name = "Friendly Name" config = self._get_target_class()() config.destination_table_friendly_name = friendly_name expected = {"friendlyName": friendly_name} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_destination_table_friendly_name_setter_w_descr_already(self): friendly_name = "Friendly Name" description = "Description" config = self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "description": description } config.destination_table_friendly_name = friendly_name expected = {"friendlyName": friendly_name, "description": description} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_destination_table_friendly_name_w_none(self): friendly_name = "Friendly Name" description = "Description" config = 
self._get_target_class()() config._properties["load"]["destinationTableProperties"] = { "description": description, "friendlyName": friendly_name, } config.destination_table_friendly_name = None expected = {"description": description} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) def test_encoding_missing(self): config = self._get_target_class()() self.assertIsNone(config.encoding) def test_encoding_hit(self): from google.cloud.bigquery.job import Encoding encoding = Encoding.UTF_8 config = self._get_target_class()() config._properties["load"]["encoding"] = encoding self.assertEqual(config.encoding, encoding) def test_encoding_setter(self): from google.cloud.bigquery.job import Encoding encoding = Encoding.UTF_8 config = self._get_target_class()() config.encoding = encoding self.assertEqual(config._properties["load"]["encoding"], encoding) def test_field_delimiter_missing(self): config = self._get_target_class()() self.assertIsNone(config.field_delimiter) def test_field_delimiter_hit(self): field_delimiter = "|" config = self._get_target_class()() config._properties["load"]["fieldDelimiter"] = field_delimiter self.assertEqual(config.field_delimiter, field_delimiter) def test_field_delimiter_setter(self): field_delimiter = "|" config = self._get_target_class()() config.field_delimiter = field_delimiter self.assertEqual(config._properties["load"]["fieldDelimiter"], field_delimiter) def test_hive_partitioning_missing(self): config = self._get_target_class()() self.assertIsNone(config.hive_partitioning) def test_hive_partitioning_hit(self): from google.cloud.bigquery.external_config import HivePartitioningOptions config = self._get_target_class()() config._properties["load"]["hivePartitioningOptions"] = { "sourceUriPrefix": "http://foo/bar", "mode": "STRINGS", } result = config.hive_partitioning self.assertIsInstance(result, HivePartitioningOptions) self.assertEqual(result.source_uri_prefix, "http://foo/bar") self.assertEqual(result.mode, "STRINGS") def test_hive_partitioning_setter(self): from google.cloud.bigquery.external_config import HivePartitioningOptions hive_partitioning = HivePartitioningOptions() hive_partitioning.source_uri_prefix = "http://foo/bar" hive_partitioning.mode = "AUTO" config = self._get_target_class()() config.hive_partitioning = hive_partitioning self.assertEqual( config._properties["load"]["hivePartitioningOptions"], {"sourceUriPrefix": "http://foo/bar", "mode": "AUTO"}, ) config.hive_partitioning = None self.assertIsNone(config._properties["load"]["hivePartitioningOptions"]) def test_hive_partitioning_invalid_type(self): config = self._get_target_class()() with self.assertRaises(TypeError): config.hive_partitioning = {"mode": "AUTO"} def test_ignore_unknown_values_missing(self): config = self._get_target_class()() self.assertIsNone(config.ignore_unknown_values) def test_ignore_unknown_values_hit(self): config = self._get_target_class()() config._properties["load"]["ignoreUnknownValues"] = True self.assertTrue(config.ignore_unknown_values) def test_ignore_unknown_values_setter(self): config = self._get_target_class()() config.ignore_unknown_values = True self.assertTrue(config._properties["load"]["ignoreUnknownValues"]) def test_max_bad_records_missing(self): config = self._get_target_class()() self.assertIsNone(config.max_bad_records) def test_max_bad_records_hit(self): max_bad_records = 13 config = self._get_target_class()() config._properties["load"]["maxBadRecords"] = max_bad_records 
self.assertEqual(config.max_bad_records, max_bad_records) def test_max_bad_records_setter(self): max_bad_records = 13 config = self._get_target_class()() config.max_bad_records = max_bad_records self.assertEqual(config._properties["load"]["maxBadRecords"], max_bad_records) def test_null_marker_missing(self): config = self._get_target_class()() self.assertIsNone(config.null_marker) def test_null_marker_hit(self): null_marker = "XXX" config = self._get_target_class()() config._properties["load"]["nullMarker"] = null_marker self.assertEqual(config.null_marker, null_marker) def test_null_marker_setter(self): null_marker = "XXX" config = self._get_target_class()() config.null_marker = null_marker self.assertEqual(config._properties["load"]["nullMarker"], null_marker) def test_quote_character_missing(self): config = self._get_target_class()() self.assertIsNone(config.quote_character) def test_quote_character_hit(self): quote_character = "'" config = self._get_target_class()() config._properties["load"]["quote"] = quote_character self.assertEqual(config.quote_character, quote_character) def test_quote_character_setter(self): quote_character = "'" config = self._get_target_class()() config.quote_character = quote_character self.assertEqual(config._properties["load"]["quote"], quote_character) def test_schema_missing(self): config = self._get_target_class()() self.assertIsNone(config.schema) def test_schema_hit(self): from google.cloud.bigquery.schema import SchemaField config = self._get_target_class()() all_props_repr = { "mode": "REQUIRED", "name": "foo", "type": "INTEGER", "description": "Foo", } minimal_repr = {"name": "bar", "type": "STRING"} config._properties["load"]["schema"] = { "fields": [all_props_repr, minimal_repr] } all_props, minimal = config.schema self.assertEqual(all_props, SchemaField.from_api_repr(all_props_repr)) self.assertEqual(minimal, SchemaField.from_api_repr(minimal_repr)) def test_schema_setter_fields(self): from google.cloud.bigquery.schema import SchemaField config = self._get_target_class()() full_name = SchemaField("full_name", "STRING", mode="REQUIRED") age = SchemaField("age", "INTEGER", mode="REQUIRED") config.schema = [full_name, age] full_name_repr = { "name": "full_name", "type": "STRING", "mode": "REQUIRED", "description": None, } age_repr = { "name": "age", "type": "INTEGER", "mode": "REQUIRED", "description": None, } self.assertEqual( config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]} ) def test_schema_setter_valid_mappings_list(self): config = self._get_target_class()() schema = [ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "age", "type": "INTEGER", "mode": "REQUIRED"}, ] config.schema = schema full_name_repr = { "name": "full_name", "type": "STRING", "mode": "REQUIRED", "description": None, } age_repr = { "name": "age", "type": "INTEGER", "mode": "REQUIRED", "description": None, } self.assertEqual( config._properties["load"]["schema"], {"fields": [full_name_repr, age_repr]} ) def test_schema_setter_invalid_mappings_list(self): config = self._get_target_class()() schema = [ {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}, {"name": "age", "typeoo": "INTEGER", "mode": "REQUIRED"}, ] with self.assertRaises(Exception): config.schema = schema def test_schema_setter_unsetting_schema(self): from google.cloud.bigquery.schema import SchemaField config = self._get_target_class()() config._properties["load"]["schema"] = [ SchemaField("full_name", "STRING", mode="REQUIRED"), SchemaField("age", "INTEGER", 
mode="REQUIRED"), ] config.schema = None self.assertNotIn("schema", config._properties["load"]) config.schema = None # no error, idempotent operation def test_schema_update_options_missing(self): config = self._get_target_class()() self.assertIsNone(config.schema_update_options) def test_schema_update_options_hit(self): from google.cloud.bigquery.job import SchemaUpdateOption options = [ SchemaUpdateOption.ALLOW_FIELD_ADDITION, SchemaUpdateOption.ALLOW_FIELD_RELAXATION, ] config = self._get_target_class()() config._properties["load"]["schemaUpdateOptions"] = options self.assertEqual(config.schema_update_options, options) def test_schema_update_options_setter(self): from google.cloud.bigquery.job import SchemaUpdateOption options = [ SchemaUpdateOption.ALLOW_FIELD_ADDITION, SchemaUpdateOption.ALLOW_FIELD_RELAXATION, ] config = self._get_target_class()() config.schema_update_options = options self.assertEqual(config._properties["load"]["schemaUpdateOptions"], options) def test_skip_leading_rows_missing(self): config = self._get_target_class()() self.assertIsNone(config.skip_leading_rows) def test_skip_leading_rows_hit_w_str(self): skip_leading_rows = 1 config = self._get_target_class()() config._properties["load"]["skipLeadingRows"] = str(skip_leading_rows) self.assertEqual(config.skip_leading_rows, skip_leading_rows) def test_skip_leading_rows_hit_w_integer(self): skip_leading_rows = 1 config = self._get_target_class()() config._properties["load"]["skipLeadingRows"] = skip_leading_rows self.assertEqual(config.skip_leading_rows, skip_leading_rows) def test_skip_leading_rows_setter(self): skip_leading_rows = 1 config = self._get_target_class()() config.skip_leading_rows = skip_leading_rows self.assertEqual( config._properties["load"]["skipLeadingRows"], str(skip_leading_rows) ) def test_source_format_missing(self): config = self._get_target_class()() self.assertIsNone(config.source_format) def test_source_format_hit(self): from google.cloud.bigquery.job import SourceFormat source_format = SourceFormat.CSV config = self._get_target_class()() config._properties["load"]["sourceFormat"] = source_format self.assertEqual(config.source_format, source_format) def test_source_format_setter(self): from google.cloud.bigquery.job import SourceFormat source_format = SourceFormat.CSV config = self._get_target_class()() config.source_format = source_format self.assertEqual(config._properties["load"]["sourceFormat"], source_format) def test_range_partitioning_w_none(self): object_under_test = self._get_target_class()() assert object_under_test.range_partitioning is None def test_range_partitioning_w_value(self): object_under_test = self._get_target_class()() object_under_test._properties["load"]["rangePartitioning"] = { "field": "column_one", "range": {"start": 1, "end": 1000, "interval": 10}, } object_under_test.range_partitioning.field == "column_one" object_under_test.range_partitioning.range_.start == 1 object_under_test.range_partitioning.range_.end == 1000 object_under_test.range_partitioning.range_.interval == 10 def test_range_partitioning_setter(self): from google.cloud.bigquery.table import PartitionRange from google.cloud.bigquery.table import RangePartitioning object_under_test = self._get_target_class()() object_under_test.range_partitioning = RangePartitioning( field="column_one", range_=PartitionRange(start=1, end=1000, interval=10) ) object_under_test.range_partitioning.field == "column_one" object_under_test.range_partitioning.range_.start == 1 object_under_test.range_partitioning.range_.end 
== 1000 object_under_test.range_partitioning.range_.interval == 10 def test_range_partitioning_setter_w_none(self): object_under_test = self._get_target_class()() object_under_test.range_partitioning = None assert object_under_test.range_partitioning is None def test_range_partitioning_setter_w_wrong_type(self): object_under_test = self._get_target_class()() with pytest.raises(ValueError, match="RangePartitioning"): object_under_test.range_partitioning = object() def test_time_partitioning_miss(self): config = self._get_target_class()() self.assertIsNone(config.time_partitioning) def test_time_partitioning_hit(self): from google.cloud.bigquery.table import TimePartitioning from google.cloud.bigquery.table import TimePartitioningType field = "creation_date" year_ms = 86400 * 1000 * 365 config = self._get_target_class()() config._properties["load"]["timePartitioning"] = { "type": TimePartitioningType.DAY, "field": field, "expirationMs": str(year_ms), "requirePartitionFilter": False, } expected = TimePartitioning( type_=TimePartitioningType.DAY, field=field, expiration_ms=year_ms, require_partition_filter=False, ) self.assertEqual(config.time_partitioning, expected) def test_time_partitioning_setter(self): from google.cloud.bigquery.table import TimePartitioning from google.cloud.bigquery.table import TimePartitioningType field = "creation_date" year_ms = 86400 * 1000 * 365 time_partitioning = TimePartitioning( type_=TimePartitioningType.DAY, field=field, expiration_ms=year_ms, require_partition_filter=False, ) config = self._get_target_class()() config.time_partitioning = time_partitioning expected = { "type": TimePartitioningType.DAY, "field": field, "expirationMs": str(year_ms), "requirePartitionFilter": False, } self.assertEqual(config._properties["load"]["timePartitioning"], expected) def test_time_partitioning_setter_w_none(self): from google.cloud.bigquery.table import TimePartitioningType field = "creation_date" year_ms = 86400 * 1000 * 365 config = self._get_target_class()() config._properties["load"]["timePartitioning"] = { "type": TimePartitioningType.DAY, "field": field, "expirationMs": str(year_ms), "requirePartitionFilter": False, } config.time_partitioning = None self.assertIsNone(config.time_partitioning) self.assertNotIn("timePartitioning", config._properties["load"]) def test_use_avro_logical_types(self): config = self._get_target_class()() self.assertIsNone(config.use_avro_logical_types) def test_use_avro_logical_types_setter(self): config = self._get_target_class()() config.use_avro_logical_types = True self.assertTrue(config._properties["load"]["useAvroLogicalTypes"]) def test_write_disposition_missing(self): config = self._get_target_class()() self.assertIsNone(config.write_disposition) def test_write_disposition_hit(self): from google.cloud.bigquery.job import WriteDisposition write_disposition = WriteDisposition.WRITE_TRUNCATE config = self._get_target_class()() config._properties["load"]["writeDisposition"] = write_disposition self.assertEqual(config.write_disposition, write_disposition) def test_write_disposition_setter(self): from google.cloud.bigquery.job import WriteDisposition write_disposition = WriteDisposition.WRITE_TRUNCATE config = self._get_target_class()() config.write_disposition = write_disposition self.assertEqual( config._properties["load"]["writeDisposition"], write_disposition ) class TestLoadJob(unittest.TestCase, _Base): JOB_TYPE = "load" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import LoadJob return LoadJob def 
_setUpConstants(self): super(TestLoadJob, self)._setUpConstants() self.INPUT_FILES = 2 self.INPUT_BYTES = 12345 self.OUTPUT_BYTES = 23456 self.OUTPUT_ROWS = 345 def _make_resource(self, started=False, ended=False): resource = super(TestLoadJob, self)._make_resource(started, ended) config = resource["configuration"]["load"] config["sourceUris"] = [self.SOURCE1] config["destinationTable"] = { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, } if ended: resource["status"] = {"state": "DONE"} resource["statistics"]["load"]["inputFiles"] = self.INPUT_FILES resource["statistics"]["load"]["inputFileBytes"] = self.INPUT_BYTES resource["statistics"]["load"]["outputBytes"] = self.OUTPUT_BYTES resource["statistics"]["load"]["outputRows"] = self.OUTPUT_ROWS return resource def _verifyBooleanConfigProperties(self, job, config): if "allowJaggedRows" in config: self.assertEqual(job.allow_jagged_rows, config["allowJaggedRows"]) else: self.assertIsNone(job.allow_jagged_rows) if "allowQuotedNewlines" in config: self.assertEqual(job.allow_quoted_newlines, config["allowQuotedNewlines"]) else: self.assertIsNone(job.allow_quoted_newlines) if "autodetect" in config: self.assertEqual(job.autodetect, config["autodetect"]) else: self.assertIsNone(job.autodetect) if "ignoreUnknownValues" in config: self.assertEqual(job.ignore_unknown_values, config["ignoreUnknownValues"]) else: self.assertIsNone(job.ignore_unknown_values) if "useAvroLogicalTypes" in config: self.assertEqual(job.use_avro_logical_types, config["useAvroLogicalTypes"]) else: self.assertIsNone(job.use_avro_logical_types) def _verifyEnumConfigProperties(self, job, config): if "createDisposition" in config: self.assertEqual(job.create_disposition, config["createDisposition"]) else: self.assertIsNone(job.create_disposition) if "encoding" in config: self.assertEqual(job.encoding, config["encoding"]) else: self.assertIsNone(job.encoding) if "sourceFormat" in config: self.assertEqual(job.source_format, config["sourceFormat"]) else: self.assertIsNone(job.source_format) if "writeDisposition" in config: self.assertEqual(job.write_disposition, config["writeDisposition"]) else: self.assertIsNone(job.write_disposition) if "schemaUpdateOptions" in config: self.assertEqual(job.schema_update_options, config["schemaUpdateOptions"]) else: self.assertIsNone(job.schema_update_options) def _verifyResourceProperties(self, job, resource): self._verifyReadonlyResourceProperties(job, resource) config = resource.get("configuration", {}).get("load") self._verifyBooleanConfigProperties(job, config) self._verifyEnumConfigProperties(job, config) self.assertEqual(job.source_uris, config["sourceUris"]) table_ref = config["destinationTable"] self.assertEqual(job.destination.project, table_ref["projectId"]) self.assertEqual(job.destination.dataset_id, table_ref["datasetId"]) self.assertEqual(job.destination.table_id, table_ref["tableId"]) if "fieldDelimiter" in config: self.assertEqual(job.field_delimiter, config["fieldDelimiter"]) else: self.assertIsNone(job.field_delimiter) if "maxBadRecords" in config: self.assertEqual(job.max_bad_records, config["maxBadRecords"]) else: self.assertIsNone(job.max_bad_records) if "nullMarker" in config: self.assertEqual(job.null_marker, config["nullMarker"]) else: self.assertIsNone(job.null_marker) if "quote" in config: self.assertEqual(job.quote_character, config["quote"]) else: self.assertIsNone(job.quote_character) if "skipLeadingRows" in config: self.assertEqual(str(job.skip_leading_rows), config["skipLeadingRows"]) 
else: self.assertIsNone(job.skip_leading_rows) if "destinationEncryptionConfiguration" in config: self.assertIsNotNone(job.destination_encryption_configuration) self.assertEqual( job.destination_encryption_configuration.kms_key_name, config["destinationEncryptionConfiguration"]["kmsKeyName"], ) else: self.assertIsNone(job.destination_encryption_configuration) def test_ctor(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) self.assertIs(job.destination, self.TABLE_REF) self.assertEqual(list(job.source_uris), [self.SOURCE1]) self.assertIs(job._client, client) self.assertEqual(job.job_type, self.JOB_TYPE) self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)) self._verifyInitialReadonlyProperties(job) # derived from resource['statistics']['load'] self.assertIsNone(job.input_file_bytes) self.assertIsNone(job.input_files) self.assertIsNone(job.output_bytes) self.assertIsNone(job.output_rows) # set/read from resource['configuration']['load'] self.assertIsNone(job.schema) self.assertIsNone(job.allow_jagged_rows) self.assertIsNone(job.allow_quoted_newlines) self.assertIsNone(job.autodetect) self.assertIsNone(job.create_disposition) self.assertIsNone(job.encoding) self.assertIsNone(job.field_delimiter) self.assertIsNone(job.ignore_unknown_values) self.assertIsNone(job.max_bad_records) self.assertIsNone(job.null_marker) self.assertIsNone(job.quote_character) self.assertIsNone(job.skip_leading_rows) self.assertIsNone(job.source_format) self.assertIsNone(job.write_disposition) self.assertIsNone(job.destination_encryption_configuration) self.assertIsNone(job.destination_table_description) self.assertIsNone(job.destination_table_friendly_name) self.assertIsNone(job.range_partitioning) self.assertIsNone(job.time_partitioning) self.assertIsNone(job.use_avro_logical_types) self.assertIsNone(job.clustering_fields) self.assertIsNone(job.schema_update_options) def test_ctor_w_config(self): from google.cloud.bigquery.schema import SchemaField from google.cloud.bigquery.job import LoadJobConfig client = _make_client(project=self.PROJECT) full_name = SchemaField("full_name", "STRING", mode="REQUIRED") age = SchemaField("age", "INTEGER", mode="REQUIRED") config = LoadJobConfig() config.schema = [full_name, age] job = self._make_one( self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config ) self.assertEqual(job.schema, [full_name, age]) config.destination_table_description = "Description" expected = {"description": "Description"} self.assertEqual( config._properties["load"]["destinationTableProperties"], expected ) friendly_name = "Friendly Name" config._properties["load"]["destinationTableProperties"] = { "friendlyName": friendly_name } self.assertEqual(config.destination_table_friendly_name, friendly_name) def test_ctor_w_job_reference(self): from google.cloud.bigquery import job client = _make_client(project=self.PROJECT) job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US") load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client) self.assertEqual(load_job.project, "alternative-project") self.assertEqual(load_job.location, "US") def test_done(self): client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) self.assertTrue(job.done()) def test_result(self): client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=True) job = 
self._get_target_class().from_api_repr(resource, client) result = job.result() self.assertIs(result, job) def test_result_invokes_begin(self): begun_resource = self._make_resource() done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection(begun_resource, done_resource) client = _make_client(self.PROJECT) client._connection = connection job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job.result() self.assertEqual(len(connection.api_request.call_args_list), 2) begin_request, reload_request = connection.api_request.call_args_list self.assertEqual(begin_request[1]["method"], "POST") self.assertEqual(reload_request[1]["method"], "GET") def test_schema_setter_non_list(self): from google.cloud.bigquery.job import LoadJobConfig config = LoadJobConfig() with self.assertRaises(TypeError): config.schema = object() def test_schema_setter_invalid_field(self): from google.cloud.bigquery.job import LoadJobConfig from google.cloud.bigquery.schema import SchemaField config = LoadJobConfig() full_name = SchemaField("full_name", "STRING", mode="REQUIRED") with self.assertRaises(ValueError): config.schema = [full_name, object()] def test_schema_setter(self): from google.cloud.bigquery.job import LoadJobConfig from google.cloud.bigquery.schema import SchemaField config = LoadJobConfig() full_name = SchemaField("full_name", "STRING", mode="REQUIRED") age = SchemaField("age", "INTEGER", mode="REQUIRED") config.schema = [full_name, age] self.assertEqual(config.schema, [full_name, age]) def test_props_set_by_server(self): import datetime from google.cloud._helpers import UTC from google.cloud._helpers import _millis CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC) STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC) ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC) FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID) URL = "http://example.com/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) EMAIL = "phred@example.com" ERROR_RESULT = { "debugInfo": "DEBUG", "location": "LOCATION", "message": "MESSAGE", "reason": "REASON", } client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job._properties["etag"] = "ETAG" job._properties["id"] = FULL_JOB_ID job._properties["selfLink"] = URL job._properties["user_email"] = EMAIL statistics = job._properties["statistics"] = {} statistics["creationTime"] = _millis(CREATED) statistics["startTime"] = _millis(STARTED) statistics["endTime"] = _millis(ENDED) self.assertEqual(job.etag, "ETAG") self.assertEqual(job.self_link, URL) self.assertEqual(job.user_email, EMAIL) self.assertEqual(job.created, CREATED) self.assertEqual(job.started, STARTED) self.assertEqual(job.ended, ENDED) # running jobs have no load stats not yet set. 
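        # Descriptive comment (added): the "load" sub-resource of statistics is
        # only populated just below, so the output properties are still None here.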
self.assertIsNone(job.output_bytes) load_stats = statistics["load"] = {} load_stats["inputFileBytes"] = 12345 load_stats["inputFiles"] = 1 load_stats["outputBytes"] = 23456 load_stats["outputRows"] = 345 self.assertEqual(job.input_file_bytes, 12345) self.assertEqual(job.input_files, 1) self.assertEqual(job.output_bytes, 23456) self.assertEqual(job.output_rows, 345) status = job._properties["status"] = {} self.assertIsNone(job.error_result) self.assertIsNone(job.errors) self.assertIsNone(job.state) status["errorResult"] = ERROR_RESULT status["errors"] = [ERROR_RESULT] status["state"] = "STATE" self.assertEqual(job.error_result, ERROR_RESULT) self.assertEqual(job.errors, [ERROR_RESULT]) self.assertEqual(job.state, "STATE") def test_from_api_repr_missing_identity(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = {} klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_missing_config(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": "%s:%s" % (self.PROJECT, self.JOB_ID), "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, } klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_bare(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.FULL_JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "load": { "sourceUris": [self.SOURCE1], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_with_encryption(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.FULL_JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "load": { "sourceUris": [self.SOURCE1], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, }, "destinationEncryptionConfiguration": { "kmsKeyName": self.KMS_KEY_NAME }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_w_properties(self): from google.cloud.bigquery.job import CreateDisposition client = _make_client(project=self.PROJECT) RESOURCE = self._make_resource() load_config = RESOURCE["configuration"]["load"] load_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_already_running(self): conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job._properties["status"] = {"state": "RUNNING"} with self.assertRaises(ValueError): job._begin() def test_begin_w_bound_client(self): RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, 
connection=conn) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job._begin() conn.api_request.assert_called_once_with( method="POST", path="/projects/{}/jobs".format(self.PROJECT), data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "load": { "sourceUris": [self.SOURCE1], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, }, } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_autodetect(self): from google.cloud.bigquery.job import LoadJobConfig path = "/projects/{}/jobs".format(self.PROJECT) resource = self._make_resource() resource["configuration"]["load"]["autodetect"] = True # Ensure None for missing server-set props del resource["statistics"]["creationTime"] del resource["etag"] del resource["selfLink"] del resource["user_email"] conn = _make_connection(resource) client = _make_client(project=self.PROJECT, connection=conn) config = LoadJobConfig() config.autodetect = True job = self._make_one( self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client, config ) job._begin() sent = { "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "load": { "sourceUris": [self.SOURCE1], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, }, "autodetect": True, } }, } conn.api_request.assert_called_once_with( method="POST", path=path, data=sent, timeout=None ) self._verifyResourceProperties(job, resource) def test_begin_w_alternate_client(self): from google.cloud.bigquery.job import CreateDisposition from google.cloud.bigquery.job import LoadJobConfig from google.cloud.bigquery.job import SchemaUpdateOption from google.cloud.bigquery.job import WriteDisposition from google.cloud.bigquery.schema import SchemaField PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource(ended=True) LOAD_CONFIGURATION = { "sourceUris": [self.SOURCE1], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, }, "allowJaggedRows": True, "allowQuotedNewlines": True, "createDisposition": CreateDisposition.CREATE_NEVER, "encoding": "ISO-8559-1", "fieldDelimiter": "|", "ignoreUnknownValues": True, "maxBadRecords": 100, "nullMarker": r"\N", "quote": "'", "skipLeadingRows": "1", "sourceFormat": "CSV", "useAvroLogicalTypes": True, "writeDisposition": WriteDisposition.WRITE_TRUNCATE, "schema": { "fields": [ { "name": "full_name", "type": "STRING", "mode": "REQUIRED", "description": None, }, { "name": "age", "type": "INTEGER", "mode": "REQUIRED", "description": None, }, ] }, "schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_ADDITION], } RESOURCE["configuration"]["load"] = LOAD_CONFIGURATION conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) full_name = SchemaField("full_name", "STRING", mode="REQUIRED") age = SchemaField("age", "INTEGER", mode="REQUIRED") config = LoadJobConfig() config.schema = [full_name, age] job = self._make_one( self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1, config ) config.allow_jagged_rows = True config.allow_quoted_newlines = True config.create_disposition = CreateDisposition.CREATE_NEVER config.encoding = "ISO-8559-1" config.field_delimiter = "|" config.ignore_unknown_values = True config.max_bad_records = 100 config.null_marker = r"\N" config.quote_character = "'" 
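        # Descriptive comment (added): the remaining option setters below, like
        # those above, mirror the keys expected in LOAD_CONFIGURATION.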
config.skip_leading_rows = 1 config.source_format = "CSV" config.use_avro_logical_types = True config.write_disposition = WriteDisposition.WRITE_TRUNCATE config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_ADDITION] job._begin(client=client2) conn1.api_request.assert_not_called() self.assertEqual(len(conn2.api_request.call_args_list), 1) req = conn2.api_request.call_args_list[0] self.assertEqual(req[1]["method"], "POST") self.assertEqual(req[1]["path"], PATH) SENT = { "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": {"load": LOAD_CONFIGURATION}, } self.maxDiff = None self.assertEqual(req[1]["data"], SENT) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_job_reference(self): from google.cloud.bigquery import job resource = self._make_resource() resource["jobReference"]["projectId"] = "alternative-project" resource["jobReference"]["location"] = "US" job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US") conn = _make_connection(resource) client = _make_client(project=self.PROJECT, connection=conn) load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client) load_job._begin() conn.api_request.assert_called_once() _, request = conn.api_request.call_args self.assertEqual(request["method"], "POST") self.assertEqual(request["path"], "/projects/alternative-project/jobs") self.assertEqual( request["data"]["jobReference"]["projectId"], "alternative-project" ) self.assertEqual(request["data"]["jobReference"]["location"], "US") self.assertEqual(request["data"]["jobReference"]["jobId"], self.JOB_ID) def test_exists_miss_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) self.assertFalse(job.exists()) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_exists_hit_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection({}) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1) self.assertTrue(job.exists(client=client2)) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_exists_miss_w_job_reference(self): from google.cloud.bigquery import job job_ref = job._JobReference("my-job-id", "other-project", "US") conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client) self.assertFalse(load_job.exists()) conn.api_request.assert_called_once_with( method="GET", path="/projects/other-project/jobs/my-job-id", query_params={"fields": "id", "location": "US"}, timeout=None, ) def test_reload_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job.reload() conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def 
test_reload_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1) job.reload(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def test_reload_w_job_reference(self): from google.cloud.bigquery import job resource = self._make_resource(ended=True) resource["jobReference"]["projectId"] = "alternative-project" resource["jobReference"]["location"] = "US" job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US") conn = _make_connection(resource) client = _make_client(project=self.PROJECT, connection=conn) load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client) load_job.reload() conn.api_request.assert_called_once_with( method="GET", path="/projects/alternative-project/jobs/{}".format(self.JOB_ID), query_params={"location": "US"}, timeout=None, ) def test_cancel_w_bound_client(self): PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource(ended=True) RESPONSE = {"job": RESOURCE} conn = _make_connection(RESPONSE) client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client) job.cancel() conn.api_request.assert_called_once_with( method="POST", path=PATH, query_params={}, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_cancel_w_alternate_client(self): PATH = "/projects/%s/jobs/%s/cancel" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource(ended=True) RESPONSE = {"job": RESOURCE} conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESPONSE) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one(self.JOB_ID, [self.SOURCE1], self.TABLE_REF, client1) job.cancel(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="POST", path=PATH, query_params={}, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_cancel_w_job_reference(self): from google.cloud.bigquery import job resource = self._make_resource(ended=True) resource["jobReference"]["projectId"] = "alternative-project" resource["jobReference"]["location"] = "US" job_ref = job._JobReference(self.JOB_ID, "alternative-project", "US") conn = _make_connection({"job": resource}) client = _make_client(project=self.PROJECT, connection=conn) load_job = self._make_one(job_ref, [self.SOURCE1], self.TABLE_REF, client) load_job.cancel() conn.api_request.assert_called_once_with( method="POST", path="/projects/alternative-project/jobs/{}/cancel".format(self.JOB_ID), query_params={"location": "US"}, timeout=None, ) class TestCopyJobConfig(unittest.TestCase, _Base): JOB_TYPE = "copy" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import CopyJobConfig return CopyJobConfig def test_ctor_w_properties(self): from google.cloud.bigquery.job import CreateDisposition from google.cloud.bigquery.job import WriteDisposition create_disposition = CreateDisposition.CREATE_NEVER write_disposition = WriteDisposition.WRITE_TRUNCATE config = 
self._get_target_class()( create_disposition=create_disposition, write_disposition=write_disposition ) self.assertEqual(config.create_disposition, create_disposition) self.assertEqual(config.write_disposition, write_disposition) def test_to_api_repr_with_encryption(self): from google.cloud.bigquery.encryption_configuration import ( EncryptionConfiguration, ) config = self._make_one() config.destination_encryption_configuration = EncryptionConfiguration( kms_key_name=self.KMS_KEY_NAME ) resource = config.to_api_repr() self.assertEqual( resource, { "copy": { "destinationEncryptionConfiguration": { "kmsKeyName": self.KMS_KEY_NAME } } }, ) def test_to_api_repr_with_encryption_none(self): config = self._make_one() config.destination_encryption_configuration = None resource = config.to_api_repr() self.assertEqual( resource, {"copy": {"destinationEncryptionConfiguration": None}} ) class TestCopyJob(unittest.TestCase, _Base): JOB_TYPE = "copy" SOURCE_TABLE = "source_table" DESTINATION_TABLE = "destination_table" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import CopyJob return CopyJob def _make_resource(self, started=False, ended=False): resource = super(TestCopyJob, self)._make_resource(started, ended) config = resource["configuration"]["copy"] config["sourceTables"] = [ { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } ] config["destinationTable"] = { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, } return resource def _verifyResourceProperties(self, job, resource): self._verifyReadonlyResourceProperties(job, resource) config = resource.get("configuration", {}).get("copy") table_ref = config["destinationTable"] self.assertEqual(job.destination.project, table_ref["projectId"]) self.assertEqual(job.destination.dataset_id, table_ref["datasetId"]) self.assertEqual(job.destination.table_id, table_ref["tableId"]) sources = config.get("sourceTables") if sources is None: sources = [config["sourceTable"]] self.assertEqual(len(sources), len(job.sources)) for table_ref, table in zip(sources, job.sources): self.assertEqual(table.project, table_ref["projectId"]) self.assertEqual(table.dataset_id, table_ref["datasetId"]) self.assertEqual(table.table_id, table_ref["tableId"]) if "createDisposition" in config: self.assertEqual(job.create_disposition, config["createDisposition"]) else: self.assertIsNone(job.create_disposition) if "writeDisposition" in config: self.assertEqual(job.write_disposition, config["writeDisposition"]) else: self.assertIsNone(job.write_disposition) if "destinationEncryptionConfiguration" in config: self.assertIsNotNone(job.destination_encryption_configuration) self.assertEqual( job.destination_encryption_configuration.kms_key_name, config["destinationEncryptionConfiguration"]["kmsKeyName"], ) else: self.assertIsNone(job.destination_encryption_configuration) def test_ctor(self): client = _make_client(project=self.PROJECT) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client) self.assertIs(job.destination, destination) self.assertEqual(job.sources, [source]) self.assertIs(job._client, client) self.assertEqual(job.job_type, self.JOB_TYPE) self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)) self._verifyInitialReadonlyProperties(job) # set/read from resource['configuration']['copy'] self.assertIsNone(job.create_disposition) 
self.assertIsNone(job.write_disposition) self.assertIsNone(job.destination_encryption_configuration) def test_from_api_repr_missing_identity(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = {} klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_missing_config(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": "%s:%s" % (self.PROJECT, self.DS_ID), "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, } klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_bare(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "copy": { "sourceTables": [ { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } ], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_with_encryption(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "copy": { "sourceTables": [ { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } ], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, }, "destinationEncryptionConfiguration": { "kmsKeyName": self.KMS_KEY_NAME }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_w_sourcetable(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "copy": { "sourceTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, }, "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_wo_sources(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "copy": { "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, } } }, } klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_w_properties(self): from google.cloud.bigquery.job import CreateDisposition client = _make_client(project=self.PROJECT) RESOURCE = self._make_resource() copy_config = RESOURCE["configuration"]["copy"] copy_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, 
RESOURCE) def test_begin_w_bound_client(self): PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client) job._begin() conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "copy": { "sourceTables": [ { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } ], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, }, } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_alternate_client(self): from google.cloud.bigquery.job import CopyJobConfig from google.cloud.bigquery.job import CreateDisposition from google.cloud.bigquery.job import WriteDisposition PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource(ended=True) COPY_CONFIGURATION = { "sourceTables": [ { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } ], "destinationTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, }, "createDisposition": CreateDisposition.CREATE_NEVER, "writeDisposition": WriteDisposition.WRITE_TRUNCATE, } RESOURCE["configuration"]["copy"] = COPY_CONFIGURATION conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) config = CopyJobConfig() config.create_disposition = CreateDisposition.CREATE_NEVER config.write_disposition = WriteDisposition.WRITE_TRUNCATE job = self._make_one(self.JOB_ID, [source], destination, client1, config) job._begin(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": {"copy": COPY_CONFIGURATION}, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_exists_miss_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client) self.assertFalse(job.exists()) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None, ) def test_exists_hit_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection({}) client2 = _make_client(project=self.PROJECT, connection=conn2) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client1) self.assertTrue(job.exists(client=client2)) 
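        # Only the explicitly passed client (client2 / conn2) should have issued
        # the GET request; the job's bound client (conn1) must stay untouched.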
conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_reload_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client) job.reload() conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def test_reload_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) source = self._table_ref(self.SOURCE_TABLE) destination = self._table_ref(self.DESTINATION_TABLE) job = self._make_one(self.JOB_ID, [source], destination, client1) job.reload(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) class TestExtractJobConfig(unittest.TestCase, _Base): JOB_TYPE = "extract" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import ExtractJobConfig return ExtractJobConfig def test_ctor_w_properties(self): config = self._get_target_class()(field_delimiter="\t", print_header=True) self.assertEqual(config.field_delimiter, "\t") self.assertTrue(config.print_header) def test_to_api_repr(self): from google.cloud.bigquery import job config = self._make_one() config.compression = job.Compression.SNAPPY config.destination_format = job.DestinationFormat.AVRO config.field_delimiter = "ignored for avro" config.print_header = False config._properties["extract"]["someNewField"] = "some-value" config.use_avro_logical_types = True resource = config.to_api_repr() self.assertEqual( resource, { "extract": { "compression": "SNAPPY", "destinationFormat": "AVRO", "fieldDelimiter": "ignored for avro", "printHeader": False, "someNewField": "some-value", "useAvroLogicalTypes": True, } }, ) def test_from_api_repr(self): cls = self._get_target_class() config = cls.from_api_repr( { "extract": { "compression": "NONE", "destinationFormat": "CSV", "fieldDelimiter": "\t", "printHeader": True, "someNewField": "some-value", "useAvroLogicalTypes": False, } } ) self.assertEqual(config.compression, "NONE") self.assertEqual(config.destination_format, "CSV") self.assertEqual(config.field_delimiter, "\t") self.assertEqual(config.print_header, True) self.assertEqual(config._properties["extract"]["someNewField"], "some-value") self.assertEqual(config.use_avro_logical_types, False) class TestExtractJob(unittest.TestCase, _Base): JOB_TYPE = "extract" SOURCE_TABLE = "source_table" DESTINATION_URI = "gs://bucket_name/object_name" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import ExtractJob return ExtractJob def _make_resource(self, started=False, ended=False): resource = super(TestExtractJob, self)._make_resource(started, ended) config = resource["configuration"]["extract"] config["sourceTable"] = { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, } config["destinationUris"] = 
[self.DESTINATION_URI] return resource def _verifyResourceProperties(self, job, resource): self._verifyReadonlyResourceProperties(job, resource) config = resource.get("configuration", {}).get("extract") self.assertEqual(job.destination_uris, config["destinationUris"]) table_ref = config["sourceTable"] self.assertEqual(job.source.project, table_ref["projectId"]) self.assertEqual(job.source.dataset_id, table_ref["datasetId"]) self.assertEqual(job.source.table_id, table_ref["tableId"]) if "compression" in config: self.assertEqual(job.compression, config["compression"]) else: self.assertIsNone(job.compression) if "destinationFormat" in config: self.assertEqual(job.destination_format, config["destinationFormat"]) else: self.assertIsNone(job.destination_format) if "fieldDelimiter" in config: self.assertEqual(job.field_delimiter, config["fieldDelimiter"]) else: self.assertIsNone(job.field_delimiter) if "printHeader" in config: self.assertEqual(job.print_header, config["printHeader"]) else: self.assertIsNone(job.print_header) def test_ctor(self): from google.cloud.bigquery.table import Table client = _make_client(project=self.PROJECT) source = Table(self.TABLE_REF) job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client) self.assertEqual(job.source.project, self.PROJECT) self.assertEqual(job.source.dataset_id, self.DS_ID) self.assertEqual(job.source.table_id, self.TABLE_ID) self.assertEqual(job.destination_uris, [self.DESTINATION_URI]) self.assertIs(job._client, client) self.assertEqual(job.job_type, self.JOB_TYPE) self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)) self._verifyInitialReadonlyProperties(job) # set/read from resource['configuration']['extract'] self.assertIsNone(job.compression) self.assertIsNone(job.destination_format) self.assertIsNone(job.field_delimiter) self.assertIsNone(job.print_header) def test_destination_uri_file_counts(self): file_counts = 23 client = _make_client(project=self.PROJECT) job = self._make_one( self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client ) self.assertIsNone(job.destination_uri_file_counts) statistics = job._properties["statistics"] = {} self.assertIsNone(job.destination_uri_file_counts) extract_stats = statistics["extract"] = {} self.assertIsNone(job.destination_uri_file_counts) extract_stats["destinationUriFileCounts"] = [str(file_counts)] self.assertEqual(job.destination_uri_file_counts, [file_counts]) def test_from_api_repr_missing_identity(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = {} klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_missing_config(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": "%s:%s" % (self.PROJECT, self.DS_ID), "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, } klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_bare(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "extract": { "sourceTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, }, "destinationUris": [self.DESTINATION_URI], } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) 
self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_w_properties(self): from google.cloud.bigquery.job import Compression client = _make_client(project=self.PROJECT) RESOURCE = self._make_resource() extract_config = RESOURCE["configuration"]["extract"] extract_config["compression"] = Compression.GZIP klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_bound_client(self): from google.cloud.bigquery.dataset import DatasetReference PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) source_dataset = DatasetReference(self.PROJECT, self.DS_ID) source = source_dataset.table(self.SOURCE_TABLE) job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client) job._begin() conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "extract": { "sourceTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, }, "destinationUris": [self.DESTINATION_URI], } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_alternate_client(self): from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.job import Compression from google.cloud.bigquery.job import DestinationFormat from google.cloud.bigquery.job import ExtractJobConfig PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource(ended=True) EXTRACT_CONFIGURATION = { "sourceTable": { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.SOURCE_TABLE, }, "destinationUris": [self.DESTINATION_URI], "compression": Compression.GZIP, "destinationFormat": DestinationFormat.NEWLINE_DELIMITED_JSON, "fieldDelimiter": "|", "printHeader": False, } RESOURCE["configuration"]["extract"] = EXTRACT_CONFIGURATION conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) source_dataset = DatasetReference(self.PROJECT, self.DS_ID) source = source_dataset.table(self.SOURCE_TABLE) config = ExtractJobConfig() config.compression = Compression.GZIP config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON config.field_delimiter = "|" config.print_header = False job = self._make_one( self.JOB_ID, source, [self.DESTINATION_URI], client1, config ) job._begin(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": {"extract": EXTRACT_CONFIGURATION}, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_exists_miss_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one( self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client ) self.assertFalse(job.exists()) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None, ) def 
test_exists_hit_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection({}) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one( self.JOB_ID, self.TABLE_REF, [self.DESTINATION_URI], client1 ) self.assertTrue(job.exists(client=client2)) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_reload_w_bound_client(self): from google.cloud.bigquery.dataset import DatasetReference PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) source_dataset = DatasetReference(self.PROJECT, self.DS_ID) source = source_dataset.table(self.SOURCE_TABLE) job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client) job.reload() conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def test_reload_w_alternate_client(self): from google.cloud.bigquery.dataset import DatasetReference PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) RESOURCE = self._make_resource() conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) source_dataset = DatasetReference(self.PROJECT, self.DS_ID) source = source_dataset.table(self.SOURCE_TABLE) job = self._make_one(self.JOB_ID, source, [self.DESTINATION_URI], client1) job.reload(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) class TestQueryJobConfig(unittest.TestCase, _Base): @staticmethod def _get_target_class(): from google.cloud.bigquery.job import QueryJobConfig return QueryJobConfig def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor(self): config = self._make_one() self.assertEqual(config._properties, {"query": {}}) def test_ctor_w_none(self): config = self._make_one() config.default_dataset = None config.destination = None self.assertIsNone(config.default_dataset) self.assertIsNone(config.destination) def test_ctor_w_properties(self): config = self._get_target_class()(use_query_cache=False, use_legacy_sql=True) self.assertFalse(config.use_query_cache) self.assertTrue(config.use_legacy_sql) def test_ctor_w_string_default_dataset(self): from google.cloud.bigquery import dataset default_dataset = "default-proj.default_dset" config = self._get_target_class()(default_dataset=default_dataset) expected = dataset.DatasetReference.from_string(default_dataset) self.assertEqual(config.default_dataset, expected) def test_ctor_w_string_destinaton(self): from google.cloud.bigquery import table destination = "dest-proj.dest_dset.dest_tbl" config = self._get_target_class()(destination=destination) expected = table.TableReference.from_string(destination) self.assertEqual(config.destination, expected) def test_default_dataset_w_string(self): from google.cloud.bigquery import dataset default_dataset = "default-proj.default_dset" config = self._make_one() config.default_dataset = default_dataset expected = 
dataset.DatasetReference.from_string(default_dataset)
        self.assertEqual(config.default_dataset, expected)

    def test_default_dataset_w_dataset(self):
        from google.cloud.bigquery import dataset

        default_dataset = "default-proj.default_dset"
        expected = dataset.DatasetReference.from_string(default_dataset)
        config = self._make_one()
        config.default_dataset = dataset.Dataset(expected)
        self.assertEqual(config.default_dataset, expected)

    def test_destination_w_string(self):
        from google.cloud.bigquery import table

        destination = "dest-proj.dest_dset.dest_tbl"
        config = self._make_one()
        config.destination = destination
        expected = table.TableReference.from_string(destination)
        self.assertEqual(config.destination, expected)

    def test_range_partitioning_w_none(self):
        object_under_test = self._get_target_class()()
        assert object_under_test.range_partitioning is None

    def test_range_partitioning_w_value(self):
        object_under_test = self._get_target_class()()
        object_under_test._properties["query"]["rangePartitioning"] = {
            "field": "column_one",
            "range": {"start": 1, "end": 1000, "interval": 10},
        }
        assert object_under_test.range_partitioning.field == "column_one"
        assert object_under_test.range_partitioning.range_.start == 1
        assert object_under_test.range_partitioning.range_.end == 1000
        assert object_under_test.range_partitioning.range_.interval == 10

    def test_range_partitioning_setter(self):
        from google.cloud.bigquery.table import PartitionRange
        from google.cloud.bigquery.table import RangePartitioning

        object_under_test = self._get_target_class()()
        object_under_test.range_partitioning = RangePartitioning(
            field="column_one", range_=PartitionRange(start=1, end=1000, interval=10)
        )
        assert object_under_test.range_partitioning.field == "column_one"
        assert object_under_test.range_partitioning.range_.start == 1
        assert object_under_test.range_partitioning.range_.end == 1000
        assert object_under_test.range_partitioning.range_.interval == 10

    def test_range_partitioning_setter_w_none(self):
        object_under_test = self._get_target_class()()
        object_under_test.range_partitioning = None
        assert object_under_test.range_partitioning is None

    def test_range_partitioning_setter_w_wrong_type(self):
        object_under_test = self._get_target_class()()
        with pytest.raises(ValueError, match="RangePartitioning"):
            object_under_test.range_partitioning = object()

    def test_time_partitioning(self):
        from google.cloud.bigquery import table

        time_partitioning = table.TimePartitioning(
            type_=table.TimePartitioningType.DAY, field="name"
        )
        config = self._make_one()
        config.time_partitioning = time_partitioning
        # TimePartitioning should remain configurable after being assigned.
        time_partitioning.expiration_ms = 10000

        self.assertEqual(
            config.time_partitioning.type_, table.TimePartitioningType.DAY
        )
        self.assertEqual(config.time_partitioning.field, "name")
        self.assertEqual(config.time_partitioning.expiration_ms, 10000)

        config.time_partitioning = None
        self.assertIsNone(config.time_partitioning)

    def test_clustering_fields(self):
        fields = ["email", "postal_code"]
        config = self._get_target_class()()
        config.clustering_fields = fields
        self.assertEqual(config.clustering_fields, fields)

        config.clustering_fields = None
        self.assertIsNone(config.clustering_fields)

    def test_from_api_repr_empty(self):
        klass = self._get_target_class()
        config = klass.from_api_repr({})
        self.assertIsNone(config.dry_run)
        self.assertIsNone(config.use_legacy_sql)
        self.assertIsNone(config.default_dataset)
        self.assertIsNone(config.destination)
        self.assertIsNone(config.destination_encryption_configuration)

    def test_from_api_repr_normal(self):
        from google.cloud.bigquery.dataset
import DatasetReference resource = { "query": { "useLegacySql": True, "query": "no property for me", "defaultDataset": { "projectId": "someproject", "datasetId": "somedataset", }, "someNewProperty": "I should be saved, too.", }, "dryRun": True, } klass = self._get_target_class() config = klass.from_api_repr(resource) self.assertTrue(config.use_legacy_sql) self.assertEqual( config.default_dataset, DatasetReference("someproject", "somedataset") ) self.assertTrue(config.dry_run) # Make sure unknown properties propagate. self.assertEqual(config._properties["query"]["query"], "no property for me") self.assertEqual( config._properties["query"]["someNewProperty"], "I should be saved, too." ) def test_to_api_repr_normal(self): from google.cloud.bigquery.dataset import DatasetReference config = self._make_one() config.use_legacy_sql = True config.default_dataset = DatasetReference("someproject", "somedataset") config.dry_run = False config._properties["someNewProperty"] = "Woohoo, alpha stuff." resource = config.to_api_repr() self.assertFalse(resource["dryRun"]) self.assertTrue(resource["query"]["useLegacySql"]) self.assertEqual( resource["query"]["defaultDataset"]["projectId"], "someproject" ) self.assertEqual( resource["query"]["defaultDataset"]["datasetId"], "somedataset" ) # Make sure unknown properties propagate. self.assertEqual(resource["someNewProperty"], "Woohoo, alpha stuff.") def test_to_api_repr_with_encryption(self): from google.cloud.bigquery.encryption_configuration import ( EncryptionConfiguration, ) config = self._make_one() config.destination_encryption_configuration = EncryptionConfiguration( kms_key_name=self.KMS_KEY_NAME ) resource = config.to_api_repr() self.assertEqual( resource, { "query": { "destinationEncryptionConfiguration": { "kmsKeyName": self.KMS_KEY_NAME } } }, ) def test_to_api_repr_with_encryption_none(self): config = self._make_one() config.destination_encryption_configuration = None resource = config.to_api_repr() self.assertEqual( resource, {"query": {"destinationEncryptionConfiguration": None}} ) def test_from_api_repr_with_encryption(self): resource = { "query": { "destinationEncryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME} } } klass = self._get_target_class() config = klass.from_api_repr(resource) self.assertEqual( config.destination_encryption_configuration.kms_key_name, self.KMS_KEY_NAME ) class TestQueryJob(unittest.TestCase, _Base): JOB_TYPE = "query" QUERY = "select count(*) from persons" DESTINATION_TABLE = "destination_table" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import QueryJob return QueryJob def _make_resource(self, started=False, ended=False): resource = super(TestQueryJob, self)._make_resource(started, ended) config = resource["configuration"]["query"] config["query"] = self.QUERY if ended: resource["status"] = {"state": "DONE"} return resource def _verifyBooleanResourceProperties(self, job, config): if "allowLargeResults" in config: self.assertEqual(job.allow_large_results, config["allowLargeResults"]) else: self.assertIsNone(job.allow_large_results) if "flattenResults" in config: self.assertEqual(job.flatten_results, config["flattenResults"]) else: self.assertIsNone(job.flatten_results) if "useQueryCache" in config: self.assertEqual(job.use_query_cache, config["useQueryCache"]) else: self.assertIsNone(job.use_query_cache) if "useLegacySql" in config: self.assertEqual(job.use_legacy_sql, config["useLegacySql"]) else: self.assertIsNone(job.use_legacy_sql) def _verifyIntegerResourceProperties(self, job, 
config): if "maximumBillingTier" in config: self.assertEqual(job.maximum_billing_tier, config["maximumBillingTier"]) else: self.assertIsNone(job.maximum_billing_tier) if "maximumBytesBilled" in config: self.assertEqual( str(job.maximum_bytes_billed), config["maximumBytesBilled"] ) self.assertIsInstance(job.maximum_bytes_billed, int) else: self.assertIsNone(job.maximum_bytes_billed) def _verify_udf_resources(self, job, config): udf_resources = config.get("userDefinedFunctionResources", ()) self.assertEqual(len(job.udf_resources), len(udf_resources)) for found, expected in zip(job.udf_resources, udf_resources): if "resourceUri" in expected: self.assertEqual(found.udf_type, "resourceUri") self.assertEqual(found.value, expected["resourceUri"]) else: self.assertEqual(found.udf_type, "inlineCode") self.assertEqual(found.value, expected["inlineCode"]) def _verifyQueryParameters(self, job, config): query_parameters = config.get("queryParameters", ()) self.assertEqual(len(job.query_parameters), len(query_parameters)) for found, expected in zip(job.query_parameters, query_parameters): self.assertEqual(found.to_api_repr(), expected) def _verify_table_definitions(self, job, config): table_defs = config.get("tableDefinitions") if job.table_definitions is None: self.assertIsNone(table_defs) else: self.assertEqual(len(job.table_definitions), len(table_defs)) for found_key, found_ec in job.table_definitions.items(): expected_ec = table_defs.get(found_key) self.assertIsNotNone(expected_ec) self.assertEqual(found_ec.to_api_repr(), expected_ec) def _verify_configuration_properties(self, job, configuration): if "dryRun" in configuration: self.assertEqual(job.dry_run, configuration["dryRun"]) else: self.assertIsNone(job.dry_run) def _verifyResourceProperties(self, job, resource): self._verifyReadonlyResourceProperties(job, resource) configuration = resource.get("configuration", {}) self._verify_configuration_properties(job, configuration) query_config = resource.get("configuration", {}).get("query") self._verifyBooleanResourceProperties(job, query_config) self._verifyIntegerResourceProperties(job, query_config) self._verify_udf_resources(job, query_config) self._verifyQueryParameters(job, query_config) self._verify_table_definitions(job, query_config) self.assertEqual(job.query, query_config["query"]) if "createDisposition" in query_config: self.assertEqual(job.create_disposition, query_config["createDisposition"]) else: self.assertIsNone(job.create_disposition) if "defaultDataset" in query_config: ds_ref = job.default_dataset ds_ref = {"projectId": ds_ref.project, "datasetId": ds_ref.dataset_id} self.assertEqual(ds_ref, query_config["defaultDataset"]) else: self.assertIsNone(job.default_dataset) if "destinationTable" in query_config: table = job.destination tb_ref = { "projectId": table.project, "datasetId": table.dataset_id, "tableId": table.table_id, } self.assertEqual(tb_ref, query_config["destinationTable"]) else: self.assertIsNone(job.destination) if "priority" in query_config: self.assertEqual(job.priority, query_config["priority"]) else: self.assertIsNone(job.priority) if "writeDisposition" in query_config: self.assertEqual(job.write_disposition, query_config["writeDisposition"]) else: self.assertIsNone(job.write_disposition) if "destinationEncryptionConfiguration" in query_config: self.assertIsNotNone(job.destination_encryption_configuration) self.assertEqual( job.destination_encryption_configuration.kms_key_name, query_config["destinationEncryptionConfiguration"]["kmsKeyName"], ) else: 
self.assertIsNone(job.destination_encryption_configuration) if "schemaUpdateOptions" in query_config: self.assertEqual( job.schema_update_options, query_config["schemaUpdateOptions"] ) else: self.assertIsNone(job.schema_update_options) def test_ctor_defaults(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertEqual(job.query, self.QUERY) self.assertIs(job._client, client) self.assertEqual(job.job_type, self.JOB_TYPE) self.assertEqual(job.path, "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID)) self._verifyInitialReadonlyProperties(job) self.assertFalse(job.use_legacy_sql) # set/read from resource['configuration']['query'] self.assertIsNone(job.allow_large_results) self.assertIsNone(job.create_disposition) self.assertIsNone(job.default_dataset) self.assertIsNone(job.destination) self.assertIsNone(job.flatten_results) self.assertIsNone(job.priority) self.assertIsNone(job.use_query_cache) self.assertIsNone(job.dry_run) self.assertIsNone(job.write_disposition) self.assertIsNone(job.maximum_billing_tier) self.assertIsNone(job.maximum_bytes_billed) self.assertIsNone(job.table_definitions) self.assertIsNone(job.destination_encryption_configuration) self.assertIsNone(job.range_partitioning) self.assertIsNone(job.time_partitioning) self.assertIsNone(job.clustering_fields) self.assertIsNone(job.schema_update_options) def test_ctor_w_udf_resources(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.query import UDFResource RESOURCE_URI = "gs://some-bucket/js/lib.js" udf_resources = [UDFResource("resourceUri", RESOURCE_URI)] client = _make_client(project=self.PROJECT) config = QueryJobConfig() config.udf_resources = udf_resources job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) self.assertEqual(job.udf_resources, udf_resources) def test_ctor_w_query_parameters(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.query import ScalarQueryParameter query_parameters = [ScalarQueryParameter("foo", "INT64", 123)] client = _make_client(project=self.PROJECT) config = QueryJobConfig(query_parameters=query_parameters) job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) self.assertEqual(job.query_parameters, query_parameters) def test_from_api_repr_missing_identity(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = {} klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_missing_config(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": "%s:%s" % (self.PROJECT, self.DS_ID), "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, } klass = self._get_target_class() with self.assertRaises(KeyError): klass.from_api_repr(RESOURCE, client=client) def test_from_api_repr_bare(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": {"query": {"query": self.QUERY}}, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_with_encryption(self): self._setUpConstants() client = _make_client(project=self.PROJECT) RESOURCE = { "id": self.JOB_ID, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, 
"configuration": { "query": { "query": self.QUERY, "destinationEncryptionConfiguration": { "kmsKeyName": self.KMS_KEY_NAME }, } }, } klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_from_api_repr_w_properties(self): from google.cloud.bigquery.job import CreateDisposition from google.cloud.bigquery.job import SchemaUpdateOption from google.cloud.bigquery.job import WriteDisposition client = _make_client(project=self.PROJECT) RESOURCE = self._make_resource() query_config = RESOURCE["configuration"]["query"] query_config["createDisposition"] = CreateDisposition.CREATE_IF_NEEDED query_config["writeDisposition"] = WriteDisposition.WRITE_TRUNCATE query_config["destinationTable"] = { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.DESTINATION_TABLE, } query_config["schemaUpdateOptions"] = [SchemaUpdateOption.ALLOW_FIELD_ADDITION] klass = self._get_target_class() job = klass.from_api_repr(RESOURCE, client=client) self.assertIs(job._client, client) self._verifyResourceProperties(job, RESOURCE) def test_cancelled(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) job._properties["status"] = { "state": "DONE", "errorResult": {"reason": "stopped"}, } self.assertTrue(job.cancelled()) def test_done(self): client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) self.assertTrue(job.done()) def test_done_w_timeout(self): client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=False) job = self._get_target_class().from_api_repr(resource, client) with mock.patch.object( client, "_get_query_results" ) as fake_get_results, mock.patch.object(job, "reload") as fake_reload: job.done(timeout=42) fake_get_results.assert_called_once() call_args = fake_get_results.call_args self.assertEqual(call_args.kwargs.get("timeout"), 42) call_args = fake_reload.call_args self.assertEqual(call_args.kwargs.get("timeout"), 42) def test_done_w_timeout_and_shorter_internal_api_timeout(self): from google.cloud.bigquery.job import _TIMEOUT_BUFFER_SECS from google.cloud.bigquery.job import _SERVER_TIMEOUT_MARGIN_SECS client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=False) job = self._get_target_class().from_api_repr(resource, client) job._done_timeout = 8.8 with mock.patch.object( client, "_get_query_results" ) as fake_get_results, mock.patch.object(job, "reload") as fake_reload: job.done(timeout=42) # The expected timeout used is the job's own done_timeout minus a # fixed amount (bigquery.job._TIMEOUT_BUFFER_SECS) increased by the # safety margin on top of server-side processing timeout - that's # because that final number is smaller than the given timeout (42 seconds). 
expected_timeout = 8.8 - _TIMEOUT_BUFFER_SECS + _SERVER_TIMEOUT_MARGIN_SECS fake_get_results.assert_called_once() call_args = fake_get_results.call_args self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout) call_args = fake_reload.call_args self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout) def test_done_w_timeout_and_longer_internal_api_timeout(self): client = _make_client(project=self.PROJECT) resource = self._make_resource(ended=False) job = self._get_target_class().from_api_repr(resource, client) job._done_timeout = 8.8 with mock.patch.object( client, "_get_query_results" ) as fake_get_results, mock.patch.object(job, "reload") as fake_reload: job.done(timeout=5.5) # The expected timeout used is simply the given timeout, as the latter # is shorter than the job's internal done timeout. expected_timeout = 5.5 fake_get_results.assert_called_once() call_args = fake_get_results.call_args self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout) call_args = fake_reload.call_args self.assertAlmostEqual(call_args.kwargs.get("timeout"), expected_timeout) def test_query_plan(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud.bigquery.job import QueryPlanEntry from google.cloud.bigquery.job import QueryPlanEntryStep plan_entries = [ { "name": "NAME", "id": "1234", "inputStages": ["88", "101"], "startMs": "1522540800000", "endMs": "1522540804000", "parallelInputs": "1000", "completedParallelInputs": "5", "waitMsAvg": "33", "waitMsMax": "400", "waitRatioAvg": 2.71828, "waitRatioMax": 3.14159, "readMsAvg": "45", "readMsMax": "90", "readRatioAvg": 1.41421, "readRatioMax": 1.73205, "computeMsAvg": "55", "computeMsMax": "99", "computeRatioAvg": 0.69315, "computeRatioMax": 1.09861, "writeMsAvg": "203", "writeMsMax": "340", "writeRatioAvg": 3.32193, "writeRatioMax": 2.30258, "recordsRead": "100", "recordsWritten": "1", "status": "STATUS", "shuffleOutputBytes": "1024", "shuffleOutputBytesSpilled": "1", "steps": [{"kind": "KIND", "substeps": ["SUBSTEP1", "SUBSTEP2"]}], } ] client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertEqual(job.query_plan, []) statistics = job._properties["statistics"] = {} self.assertEqual(job.query_plan, []) query_stats = statistics["query"] = {} self.assertEqual(job.query_plan, []) query_stats["queryPlan"] = plan_entries self.assertEqual(len(job.query_plan), len(plan_entries)) for found, expected in zip(job.query_plan, plan_entries): self.assertIsInstance(found, QueryPlanEntry) self.assertEqual(found.name, expected["name"]) self.assertEqual(found.entry_id, expected["id"]) self.assertEqual(len(found.input_stages), len(expected["inputStages"])) for f_id in found.input_stages: self.assertIn(f_id, [int(e) for e in expected["inputStages"]]) self.assertEqual( found.start.strftime(_RFC3339_MICROS), "2018-04-01T00:00:00.000000Z" ) self.assertEqual( found.end.strftime(_RFC3339_MICROS), "2018-04-01T00:00:04.000000Z" ) self.assertEqual(found.parallel_inputs, int(expected["parallelInputs"])) self.assertEqual( found.completed_parallel_inputs, int(expected["completedParallelInputs"]), ) self.assertEqual(found.wait_ms_avg, int(expected["waitMsAvg"])) self.assertEqual(found.wait_ms_max, int(expected["waitMsMax"])) self.assertEqual(found.wait_ratio_avg, expected["waitRatioAvg"]) self.assertEqual(found.wait_ratio_max, expected["waitRatioMax"]) self.assertEqual(found.read_ms_avg, int(expected["readMsAvg"])) self.assertEqual(found.read_ms_max, 
int(expected["readMsMax"])) self.assertEqual(found.read_ratio_avg, expected["readRatioAvg"]) self.assertEqual(found.read_ratio_max, expected["readRatioMax"]) self.assertEqual(found.compute_ms_avg, int(expected["computeMsAvg"])) self.assertEqual(found.compute_ms_max, int(expected["computeMsMax"])) self.assertEqual(found.compute_ratio_avg, expected["computeRatioAvg"]) self.assertEqual(found.compute_ratio_max, expected["computeRatioMax"]) self.assertEqual(found.write_ms_avg, int(expected["writeMsAvg"])) self.assertEqual(found.write_ms_max, int(expected["writeMsMax"])) self.assertEqual(found.write_ratio_avg, expected["writeRatioAvg"]) self.assertEqual(found.write_ratio_max, expected["writeRatioMax"]) self.assertEqual(found.records_read, int(expected["recordsRead"])) self.assertEqual(found.records_written, int(expected["recordsWritten"])) self.assertEqual(found.status, expected["status"]) self.assertEqual( found.shuffle_output_bytes, int(expected["shuffleOutputBytes"]) ) self.assertEqual( found.shuffle_output_bytes_spilled, int(expected["shuffleOutputBytesSpilled"]), ) self.assertEqual(len(found.steps), len(expected["steps"])) for f_step, e_step in zip(found.steps, expected["steps"]): self.assertIsInstance(f_step, QueryPlanEntryStep) self.assertEqual(f_step.kind, e_step["kind"]) self.assertEqual(f_step.substeps, e_step["substeps"]) def test_total_bytes_processed(self): total_bytes = 1234 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.total_bytes_processed) statistics = job._properties["statistics"] = {} self.assertIsNone(job.total_bytes_processed) query_stats = statistics["query"] = {} self.assertIsNone(job.total_bytes_processed) query_stats["totalBytesProcessed"] = str(total_bytes) self.assertEqual(job.total_bytes_processed, total_bytes) def test_total_bytes_billed(self): total_bytes = 1234 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.total_bytes_billed) statistics = job._properties["statistics"] = {} self.assertIsNone(job.total_bytes_billed) query_stats = statistics["query"] = {} self.assertIsNone(job.total_bytes_billed) query_stats["totalBytesBilled"] = str(total_bytes) self.assertEqual(job.total_bytes_billed, total_bytes) def test_billing_tier(self): billing_tier = 1 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.billing_tier) statistics = job._properties["statistics"] = {} self.assertIsNone(job.billing_tier) query_stats = statistics["query"] = {} self.assertIsNone(job.billing_tier) query_stats["billingTier"] = billing_tier self.assertEqual(job.billing_tier, billing_tier) def test_cache_hit(self): client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.cache_hit) statistics = job._properties["statistics"] = {} self.assertIsNone(job.cache_hit) query_stats = statistics["query"] = {} self.assertIsNone(job.cache_hit) query_stats["cacheHit"] = True self.assertTrue(job.cache_hit) def test_ddl_operation_performed(self): op = "SKIP" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.ddl_operation_performed) statistics = job._properties["statistics"] = {} self.assertIsNone(job.ddl_operation_performed) query_stats = statistics["query"] = {} self.assertIsNone(job.ddl_operation_performed) query_stats["ddlOperationPerformed"] = op 
self.assertEqual(job.ddl_operation_performed, op) def test_ddl_target_routine(self): from google.cloud.bigquery.routine import RoutineReference ref_routine = { "projectId": self.PROJECT, "datasetId": "ddl_ds", "routineId": "targetroutine", } client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.ddl_target_routine) statistics = job._properties["statistics"] = {} self.assertIsNone(job.ddl_target_routine) query_stats = statistics["query"] = {} self.assertIsNone(job.ddl_target_routine) query_stats["ddlTargetRoutine"] = ref_routine self.assertIsInstance(job.ddl_target_routine, RoutineReference) self.assertEqual(job.ddl_target_routine.routine_id, "targetroutine") self.assertEqual(job.ddl_target_routine.dataset_id, "ddl_ds") self.assertEqual(job.ddl_target_routine.project, self.PROJECT) def test_ddl_target_table(self): from google.cloud.bigquery.table import TableReference ref_table = { "projectId": self.PROJECT, "datasetId": "ddl_ds", "tableId": "targettable", } client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.ddl_target_table) statistics = job._properties["statistics"] = {} self.assertIsNone(job.ddl_target_table) query_stats = statistics["query"] = {} self.assertIsNone(job.ddl_target_table) query_stats["ddlTargetTable"] = ref_table self.assertIsInstance(job.ddl_target_table, TableReference) self.assertEqual(job.ddl_target_table.table_id, "targettable") self.assertEqual(job.ddl_target_table.dataset_id, "ddl_ds") self.assertEqual(job.ddl_target_table.project, self.PROJECT) def test_num_dml_affected_rows(self): num_rows = 1234 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.num_dml_affected_rows) statistics = job._properties["statistics"] = {} self.assertIsNone(job.num_dml_affected_rows) query_stats = statistics["query"] = {} self.assertIsNone(job.num_dml_affected_rows) query_stats["numDmlAffectedRows"] = str(num_rows) self.assertEqual(job.num_dml_affected_rows, num_rows) def test_slot_millis(self): millis = 1234 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.slot_millis) statistics = job._properties["statistics"] = {} self.assertIsNone(job.slot_millis) query_stats = statistics["query"] = {} self.assertIsNone(job.slot_millis) query_stats["totalSlotMs"] = millis self.assertEqual(job.slot_millis, millis) def test_statement_type(self): statement_type = "SELECT" client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.statement_type) statistics = job._properties["statistics"] = {} self.assertIsNone(job.statement_type) query_stats = statistics["query"] = {} self.assertIsNone(job.statement_type) query_stats["statementType"] = statement_type self.assertEqual(job.statement_type, statement_type) def test_referenced_tables(self): from google.cloud.bigquery.table import TableReference ref_tables_resource = [ {"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local1"}, {"projectId": self.PROJECT, "datasetId": "dataset", "tableId": "local2"}, { "projectId": "other-project-123", "datasetId": "other-dataset", "tableId": "other-table", }, ] client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertEqual(job.referenced_tables, []) statistics = job._properties["statistics"] = {} 
self.assertEqual(job.referenced_tables, []) query_stats = statistics["query"] = {} self.assertEqual(job.referenced_tables, []) query_stats["referencedTables"] = ref_tables_resource local1, local2, remote = job.referenced_tables self.assertIsInstance(local1, TableReference) self.assertEqual(local1.table_id, "local1") self.assertEqual(local1.dataset_id, "dataset") self.assertEqual(local1.project, self.PROJECT) self.assertIsInstance(local2, TableReference) self.assertEqual(local2.table_id, "local2") self.assertEqual(local2.dataset_id, "dataset") self.assertEqual(local2.project, self.PROJECT) self.assertIsInstance(remote, TableReference) self.assertEqual(remote.table_id, "other-table") self.assertEqual(remote.dataset_id, "other-dataset") self.assertEqual(remote.project, "other-project-123") def test_timeline(self): timeline_resource = [ { "elapsedMs": 1, "activeUnits": 22, "pendingUnits": 33, "completedUnits": 44, "totalSlotMs": 101, } ] client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertEqual(job.timeline, []) statistics = job._properties["statistics"] = {} self.assertEqual(job.timeline, []) query_stats = statistics["query"] = {} self.assertEqual(job.timeline, []) query_stats["timeline"] = timeline_resource self.assertEqual(len(job.timeline), len(timeline_resource)) self.assertEqual(job.timeline[0].elapsed_ms, 1) self.assertEqual(job.timeline[0].active_units, 22) self.assertEqual(job.timeline[0].pending_units, 33) self.assertEqual(job.timeline[0].completed_units, 44) self.assertEqual(job.timeline[0].slot_millis, 101) def test_undeclared_query_parameters(self): from google.cloud.bigquery.query import ArrayQueryParameter from google.cloud.bigquery.query import ScalarQueryParameter from google.cloud.bigquery.query import StructQueryParameter undeclared = [ { "name": "my_scalar", "parameterType": {"type": "STRING"}, "parameterValue": {"value": "value"}, }, { "name": "my_array", "parameterType": {"type": "ARRAY", "arrayType": {"type": "INT64"}}, "parameterValue": { "arrayValues": [{"value": "1066"}, {"value": "1745"}] }, }, { "name": "my_struct", "parameterType": { "type": "STRUCT", "structTypes": [{"name": "count", "type": {"type": "INT64"}}], }, "parameterValue": {"structValues": {"count": {"value": "123"}}}, }, ] client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertEqual(job.undeclared_query_parameters, []) statistics = job._properties["statistics"] = {} self.assertEqual(job.undeclared_query_parameters, []) query_stats = statistics["query"] = {} self.assertEqual(job.undeclared_query_parameters, []) query_stats["undeclaredQueryParameters"] = undeclared scalar, array, struct = job.undeclared_query_parameters self.assertIsInstance(scalar, ScalarQueryParameter) self.assertEqual(scalar.name, "my_scalar") self.assertEqual(scalar.type_, "STRING") self.assertEqual(scalar.value, "value") self.assertIsInstance(array, ArrayQueryParameter) self.assertEqual(array.name, "my_array") self.assertEqual(array.array_type, "INT64") self.assertEqual(array.values, [1066, 1745]) self.assertIsInstance(struct, StructQueryParameter) self.assertEqual(struct.name, "my_struct") self.assertEqual(struct.struct_types, {"count": "INT64"}) self.assertEqual(struct.struct_values, {"count": 123}) def test_estimated_bytes_processed(self): est_bytes = 123456 client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsNone(job.estimated_bytes_processed) statistics = 
job._properties["statistics"] = {} self.assertIsNone(job.estimated_bytes_processed) query_stats = statistics["query"] = {} self.assertIsNone(job.estimated_bytes_processed) query_stats["estimatedBytesProcessed"] = str(est_bytes) self.assertEqual(job.estimated_bytes_processed, est_bytes) def test_result(self): from google.cloud.bigquery.table import RowIterator query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, "totalRows": "2", } tabledata_resource = { # Explicitly set totalRows to be different from the query response. # to test update during iteration. "totalRows": "1", "pageToken": None, "rows": [{"f": [{"v": "abc"}]}], } connection = _make_connection(query_resource, tabledata_resource) client = _make_client(self.PROJECT, connection=connection) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) result = job.result() self.assertIsInstance(result, RowIterator) self.assertEqual(result.total_rows, 2) rows = list(result) self.assertEqual(len(rows), 1) self.assertEqual(rows[0].col1, "abc") # Test that the total_rows property has changed during iteration, based # on the response from tabledata.list. self.assertEqual(result.total_rows, 1) def test_result_with_max_results(self): from google.cloud.bigquery.table import RowIterator query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, "totalRows": "5", } tabledata_resource = { "totalRows": "5", "pageToken": None, "rows": [ {"f": [{"v": "abc"}]}, {"f": [{"v": "def"}]}, {"f": [{"v": "ghi"}]}, ], } connection = _make_connection(query_resource, tabledata_resource) client = _make_client(self.PROJECT, connection=connection) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) max_results = 3 result = job.result(max_results=max_results) self.assertIsInstance(result, RowIterator) self.assertEqual(result.total_rows, 5) rows = list(result) self.assertEqual(len(rows), 3) self.assertEqual(len(connection.api_request.call_args_list), 2) tabledata_list_request = connection.api_request.call_args_list[1] self.assertEqual( tabledata_list_request[1]["query_params"]["maxResults"], max_results ) def test_result_w_empty_schema(self): from google.cloud.bigquery.table import _EmptyRowIterator # Destination table may have no schema for some DDL and DML queries. 
query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": []}, } connection = _make_connection(query_resource, query_resource) client = _make_client(self.PROJECT, connection=connection) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) result = job.result() self.assertIsInstance(result, _EmptyRowIterator) self.assertEqual(list(result), []) def test_result_invokes_begins(self): begun_resource = self._make_resource() incomplete_resource = { "jobComplete": False, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, } query_resource = copy.deepcopy(incomplete_resource) query_resource["jobComplete"] = True done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection( begun_resource, incomplete_resource, query_resource, done_resource, query_resource, ) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) job.result() self.assertEqual(len(connection.api_request.call_args_list), 4) begin_request = connection.api_request.call_args_list[0] query_request = connection.api_request.call_args_list[2] reload_request = connection.api_request.call_args_list[3] self.assertEqual(begin_request[1]["method"], "POST") self.assertEqual(query_request[1]["method"], "GET") self.assertEqual(reload_request[1]["method"], "GET") def test_result_w_timeout(self): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection(begun_resource, query_resource, done_resource) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) with freezegun.freeze_time("1970-01-01 00:00:00", tick=False): job.result(timeout=1.0) self.assertEqual(len(connection.api_request.call_args_list), 3) begin_request = connection.api_request.call_args_list[0] query_request = connection.api_request.call_args_list[1] reload_request = connection.api_request.call_args_list[2] self.assertEqual(begin_request[1]["method"], "POST") self.assertEqual(query_request[1]["method"], "GET") self.assertEqual( query_request[1]["path"], "/projects/{}/queries/{}".format(self.PROJECT, self.JOB_ID), ) self.assertEqual(query_request[1]["query_params"]["timeoutMs"], 900) self.assertEqual(reload_request[1]["method"], "GET") @mock.patch("google.api_core.future.polling.PollingFuture.result") def test_result_splitting_timout_between_requests(self, polling_result): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, "totalRows": "5", } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection(begun_resource, query_resource, done_resource) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) client.list_rows = mock.Mock() with freezegun.freeze_time("1970-01-01 00:00:00", tick=False) as frozen_time: def delayed_result(*args, **kwargs): 
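# Descriptive note (added comment): the mocked calls below advance the frozen clock
# so that the remaining timeout budget shrinks between requests — result(timeout=2.0)
# leaves ~1.2s for getQueryResults after 0.8s spent polling, and ~0.7s for list_rows
# after a further 0.5s spent inside getQueryResults.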
frozen_time.tick(delta=0.8) polling_result.side_effect = delayed_result def delayed_get_results(*args, **kwargs): frozen_time.tick(delta=0.5) return orig_get_results(*args, **kwargs) orig_get_results = client._get_query_results client._get_query_results = mock.Mock(side_effect=delayed_get_results) job.result(timeout=2.0) polling_result.assert_called_once_with(timeout=2.0) client._get_query_results.assert_called_once() _, kwargs = client._get_query_results.call_args self.assertAlmostEqual(kwargs.get("timeout"), 1.2) client.list_rows.assert_called_once() _, kwargs = client.list_rows.call_args self.assertAlmostEqual(kwargs.get("timeout"), 0.7) def test_result_w_page_size(self): # Arrange query_results_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, "totalRows": "4", } job_resource = self._make_resource(started=True, ended=True) q_config = job_resource["configuration"]["query"] q_config["destinationTable"] = { "projectId": self.PROJECT, "datasetId": self.DS_ID, "tableId": self.TABLE_ID, } tabledata_resource = { "totalRows": 4, "pageToken": "some-page-token", "rows": [ {"f": [{"v": "row1"}]}, {"f": [{"v": "row2"}]}, {"f": [{"v": "row3"}]}, ], } tabledata_resource_page_2 = {"totalRows": 4, "rows": [{"f": [{"v": "row4"}]}]} conn = _make_connection( query_results_resource, tabledata_resource, tabledata_resource_page_2 ) client = _make_client(self.PROJECT, connection=conn) job = self._get_target_class().from_api_repr(job_resource, client) # Act result = job.result(page_size=3) # Assert actual_rows = list(result) self.assertEqual(len(actual_rows), 4) tabledata_path = "/projects/%s/datasets/%s/tables/%s/data" % ( self.PROJECT, self.DS_ID, self.TABLE_ID, ) conn.api_request.assert_has_calls( [ mock.call( method="GET", path=tabledata_path, query_params={"maxResults": 3}, timeout=None, ), mock.call( method="GET", path=tabledata_path, query_params={"pageToken": "some-page-token", "maxResults": 3}, timeout=None, ), ] ) def test_result_error(self): from google.cloud import exceptions query = textwrap.dedent( """ SELECT foo, bar FROM table_baz WHERE foo == bar""" ) client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, query, client) error_result = { "debugInfo": "DEBUG", "location": "LOCATION", "message": "MESSAGE", "reason": "invalid", } job._properties["status"] = { "errorResult": error_result, "errors": [error_result], "state": "DONE", } job._set_future_result() with self.assertRaises(exceptions.GoogleCloudError) as exc_info: job.result() self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError) self.assertEqual(exc_info.exception.code, http_client.BAD_REQUEST) exc_job_instance = getattr(exc_info.exception, "query_job", None) self.assertIs(exc_job_instance, job) full_text = str(exc_info.exception) assert job.job_id in full_text assert "Query Job SQL Follows" in full_text for i, line in enumerate(query.splitlines(), start=1): expected_line = "{}:{}".format(i, line) assert expected_line in full_text def test_result_transport_timeout_error(self): query = textwrap.dedent( """ SELECT foo, bar FROM table_baz WHERE foo == bar""" ) client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, query, client) call_api_patch = mock.patch( "google.cloud.bigquery.client.Client._call_api", autospec=True, side_effect=requests.exceptions.Timeout("Server response took too long."), ) # Make sure that timeout errors get rebranded to concurrent futures timeout. 
with call_api_patch, self.assertRaises(concurrent.futures.TimeoutError): job.result(timeout=1) def test__begin_error(self): from google.cloud import exceptions query = textwrap.dedent( """ SELECT foo, bar FROM table_baz WHERE foo == bar""" ) client = _make_client(project=self.PROJECT) job = self._make_one(self.JOB_ID, query, client) call_api_patch = mock.patch( "google.cloud.bigquery.client.Client._call_api", autospec=True, side_effect=exceptions.BadRequest("Syntax error in SQL query"), ) with call_api_patch, self.assertRaises(exceptions.GoogleCloudError) as exc_info: job.result() self.assertIsInstance(exc_info.exception, exceptions.GoogleCloudError) self.assertEqual(exc_info.exception.code, http_client.BAD_REQUEST) exc_job_instance = getattr(exc_info.exception, "query_job", None) self.assertIs(exc_job_instance, job) full_text = str(exc_info.exception) assert job.job_id in full_text assert "Query Job SQL Follows" in full_text for i, line in enumerate(query.splitlines(), start=1): expected_line = "{}:{}".format(i, line) assert expected_line in full_text def test__begin_w_timeout(self): PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, self.QUERY, client) job._begin(timeout=7.5) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": {"query": self.QUERY, "useLegacySql": False} }, }, timeout=7.5, ) def test_begin_w_bound_client(self): from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.job import QueryJobConfig PATH = "/projects/%s/jobs" % (self.PROJECT,) DS_ID = "DATASET" RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) config = QueryJobConfig() config.default_dataset = DatasetReference(self.PROJECT, DS_ID) job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) job._begin() self.assertIsNone(job.default_dataset) self.assertEqual(job.udf_resources, []) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": { "query": self.QUERY, "useLegacySql": False, "defaultDataset": { "projectId": self.PROJECT, "datasetId": DS_ID, }, } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_alternate_client(self): from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.job import CreateDisposition from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.job import QueryPriority from google.cloud.bigquery.job import SchemaUpdateOption from google.cloud.bigquery.job import WriteDisposition PATH = "/projects/%s/jobs" % (self.PROJECT,) TABLE = "TABLE" DS_ID = "DATASET" RESOURCE = self._make_resource(ended=True) QUERY_CONFIGURATION = { "query": self.QUERY, "allowLargeResults": True, "createDisposition": CreateDisposition.CREATE_NEVER, "defaultDataset": {"projectId": self.PROJECT, "datasetId": DS_ID}, "destinationTable": { "projectId": self.PROJECT, "datasetId": DS_ID, "tableId": TABLE, }, "flattenResults": True, "priority": QueryPriority.INTERACTIVE, "useQueryCache": 
True, "useLegacySql": True, "writeDisposition": WriteDisposition.WRITE_TRUNCATE, "maximumBillingTier": 4, "maximumBytesBilled": "123456", "schemaUpdateOptions": [SchemaUpdateOption.ALLOW_FIELD_RELAXATION], } RESOURCE["configuration"]["query"] = QUERY_CONFIGURATION RESOURCE["configuration"]["dryRun"] = True conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) dataset_ref = DatasetReference(self.PROJECT, DS_ID) table_ref = dataset_ref.table(TABLE) config = QueryJobConfig() config.allow_large_results = True config.create_disposition = CreateDisposition.CREATE_NEVER config.default_dataset = dataset_ref config.destination = table_ref config.dry_run = True config.flatten_results = True config.maximum_billing_tier = 4 config.priority = QueryPriority.INTERACTIVE config.use_legacy_sql = True config.use_query_cache = True config.write_disposition = WriteDisposition.WRITE_TRUNCATE config.maximum_bytes_billed = 123456 config.schema_update_options = [SchemaUpdateOption.ALLOW_FIELD_RELAXATION] job = self._make_one(self.JOB_ID, self.QUERY, client1, job_config=config) job._begin(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": {"dryRun": True, "query": QUERY_CONFIGURATION}, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_udf(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.query import UDFResource RESOURCE_URI = "gs://some-bucket/js/lib.js" INLINE_UDF_CODE = 'var someCode = "here";' PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] RESOURCE["configuration"]["query"]["userDefinedFunctionResources"] = [ {"resourceUri": RESOURCE_URI}, {"inlineCode": INLINE_UDF_CODE}, ] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) udf_resources = [ UDFResource("resourceUri", RESOURCE_URI), UDFResource("inlineCode", INLINE_UDF_CODE), ] config = QueryJobConfig() config.udf_resources = udf_resources config.use_legacy_sql = True job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) job._begin() self.assertEqual(job.udf_resources, udf_resources) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": { "query": self.QUERY, "useLegacySql": True, "userDefinedFunctionResources": [ {"resourceUri": RESOURCE_URI}, {"inlineCode": INLINE_UDF_CODE}, ], } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_named_query_parameter(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.query import ScalarQueryParameter query_parameters = [ScalarQueryParameter("foo", "INT64", 123)] PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] config = RESOURCE["configuration"]["query"] config["parameterMode"] = "NAMED" config["queryParameters"] = [ { "name": "foo", "parameterType": 
{"type": "INT64"}, "parameterValue": {"value": "123"}, } ] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) jconfig = QueryJobConfig() jconfig.query_parameters = query_parameters job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig) job._begin() self.assertEqual(job.query_parameters, query_parameters) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": { "query": self.QUERY, "useLegacySql": False, "parameterMode": "NAMED", "queryParameters": config["queryParameters"], } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_positional_query_parameter(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.query import ScalarQueryParameter query_parameters = [ScalarQueryParameter.positional("INT64", 123)] PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] config = RESOURCE["configuration"]["query"] config["parameterMode"] = "POSITIONAL" config["queryParameters"] = [ {"parameterType": {"type": "INT64"}, "parameterValue": {"value": "123"}} ] conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) jconfig = QueryJobConfig() jconfig.query_parameters = query_parameters job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=jconfig) job._begin() self.assertEqual(job.query_parameters, query_parameters) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": { "query": self.QUERY, "useLegacySql": False, "parameterMode": "POSITIONAL", "queryParameters": config["queryParameters"], } }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_begin_w_table_defs(self): from google.cloud.bigquery.job import QueryJobConfig from google.cloud.bigquery.external_config import ExternalConfig from google.cloud.bigquery.external_config import BigtableColumn from google.cloud.bigquery.external_config import BigtableColumnFamily PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] bt_config = ExternalConfig("BIGTABLE") bt_config.ignore_unknown_values = True bt_config.options.read_rowkey_as_string = True cf = BigtableColumnFamily() cf.family_id = "cf" col = BigtableColumn() col.field_name = "fn" cf.columns = [col] bt_config.options.column_families = [cf] BT_CONFIG_RESOURCE = { "sourceFormat": "BIGTABLE", "ignoreUnknownValues": True, "bigtableOptions": { "readRowkeyAsString": True, "columnFamilies": [ {"familyId": "cf", "columns": [{"fieldName": "fn"}]} ], }, } CSV_CONFIG_RESOURCE = { "sourceFormat": "CSV", "maxBadRecords": 8, "csvOptions": {"allowJaggedRows": True}, } csv_config = ExternalConfig("CSV") csv_config.max_bad_records = 8 csv_config.options.allow_jagged_rows = True bt_table = "bigtable-table" csv_table = "csv-table" RESOURCE["configuration"]["query"]["tableDefinitions"] = { bt_table: BT_CONFIG_RESOURCE, csv_table: CSV_CONFIG_RESOURCE, } want_resource = copy.deepcopy(RESOURCE) conn = _make_connection(RESOURCE) client = 
_make_client(project=self.PROJECT, connection=conn) config = QueryJobConfig() config.table_definitions = {bt_table: bt_config, csv_table: csv_config} config.use_legacy_sql = True job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) job._begin() conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": { "query": self.QUERY, "useLegacySql": True, "tableDefinitions": { bt_table: BT_CONFIG_RESOURCE, csv_table: CSV_CONFIG_RESOURCE, }, } }, }, timeout=None, ) self._verifyResourceProperties(job, want_resource) def test_dry_run_query(self): from google.cloud.bigquery.job import QueryJobConfig PATH = "/projects/%s/jobs" % (self.PROJECT,) RESOURCE = self._make_resource() # Ensure None for missing server-set props del RESOURCE["statistics"]["creationTime"] del RESOURCE["etag"] del RESOURCE["selfLink"] del RESOURCE["user_email"] RESOURCE["configuration"]["dryRun"] = True conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) config = QueryJobConfig() config.dry_run = True job = self._make_one(self.JOB_ID, self.QUERY, client, job_config=config) job._begin() self.assertEqual(job.udf_resources, []) conn.api_request.assert_called_once_with( method="POST", path=PATH, data={ "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "configuration": { "query": {"query": self.QUERY, "useLegacySql": False}, "dryRun": True, }, }, timeout=None, ) self._verifyResourceProperties(job, RESOURCE) def test_exists_miss_w_bound_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn = _make_connection() client = _make_client(project=self.PROJECT, connection=conn) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertFalse(job.exists()) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_exists_hit_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection({}) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one(self.JOB_ID, self.QUERY, client1) self.assertTrue(job.exists(client=client2)) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={"fields": "id"}, timeout=None ) def test_reload_w_bound_client(self): from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.job import QueryJobConfig PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) DS_ID = "DATASET" DEST_TABLE = "dest_table" RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) dataset_ref = DatasetReference(self.PROJECT, DS_ID) table_ref = dataset_ref.table(DEST_TABLE) config = QueryJobConfig() config.destination = table_ref job = self._make_one(self.JOB_ID, None, client, job_config=config) job.reload() self.assertNotEqual(job.destination, table_ref) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def test_reload_w_alternate_client(self): PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) DS_ID = "DATASET" DEST_TABLE = "dest_table" RESOURCE = self._make_resource() q_config = RESOURCE["configuration"]["query"] 
q_config["destinationTable"] = { "projectId": self.PROJECT, "datasetId": DS_ID, "tableId": DEST_TABLE, } conn1 = _make_connection() client1 = _make_client(project=self.PROJECT, connection=conn1) conn2 = _make_connection(RESOURCE) client2 = _make_client(project=self.PROJECT, connection=conn2) job = self._make_one(self.JOB_ID, self.QUERY, client1) job.reload(client=client2) conn1.api_request.assert_not_called() conn2.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=None ) self._verifyResourceProperties(job, RESOURCE) def test_reload_w_timeout(self): from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.job import QueryJobConfig PATH = "/projects/%s/jobs/%s" % (self.PROJECT, self.JOB_ID) DS_ID = "DATASET" DEST_TABLE = "dest_table" RESOURCE = self._make_resource() conn = _make_connection(RESOURCE) client = _make_client(project=self.PROJECT, connection=conn) dataset_ref = DatasetReference(self.PROJECT, DS_ID) table_ref = dataset_ref.table(DEST_TABLE) config = QueryJobConfig() config.destination = table_ref job = self._make_one(self.JOB_ID, None, client, job_config=config) job.reload(timeout=4.2) self.assertNotEqual(job.destination, table_ref) conn.api_request.assert_called_once_with( method="GET", path=PATH, query_params={}, timeout=4.2 ) @unittest.skipIf(pyarrow is None, "Requires `pyarrow`") def test_to_arrow(self): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "4", "schema": { "fields": [ { "name": "spouse_1", "type": "RECORD", "fields": [ {"name": "name", "type": "STRING", "mode": "NULLABLE"}, {"name": "age", "type": "INTEGER", "mode": "NULLABLE"}, ], }, { "name": "spouse_2", "type": "RECORD", "fields": [ {"name": "name", "type": "STRING", "mode": "NULLABLE"}, {"name": "age", "type": "INTEGER", "mode": "NULLABLE"}, ], }, ] }, } tabledata_resource = { "rows": [ { "f": [ {"v": {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]}}, {"v": {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]}}, ] }, { "f": [ {"v": {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]}}, {"v": {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]}}, ] }, ] } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection( begun_resource, query_resource, done_resource, tabledata_resource ) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) tbl = job.to_arrow() self.assertIsInstance(tbl, pyarrow.Table) self.assertEqual(tbl.num_rows, 2) # Check the schema. self.assertEqual(tbl.schema[0].name, "spouse_1") self.assertEqual(tbl.schema[0].type[0].name, "name") self.assertEqual(tbl.schema[0].type[1].name, "age") self.assertTrue(pyarrow.types.is_struct(tbl.schema[0].type)) self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type[0].type)) self.assertTrue(pyarrow.types.is_int64(tbl.schema[0].type[1].type)) self.assertEqual(tbl.schema[1].name, "spouse_2") self.assertEqual(tbl.schema[1].type[0].name, "name") self.assertEqual(tbl.schema[1].type[1].name, "age") self.assertTrue(pyarrow.types.is_struct(tbl.schema[1].type)) self.assertTrue(pyarrow.types.is_string(tbl.schema[1].type[0].type)) self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type[1].type)) # Check the data. 
tbl_data = tbl.to_pydict() spouse_1 = tbl_data["spouse_1"] self.assertEqual( spouse_1, [ {"name": "Phred Phlyntstone", "age": 32}, {"name": "Bhettye Rhubble", "age": 27}, ], ) spouse_2 = tbl_data["spouse_2"] self.assertEqual( spouse_2, [ {"name": "Wylma Phlyntstone", "age": 29}, {"name": "Bharney Rhubble", "age": 33}, ], ) @unittest.skipIf(pandas is None, "Requires `pandas`") def test_to_dataframe(self): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "4", "schema": { "fields": [ {"name": "name", "type": "STRING", "mode": "NULLABLE"}, {"name": "age", "type": "INTEGER", "mode": "NULLABLE"}, ] }, } tabledata_resource = { "rows": [ {"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]}, {"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]}, {"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]}, {"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]}, ] } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection( begun_resource, query_resource, done_resource, tabledata_resource ) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) df = job.to_dataframe() self.assertIsInstance(df, pandas.DataFrame) self.assertEqual(len(df), 4) # verify the number of rows self.assertEqual(list(df), ["name", "age"]) # verify the column names @unittest.skipIf(pandas is None, "Requires `pandas`") def test_to_dataframe_ddl_query(self): # Destination table may have no schema for some DDL and DML queries. query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "schema": {"fields": []}, } connection = _make_connection(query_resource) client = _make_client(self.PROJECT, connection=connection) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) df = job.to_dataframe() self.assertEqual(len(df), 0) @unittest.skipIf(pandas is None, "Requires `pandas`") @unittest.skipIf( bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`" ) def test_to_dataframe_bqstorage(self): query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "4", "schema": { "fields": [ {"name": "name", "type": "STRING", "mode": "NULLABLE"}, {"name": "age", "type": "INTEGER", "mode": "NULLABLE"}, ] }, } connection = _make_connection(query_resource) client = _make_client(self.PROJECT, connection=connection) resource = self._make_resource(ended=True) job = self._get_target_class().from_api_repr(resource, client) bqstorage_client = mock.create_autospec( bigquery_storage_v1beta1.BigQueryStorageClient ) session = bigquery_storage_v1beta1.types.ReadSession() session.avro_schema.schema = json.dumps( { "type": "record", "name": "__root__", "fields": [ {"name": "name", "type": ["null", "string"]}, {"name": "age", "type": ["null", "long"]}, ], } ) bqstorage_client.create_read_session.return_value = session job.to_dataframe(bqstorage_client=bqstorage_client) bqstorage_client.create_read_session.assert_called_once_with( mock.ANY, "projects/{}".format(self.PROJECT), format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW, read_options=mock.ANY, # Use default number of streams for best performance. 
requested_streams=0, ) @unittest.skipIf(pandas is None, "Requires `pandas`") def test_to_dataframe_column_dtypes(self): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "4", "schema": { "fields": [ {"name": "start_timestamp", "type": "TIMESTAMP"}, {"name": "seconds", "type": "INT64"}, {"name": "miles", "type": "FLOAT64"}, {"name": "km", "type": "FLOAT64"}, {"name": "payment_type", "type": "STRING"}, {"name": "complete", "type": "BOOL"}, {"name": "date", "type": "DATE"}, ] }, } row_data = [ ["1.4338368E9", "420", "1.1", "1.77", "Cash", "true", "1999-12-01"], ["1.3878117E9", "2580", "17.7", "28.5", "Cash", "false", "1953-06-14"], ["1.3855653E9", "2280", "4.4", "7.1", "Credit", "true", "1981-11-04"], ] rows = [{"f": [{"v": field} for field in row]} for row in row_data] query_resource["rows"] = rows done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection( begun_resource, query_resource, done_resource, query_resource ) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) df = job.to_dataframe(dtypes={"km": "float16"}) self.assertIsInstance(df, pandas.DataFrame) self.assertEqual(len(df), 3) # verify the number of rows exp_columns = [field["name"] for field in query_resource["schema"]["fields"]] self.assertEqual(list(df), exp_columns) # verify the column names self.assertEqual(df.start_timestamp.dtype.name, "datetime64[ns, UTC]") self.assertEqual(df.seconds.dtype.name, "int64") self.assertEqual(df.miles.dtype.name, "float64") self.assertEqual(df.km.dtype.name, "float16") self.assertEqual(df.payment_type.dtype.name, "object") self.assertEqual(df.complete.dtype.name, "bool") self.assertEqual(df.date.dtype.name, "object") @unittest.skipIf(pandas is None, "Requires `pandas`") @unittest.skipIf(tqdm is None, "Requires `tqdm`") @mock.patch("tqdm.tqdm") def test_to_dataframe_with_progress_bar(self, tqdm_mock): begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "4", "schema": { "fields": [{"name": "name", "type": "STRING", "mode": "NULLABLE"}] }, } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection( begun_resource, query_resource, done_resource, query_resource, query_resource, ) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) job.to_dataframe(progress_bar_type=None) tqdm_mock.assert_not_called() job.to_dataframe(progress_bar_type="tqdm") tqdm_mock.assert_called() def test_iter(self): import types begun_resource = self._make_resource() query_resource = { "jobComplete": True, "jobReference": {"projectId": self.PROJECT, "jobId": self.JOB_ID}, "totalRows": "0", "schema": {"fields": [{"name": "col1", "type": "STRING"}]}, } done_resource = copy.deepcopy(begun_resource) done_resource["status"] = {"state": "DONE"} connection = _make_connection(begun_resource, query_resource, done_resource) client = _make_client(project=self.PROJECT, connection=connection) job = self._make_one(self.JOB_ID, self.QUERY, client) self.assertIsInstance(iter(job), types.GeneratorType) class TestQueryPlanEntryStep(unittest.TestCase, _Base): KIND = "KIND" SUBSTEPS = ("SUB1", "SUB2") @staticmethod def _get_target_class(): from google.cloud.bigquery.job 
import QueryPlanEntryStep return QueryPlanEntryStep def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor(self): step = self._make_one(self.KIND, self.SUBSTEPS) self.assertEqual(step.kind, self.KIND) self.assertEqual(step.substeps, list(self.SUBSTEPS)) def test_from_api_repr_empty(self): klass = self._get_target_class() step = klass.from_api_repr({}) self.assertIsNone(step.kind) self.assertEqual(step.substeps, []) def test_from_api_repr_normal(self): resource = {"kind": self.KIND, "substeps": self.SUBSTEPS} klass = self._get_target_class() step = klass.from_api_repr(resource) self.assertEqual(step.kind, self.KIND) self.assertEqual(step.substeps, list(self.SUBSTEPS)) def test___eq___mismatched_type(self): step = self._make_one(self.KIND, self.SUBSTEPS) self.assertNotEqual(step, object()) def test___eq___mismatch_kind(self): step = self._make_one(self.KIND, self.SUBSTEPS) other = self._make_one("OTHER", self.SUBSTEPS) self.assertNotEqual(step, other) def test___eq___mismatch_substeps(self): step = self._make_one(self.KIND, self.SUBSTEPS) other = self._make_one(self.KIND, ()) self.assertNotEqual(step, other) def test___eq___hit(self): step = self._make_one(self.KIND, self.SUBSTEPS) other = self._make_one(self.KIND, self.SUBSTEPS) self.assertEqual(step, other) def test___eq___wrong_type(self): step = self._make_one(self.KIND, self.SUBSTEPS) self.assertFalse(step == "hello") class TestQueryPlanEntry(unittest.TestCase, _Base): NAME = "NAME" ENTRY_ID = 1234 START_MS = 1522540800000 END_MS = 1522540804000 INPUT_STAGES = (88, 101) PARALLEL_INPUTS = 1000 COMPLETED_PARALLEL_INPUTS = 5 WAIT_MS_AVG = 33 WAIT_MS_MAX = 400 WAIT_RATIO_AVG = 2.71828 WAIT_RATIO_MAX = 3.14159 READ_MS_AVG = 45 READ_MS_MAX = 90 READ_RATIO_AVG = 1.41421 READ_RATIO_MAX = 1.73205 COMPUTE_MS_AVG = 55 COMPUTE_MS_MAX = 99 COMPUTE_RATIO_AVG = 0.69315 COMPUTE_RATIO_MAX = 1.09861 WRITE_MS_AVG = 203 WRITE_MS_MAX = 340 WRITE_RATIO_AVG = 3.32193 WRITE_RATIO_MAX = 2.30258 RECORDS_READ = 100 RECORDS_WRITTEN = 1 STATUS = "STATUS" SHUFFLE_OUTPUT_BYTES = 1024 SHUFFLE_OUTPUT_BYTES_SPILLED = 1 START_RFC3339_MICROS = "2018-04-01T00:00:00.000000Z" END_RFC3339_MICROS = "2018-04-01T00:00:04.000000Z" @staticmethod def _get_target_class(): from google.cloud.bigquery.job import QueryPlanEntry return QueryPlanEntry def test_from_api_repr_empty(self): klass = self._get_target_class() entry = klass.from_api_repr({}) self.assertIsNone(entry.name) self.assertIsNone(entry.entry_id) self.assertEqual(entry.input_stages, []) self.assertIsNone(entry.start) self.assertIsNone(entry.end) self.assertIsNone(entry.parallel_inputs) self.assertIsNone(entry.completed_parallel_inputs) self.assertIsNone(entry.wait_ms_avg) self.assertIsNone(entry.wait_ms_max) self.assertIsNone(entry.wait_ratio_avg) self.assertIsNone(entry.wait_ratio_max) self.assertIsNone(entry.read_ms_avg) self.assertIsNone(entry.read_ms_max) self.assertIsNone(entry.read_ratio_avg) self.assertIsNone(entry.read_ratio_max) self.assertIsNone(entry.compute_ms_avg) self.assertIsNone(entry.compute_ms_max) self.assertIsNone(entry.compute_ratio_avg) self.assertIsNone(entry.compute_ratio_max) self.assertIsNone(entry.write_ms_avg) self.assertIsNone(entry.write_ms_max) self.assertIsNone(entry.write_ratio_avg) self.assertIsNone(entry.write_ratio_max) self.assertIsNone(entry.records_read) self.assertIsNone(entry.records_written) self.assertIsNone(entry.status) self.assertIsNone(entry.shuffle_output_bytes) self.assertIsNone(entry.shuffle_output_bytes_spilled) 
self.assertEqual(entry.steps, []) def test_from_api_repr_normal(self): from google.cloud.bigquery.job import QueryPlanEntryStep steps = [ QueryPlanEntryStep( kind=TestQueryPlanEntryStep.KIND, substeps=TestQueryPlanEntryStep.SUBSTEPS, ) ] resource = { "name": self.NAME, "id": self.ENTRY_ID, "inputStages": self.INPUT_STAGES, "startMs": self.START_MS, "endMs": self.END_MS, "waitMsAvg": self.WAIT_MS_AVG, "waitMsMax": self.WAIT_MS_MAX, "waitRatioAvg": self.WAIT_RATIO_AVG, "waitRatioMax": self.WAIT_RATIO_MAX, "readMsAvg": self.READ_MS_AVG, "readMsMax": self.READ_MS_MAX, "readRatioAvg": self.READ_RATIO_AVG, "readRatioMax": self.READ_RATIO_MAX, "computeMsAvg": self.COMPUTE_MS_AVG, "computeMsMax": self.COMPUTE_MS_MAX, "computeRatioAvg": self.COMPUTE_RATIO_AVG, "computeRatioMax": self.COMPUTE_RATIO_MAX, "writeMsAvg": self.WRITE_MS_AVG, "writeMsMax": self.WRITE_MS_MAX, "writeRatioAvg": self.WRITE_RATIO_AVG, "writeRatioMax": self.WRITE_RATIO_MAX, "recordsRead": self.RECORDS_READ, "recordsWritten": self.RECORDS_WRITTEN, "status": self.STATUS, "shuffleOutputBytes": self.SHUFFLE_OUTPUT_BYTES, "shuffleOutputBytesSpilled": self.SHUFFLE_OUTPUT_BYTES_SPILLED, "steps": [ { "kind": TestQueryPlanEntryStep.KIND, "substeps": TestQueryPlanEntryStep.SUBSTEPS, } ], } klass = self._get_target_class() entry = klass.from_api_repr(resource) self.assertEqual(entry.name, self.NAME) self.assertEqual(entry.entry_id, self.ENTRY_ID) self.assertEqual(entry.wait_ratio_avg, self.WAIT_RATIO_AVG) self.assertEqual(entry.wait_ratio_max, self.WAIT_RATIO_MAX) self.assertEqual(entry.read_ratio_avg, self.READ_RATIO_AVG) self.assertEqual(entry.read_ratio_max, self.READ_RATIO_MAX) self.assertEqual(entry.compute_ratio_avg, self.COMPUTE_RATIO_AVG) self.assertEqual(entry.compute_ratio_max, self.COMPUTE_RATIO_MAX) self.assertEqual(entry.write_ratio_avg, self.WRITE_RATIO_AVG) self.assertEqual(entry.write_ratio_max, self.WRITE_RATIO_MAX) self.assertEqual(entry.records_read, self.RECORDS_READ) self.assertEqual(entry.records_written, self.RECORDS_WRITTEN) self.assertEqual(entry.status, self.STATUS) self.assertEqual(entry.steps, steps) def test_start(self): from google.cloud._helpers import _RFC3339_MICROS klass = self._get_target_class() entry = klass.from_api_repr({}) self.assertEqual(entry.start, None) entry._properties["startMs"] = self.START_MS self.assertEqual( entry.start.strftime(_RFC3339_MICROS), self.START_RFC3339_MICROS ) def test_end(self): from google.cloud._helpers import _RFC3339_MICROS klass = self._get_target_class() entry = klass.from_api_repr({}) self.assertEqual(entry.end, None) entry._properties["endMs"] = self.END_MS self.assertEqual(entry.end.strftime(_RFC3339_MICROS), self.END_RFC3339_MICROS) class TestScriptStackFrame(unittest.TestCase, _Base): def _make_one(self, resource): from google.cloud.bigquery.job import ScriptStackFrame return ScriptStackFrame(resource) def test_procedure_id(self): frame = self._make_one({"procedureId": "some-procedure"}) self.assertEqual(frame.procedure_id, "some-procedure") del frame._properties["procedureId"] self.assertIsNone(frame.procedure_id) def test_start_line(self): frame = self._make_one({"startLine": 5}) self.assertEqual(frame.start_line, 5) frame._properties["startLine"] = "5" self.assertEqual(frame.start_line, 5) def test_start_column(self): frame = self._make_one({"startColumn": 29}) self.assertEqual(frame.start_column, 29) frame._properties["startColumn"] = "29" self.assertEqual(frame.start_column, 29) def test_end_line(self): frame = self._make_one({"endLine": 9}) 
self.assertEqual(frame.end_line, 9) frame._properties["endLine"] = "9" self.assertEqual(frame.end_line, 9) def test_end_column(self): frame = self._make_one({"endColumn": 14}) self.assertEqual(frame.end_column, 14) frame._properties["endColumn"] = "14" self.assertEqual(frame.end_column, 14) def test_text(self): frame = self._make_one({"text": "QUERY TEXT"}) self.assertEqual(frame.text, "QUERY TEXT") class TestScriptStatistics(unittest.TestCase, _Base): def _make_one(self, resource): from google.cloud.bigquery.job import ScriptStatistics return ScriptStatistics(resource) def test_evalutation_kind(self): stats = self._make_one({"evaluationKind": "EXPRESSION"}) self.assertEqual(stats.evaluation_kind, "EXPRESSION") self.assertEqual(stats.stack_frames, []) def test_stack_frames(self): stats = self._make_one( { "stackFrames": [ { "procedureId": "some-procedure", "startLine": 5, "startColumn": 29, "endLine": 9, "endColumn": 14, "text": "QUERY TEXT", }, {}, ] } ) stack_frames = stats.stack_frames self.assertEqual(len(stack_frames), 2) stack_frame = stack_frames[0] self.assertEqual(stack_frame.procedure_id, "some-procedure") self.assertEqual(stack_frame.start_line, 5) self.assertEqual(stack_frame.start_column, 29) self.assertEqual(stack_frame.end_line, 9) self.assertEqual(stack_frame.end_column, 14) self.assertEqual(stack_frame.text, "QUERY TEXT") stack_frame = stack_frames[1] self.assertIsNone(stack_frame.procedure_id) self.assertIsNone(stack_frame.start_line) self.assertIsNone(stack_frame.start_column) self.assertIsNone(stack_frame.end_line) self.assertIsNone(stack_frame.end_column) self.assertIsNone(stack_frame.text) class TestTimelineEntry(unittest.TestCase, _Base): ELAPSED_MS = 101 ACTIVE_UNITS = 50 PENDING_UNITS = 98 COMPLETED_UNITS = 520 SLOT_MILLIS = 12029 @staticmethod def _get_target_class(): from google.cloud.bigquery.job import TimelineEntry return TimelineEntry def test_from_api_repr_empty(self): klass = self._get_target_class() entry = klass.from_api_repr({}) self.assertIsNone(entry.elapsed_ms) self.assertIsNone(entry.active_units) self.assertIsNone(entry.pending_units) self.assertIsNone(entry.completed_units) self.assertIsNone(entry.slot_millis) def test_from_api_repr_normal(self): resource = { "elapsedMs": self.ELAPSED_MS, "activeUnits": self.ACTIVE_UNITS, "pendingUnits": self.PENDING_UNITS, "completedUnits": self.COMPLETED_UNITS, "totalSlotMs": self.SLOT_MILLIS, } klass = self._get_target_class() entry = klass.from_api_repr(resource) self.assertEqual(entry.elapsed_ms, self.ELAPSED_MS) self.assertEqual(entry.active_units, self.ACTIVE_UNITS) self.assertEqual(entry.pending_units, self.PENDING_UNITS) self.assertEqual(entry.completed_units, self.COMPLETED_UNITS) self.assertEqual(entry.slot_millis, self.SLOT_MILLIS) @pytest.mark.parametrize( "query,expected", ( (None, False), ("", False), ("select name, age from table", False), ("select name, age from table LIMIT 10;", False), ("select name, age from table order by other_column;", True), ("Select name, age From table Order By other_column", True), ("SELECT name, age FROM table ORDER BY other_column;", True), ("select name, age from table order\nby other_column", True), ("Select name, age From table Order\nBy other_column;", True), ("SELECT name, age FROM table ORDER\nBY other_column", True), ("SelecT name, age froM table OrdeR \n\t BY other_column;", True), ), ) def test__contains_order_by(query, expected): from google.cloud.bigquery import job as mut if expected: assert mut._contains_order_by(query) else: assert not 
mut._contains_order_by(query) @pytest.mark.skipif(pandas is None, reason="Requires `pandas`") @pytest.mark.skipif( bigquery_storage_v1beta1 is None, reason="Requires `google-cloud-bigquery-storage`" ) @pytest.mark.parametrize( "query", ( "select name, age from table order by other_column;", "Select name, age From table Order By other_column;", "SELECT name, age FROM table ORDER BY other_column;", "select name, age from table order\nby other_column;", "Select name, age From table Order\nBy other_column;", "SELECT name, age FROM table ORDER\nBY other_column;", "SelecT name, age froM table OrdeR \n\t BY other_column;", ), ) def test_to_dataframe_bqstorage_preserve_order(query): from google.cloud.bigquery.job import QueryJob as target_class job_resource = _make_job_resource( project_id="test-project", job_type="query", ended=True ) job_resource["configuration"]["query"]["query"] = query job_resource["status"] = {"state": "DONE"} get_query_results_resource = { "jobComplete": True, "jobReference": {"projectId": "test-project", "jobId": "test-job"}, "schema": { "fields": [ {"name": "name", "type": "STRING", "mode": "NULLABLE"}, {"name": "age", "type": "INTEGER", "mode": "NULLABLE"}, ] }, "totalRows": "4", } connection = _make_connection(get_query_results_resource, job_resource) client = _make_client(connection=connection) job = target_class.from_api_repr(job_resource, client) bqstorage_client = mock.create_autospec( bigquery_storage_v1beta1.BigQueryStorageClient ) session = bigquery_storage_v1beta1.types.ReadSession() session.avro_schema.schema = json.dumps( { "type": "record", "name": "__root__", "fields": [ {"name": "name", "type": ["null", "string"]}, {"name": "age", "type": ["null", "long"]}, ], } ) bqstorage_client.create_read_session.return_value = session job.to_dataframe(bqstorage_client=bqstorage_client) bqstorage_client.create_read_session.assert_called_once_with( mock.ANY, "projects/test-project", format_=bigquery_storage_v1beta1.enums.DataFormat.ARROW, read_options=mock.ANY, # Use a single stream to preserve row order. requested_streams=1, )
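# ---------------------------------------------------------------------------
# Illustrative sketch (not the library's implementation): the parametrized cases
# above exercise `_contains_order_by`, which decides whether to_dataframe() should
# request a single BigQuery Storage stream (requested_streams=1) so that row order
# is preserved, rather than the default (requested_streams=0). A minimal regex-based
# approximation consistent with those cases; the name `_contains_order_by_sketch`
# is hypothetical.
import re


def _contains_order_by_sketch(query):
    """Best-effort check for a trailing ORDER BY clause in a SQL string."""
    # Case-insensitive; tolerates newlines/extra whitespace between ORDER and BY
    # and an optional trailing semicolon; rejects None and empty strings.
    return bool(query) and bool(
        re.search(r"ORDER\s+BY\s+\S+\s*;?\s*$", query, re.IGNORECASE)
    )


# e.g. _contains_order_by_sketch("SELECT a FROM t ORDER\nBY b;")  -> True
#      _contains_order_by_sketch("select a from t LIMIT 10;")     -> False
# ---------------------------------------------------------------------------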
apache-2.0
zzcclp/spark
python/pyspark/pandas/plot/core.py
11
41921
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import importlib import pandas as pd import numpy as np from pyspark.ml.feature import Bucketizer from pyspark.mllib.stat import KernelDensity # type: ignore from pyspark.sql import functions as F from pandas.core.base import PandasObject from pandas.core.dtypes.inference import is_integer from pyspark.pandas.missing import unsupported_function from pyspark.pandas.config import get_option from pyspark.pandas.spark import functions as SF from pyspark.pandas.utils import name_like_string class TopNPlotBase: def get_top_n(self, data): from pyspark.pandas import DataFrame, Series max_rows = get_option("plotting.max_rows") # Simply use the first 1k elements and make it into a pandas dataframe # For categorical variables, it is likely called from df.x.value_counts().plot.xxx(). if isinstance(data, (Series, DataFrame)): data = data.head(max_rows + 1).to_pandas() else: raise TypeError("Only DataFrame and Series are supported for plotting.") self.partial = False if len(data) > max_rows: self.partial = True data = data.iloc[:max_rows] return data def set_result_text(self, ax): max_rows = get_option("plotting.max_rows") assert hasattr(self, "partial") if self.partial: ax.text( 1, 1, "showing top {} elements only".format(max_rows), size=6, ha="right", va="bottom", transform=ax.transAxes, ) class SampledPlotBase: def get_sampled(self, data): from pyspark.pandas import DataFrame, Series fraction = get_option("plotting.sample_ratio") if fraction is None: fraction = 1 / (len(data) / get_option("plotting.max_rows")) fraction = min(1.0, fraction) self.fraction = fraction if isinstance(data, (DataFrame, Series)): if isinstance(data, Series): data = data.to_frame() sampled = data._internal.resolved_copy.spark_frame.sample(fraction=self.fraction) return DataFrame(data._internal.with_new_sdf(sampled)).to_pandas() else: raise TypeError("Only DataFrame and Series are supported for plotting.") def set_result_text(self, ax): assert hasattr(self, "fraction") if self.fraction < 1: ax.text( 1, 1, "showing the sampled result by fraction %s" % self.fraction, size=6, ha="right", va="bottom", transform=ax.transAxes, ) class HistogramPlotBase: @staticmethod def prepare_hist_data(data, bins): # TODO: this logic is similar with KdePlotBase. Might have to deduplicate it. 
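# Descriptive note (added comment): keeps only the numeric columns of the input and,
# when `bins` is an integer, replaces it with explicit bin edges computed from the
# global min/max of the data via get_bins() below.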
from pyspark.pandas.series import Series if isinstance(data, Series): data = data.to_frame() numeric_data = data.select_dtypes( include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64] ) # no empty frames or series allowed if len(numeric_data.columns) == 0: raise TypeError( "Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__) ) if is_integer(bins): # computes boundaries for the column bins = HistogramPlotBase.get_bins(data.to_spark(), bins) return numeric_data, bins @staticmethod def get_bins(sdf, bins): # 'data' is a Spark DataFrame that selects all columns. if len(sdf.columns) > 1: min_col = F.least(*map(F.min, sdf)) max_col = F.greatest(*map(F.max, sdf)) else: min_col = F.min(sdf.columns[-1]) max_col = F.max(sdf.columns[-1]) boundaries = sdf.select(min_col, max_col).first() # divides the boundaries into bins if boundaries[0] == boundaries[1]: boundaries = (boundaries[0] - 0.5, boundaries[1] + 0.5) return np.linspace(boundaries[0], boundaries[1], bins + 1) @staticmethod def compute_hist(psdf, bins): # 'data' is a Spark DataFrame that selects one column. assert isinstance(bins, (np.ndarray, np.generic)) sdf = psdf._internal.spark_frame scols = [] input_column_names = [] for label in psdf._internal.column_labels: input_column_name = name_like_string(label) input_column_names.append(input_column_name) scols.append(psdf._internal.spark_column_for(label).alias(input_column_name)) sdf = sdf.select(*scols) # 1. Make the bucket output flat to: # +----------+-------+ # |__group_id|buckets| # +----------+-------+ # |0 |0.0 | # |0 |0.0 | # |0 |1.0 | # |0 |2.0 | # |0 |3.0 | # |0 |3.0 | # |1 |0.0 | # |1 |1.0 | # |1 |1.0 | # |1 |2.0 | # |1 |1.0 | # |1 |0.0 | # +----------+-------+ colnames = sdf.columns bucket_names = ["__{}_bucket".format(colname) for colname in colnames] output_df = None for group_id, (colname, bucket_name) in enumerate(zip(colnames, bucket_names)): # creates a Bucketizer to get corresponding bin of each value bucketizer = Bucketizer( splits=bins, inputCol=colname, outputCol=bucket_name, handleInvalid="skip" ) bucket_df = bucketizer.transform(sdf) if output_df is None: output_df = bucket_df.select( SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket") ) else: output_df = output_df.union( bucket_df.select( SF.lit(group_id).alias("__group_id"), F.col(bucket_name).alias("__bucket") ) ) # 2. Calculate the count based on each group and bucket. # +----------+-------+------+ # |__group_id|buckets| count| # +----------+-------+------+ # |0 |0.0 |2 | # |0 |1.0 |1 | # |0 |2.0 |1 | # |0 |3.0 |2 | # |1 |0.0 |2 | # |1 |1.0 |3 | # |1 |2.0 |1 | # +----------+-------+------+ result = ( output_df.groupby("__group_id", "__bucket") .agg(F.count("*").alias("count")) .toPandas() .sort_values(by=["__group_id", "__bucket"]) ) # 3. Fill empty bins and calculate based on each group id. 
From: # +----------+--------+------+ # |__group_id|__bucket| count| # +----------+--------+------+ # |0 |0.0 |2 | # |0 |1.0 |1 | # |0 |2.0 |1 | # |0 |3.0 |2 | # +----------+--------+------+ # +----------+--------+------+ # |__group_id|__bucket| count| # +----------+--------+------+ # |1 |0.0 |2 | # |1 |1.0 |3 | # |1 |2.0 |1 | # +----------+--------+------+ # # to: # +-----------------+ # |__values1__bucket| # +-----------------+ # |2 | # |1 | # |1 | # |2 | # |0 | # +-----------------+ # +-----------------+ # |__values2__bucket| # +-----------------+ # |2 | # |3 | # |1 | # |0 | # |0 | # +-----------------+ output_series = [] for i, (input_column_name, bucket_name) in enumerate(zip(input_column_names, bucket_names)): current_bucket_result = result[result["__group_id"] == i] # generates a pandas DF with one row for each bin # we need this as some of the bins may be empty indexes = pd.DataFrame({"__bucket": np.arange(0, len(bins) - 1)}) # merges the bins with counts on it and fills remaining ones with zeros pdf = indexes.merge(current_bucket_result, how="left", on=["__bucket"]).fillna(0)[ ["count"] ] pdf.columns = [input_column_name] output_series.append(pdf[input_column_name]) return output_series class BoxPlotBase: @staticmethod def compute_stats(data, colname, whis, precision): # Computes mean, median, Q1 and Q3 with approx_percentile and precision pdf = data._psdf._internal.resolved_copy.spark_frame.agg( *[ F.expr( "approx_percentile(`{}`, {}, {})".format(colname, q, int(1.0 / precision)) ).alias("{}_{}%".format(colname, int(q * 100))) for q in [0.25, 0.50, 0.75] ], F.mean("`%s`" % colname).alias("{}_mean".format(colname)), ).toPandas() # Computes IQR and Tukey's fences iqr = "{}_iqr".format(colname) p75 = "{}_75%".format(colname) p25 = "{}_25%".format(colname) pdf.loc[:, iqr] = pdf.loc[:, p75] - pdf.loc[:, p25] pdf.loc[:, "{}_lfence".format(colname)] = pdf.loc[:, p25] - whis * pdf.loc[:, iqr] pdf.loc[:, "{}_ufence".format(colname)] = pdf.loc[:, p75] + whis * pdf.loc[:, iqr] qnames = ["25%", "50%", "75%", "mean", "lfence", "ufence"] col_summ = pdf[["{}_{}".format(colname, q) for q in qnames]] col_summ.columns = qnames lfence, ufence = col_summ["lfence"], col_summ["ufence"] stats = { "mean": col_summ["mean"].values[0], "med": col_summ["50%"].values[0], "q1": col_summ["25%"].values[0], "q3": col_summ["75%"].values[0], } return stats, (lfence.values[0], ufence.values[0]) @staticmethod def outliers(data, colname, lfence, ufence): # Builds expression to identify outliers expression = F.col("`%s`" % colname).between(lfence, ufence) # Creates a column to flag rows as outliers or not return data._psdf._internal.resolved_copy.spark_frame.withColumn( "__{}_outlier".format(colname), ~expression ) @staticmethod def calc_whiskers(colname, outliers): # Computes min and max values of non-outliers - the whiskers minmax = ( outliers.filter("not `__{}_outlier`".format(colname)) .agg(F.min("`%s`" % colname).alias("min"), F.max(colname).alias("max")) .toPandas() ) return minmax.iloc[0][["min", "max"]].values @staticmethod def get_fliers(colname, outliers, min_val): # Filters only the outliers, should "showfliers" be True fliers_df = outliers.filter("`__{}_outlier`".format(colname)) # If shows fliers, takes the top 1k with highest absolute values # Here we normalize the values by subtracting the minimum value from # each, and use absolute values. 
order_col = F.abs(F.col("`{}`".format(colname)) - min_val.item()) fliers = ( fliers_df.select(F.col("`{}`".format(colname))) .orderBy(order_col) .limit(1001) .toPandas()[colname] .values ) return fliers class KdePlotBase: @staticmethod def prepare_kde_data(data): # TODO: this logic is similar with HistogramPlotBase. Might have to deduplicate it. from pyspark.pandas.series import Series if isinstance(data, Series): data = data.to_frame() numeric_data = data.select_dtypes( include=["byte", "decimal", "integer", "float", "long", "double", np.datetime64] ) # no empty frames or series allowed if len(numeric_data.columns) == 0: raise TypeError( "Empty {0!r}: no numeric data to " "plot".format(numeric_data.__class__.__name__) ) return numeric_data @staticmethod def get_ind(sdf, ind): def calc_min_max(): if len(sdf.columns) > 1: min_col = F.least(*map(F.min, sdf)) max_col = F.greatest(*map(F.max, sdf)) else: min_col = F.min(sdf.columns[-1]) max_col = F.max(sdf.columns[-1]) return sdf.select(min_col, max_col).first() if ind is None: min_val, max_val = calc_min_max() sample_range = max_val - min_val ind = np.linspace( min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, 1000, ) elif is_integer(ind): min_val, max_val = calc_min_max() sample_range = max_val - min_val ind = np.linspace( min_val - 0.5 * sample_range, max_val + 0.5 * sample_range, ind, ) return ind @staticmethod def compute_kde(sdf, bw_method=None, ind=None): # 'sdf' is a Spark DataFrame that selects one column. # Using RDD is slow so we might have to change it to Dataset based implementation # once Spark has that implementation. sample = sdf.rdd.map(lambda x: float(x[0])) kd = KernelDensity() kd.setSample(sample) assert isinstance(bw_method, (int, float)), "'bw_method' must be set as a scalar number." if bw_method is not None: # Match the bandwidth with Spark. kd.setBandwidth(float(bw_method)) return kd.estimate(list(map(float, ind))) class PandasOnSparkPlotAccessor(PandasObject): """ Series/Frames plotting accessor and method. Uses the backend specified by the option ``plotting.backend``. By default, plotly is used. Plotting methods can also be accessed by calling the accessor as a method with the ``kind`` argument: ``s.plot(kind='hist')`` is equivalent to ``s.plot.hist()`` """ pandas_plot_data_map = { "pie": TopNPlotBase().get_top_n, "bar": TopNPlotBase().get_top_n, "barh": TopNPlotBase().get_top_n, "scatter": TopNPlotBase().get_top_n, "area": SampledPlotBase().get_sampled, "line": SampledPlotBase().get_sampled, } _backends = {} # type: ignore def __init__(self, data): self.data = data @staticmethod def _find_backend(backend): """ Find a pandas-on-Spark plotting backend """ try: return PandasOnSparkPlotAccessor._backends[backend] except KeyError: try: module = importlib.import_module(backend) except ImportError: # We re-raise later on. pass else: if hasattr(module, "plot") or hasattr(module, "plot_pandas_on_spark"): # Validate that the interface is implemented when the option # is set, rather than at plot time. PandasOnSparkPlotAccessor._backends[backend] = module return module raise ValueError( "Could not find plotting backend '{backend}'. 
Ensure that you've installed " "the package providing the '{backend}' entrypoint, or that the package has a " "top-level `.plot` method.".format(backend=backend) ) @staticmethod def _get_plot_backend(backend=None): backend = backend or get_option("plotting.backend") # Shortcut if backend in PandasOnSparkPlotAccessor._backends: return PandasOnSparkPlotAccessor._backends[backend] if backend == "matplotlib": # Because matplotlib is an optional dependency, # we need to attempt an import here to raise an ImportError if needed. try: # test if matplotlib can be imported import matplotlib # noqa: F401 from pyspark.pandas.plot import matplotlib as module except ImportError: raise ImportError( "matplotlib is required for plotting when the " "default backend 'matplotlib' is selected." ) from None PandasOnSparkPlotAccessor._backends["matplotlib"] = module elif backend == "plotly": try: # test if plotly can be imported import plotly # noqa: F401 from pyspark.pandas.plot import plotly as module except ImportError: raise ImportError( "plotly is required for plotting when the " "default backend 'plotly' is selected." ) from None PandasOnSparkPlotAccessor._backends["plotly"] = module else: module = PandasOnSparkPlotAccessor._find_backend(backend) PandasOnSparkPlotAccessor._backends[backend] = module return module def __call__(self, kind="line", backend=None, **kwargs): plot_backend = PandasOnSparkPlotAccessor._get_plot_backend(backend) plot_data = self.data kind = {"density": "kde"}.get(kind, kind) if hasattr(plot_backend, "plot_pandas_on_spark"): # use if there's pandas-on-Spark specific method. return plot_backend.plot_pandas_on_spark(plot_data, kind=kind, **kwargs) else: # fallback to use pandas' if not PandasOnSparkPlotAccessor.pandas_plot_data_map[kind]: raise NotImplementedError( "'%s' plot is not supported with '%s' plot " "backend yet." % (kind, plot_backend.__name__) ) plot_data = PandasOnSparkPlotAccessor.pandas_plot_data_map[kind](plot_data) return plot_backend.plot(plot_data, kind=kind, **kwargs) def line(self, x=None, y=None, **kwargs): """ Plot DataFrame/Series as lines. This function is useful to plot lines using Series's values as coordinates. Parameters ---------- x : int or str, optional Columns to use for the horizontal axis. Either the location or the label of the columns to be used. By default, it will use the DataFrame indices. y : int, str, or list of them, optional The values to be plotted. Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. **kwds Keyword arguments to pass on to :meth:`Series.plot` or :meth:`DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.line : Plot y versus x as lines and/or markers (plotly). matplotlib.pyplot.plot : Plot y versus x as lines and/or markers (matplotlib). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.line() # doctest: +SKIP For DataFrame: .. plotly:: The following example shows the populations for some animals over the years. >>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776], ... 'horse': [4, 25, 281, 600, 1900]}, ... index=[1990, 1997, 2003, 2009, 2014]) >>> df.plot.line() # doctest: +SKIP .. plotly:: The following example shows the relationship between both populations. >>> df = ps.DataFrame({'pig': [20, 18, 489, 675, 1776], ... 
'horse': [4, 25, 281, 600, 1900]}, ... index=[1990, 1997, 2003, 2009, 2014]) >>> df.plot.line(x='pig', y='horse') # doctest: +SKIP """ return self(kind="line", x=x, y=y, **kwargs) def bar(self, x=None, y=None, **kwds): """ Vertical bar plot. Parameters ---------- x : label or position, optional Allows plotting of one column versus another. If not specified, the index of the DataFrame is used. y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. **kwds : optional Additional keyword arguments are documented in :meth:`pyspark.pandas.Series.plot` or :meth:`pyspark.pandas.DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.bar() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.plot.bar(x='lab', y='val') # doctest: +SKIP Plot a whole dataframe to a bar plot. Each column is stacked with a distinct color along the horizontal axis. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar() # doctest: +SKIP Instead of stacking, the figure can be split by column with plotly APIs. .. plotly:: >>> from plotly.subplots import make_subplots >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> fig = (make_subplots(rows=2, cols=1) ... .add_trace(df.plot.bar(y='speed').data[0], row=1, col=1) ... .add_trace(df.plot.bar(y='speed').data[0], row=1, col=1) ... .add_trace(df.plot.bar(y='lifespan').data[0], row=2, col=1)) >>> fig # doctest: +SKIP Plot a single column. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar(y='speed') # doctest: +SKIP Plot only selected categories for the DataFrame. .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.bar(x='lifespan') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="bar", **kwds) elif isinstance(self.data, DataFrame): return self(kind="bar", x=x, y=y, **kwds) def barh(self, x=None, y=None, **kwargs): """ Make a horizontal bar plot. A horizontal bar plot is a plot that presents quantitative data with rectangular bars with lengths proportional to the values that they represent. A bar plot shows comparisons among discrete categories. One axis of the plot shows the specific categories being compared, and the other axis represents a measured value. Parameters ---------- x : label or position, default DataFrame.index Column to be used for categories. 
y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. **kwds Keyword arguments to pass on to :meth:`pyspark.pandas.DataFrame.plot` or :meth:`pyspark.pandas.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.bar : Plot a vertical bar plot using plotly. matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. Examples -------- For Series: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.val.plot.barh() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) >>> df.plot.barh(x='lab', y='val') # doctest: +SKIP Plot a whole DataFrame to a horizontal bar plot .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh() # doctest: +SKIP Plot a column of the DataFrame to a horizontal bar plot .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh(y='speed') # doctest: +SKIP Plot DataFrame versus the desired column .. plotly:: >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] >>> index = ['snail', 'pig', 'elephant', ... 'rabbit', 'giraffe', 'coyote', 'horse'] >>> df = ps.DataFrame({'speed': speed, ... 'lifespan': lifespan}, index=index) >>> df.plot.barh(x='lifespan') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="barh", **kwargs) elif isinstance(self.data, DataFrame): return self(kind="barh", x=x, y=y, **kwargs) def box(self, **kwds): """ Make a box plot of the Series columns. Parameters ---------- **kwds : optional Additional keyword arguments are documented in :meth:`pyspark.pandas.Series.plot`. precision: scalar, default = 0.01 This argument is used by pandas-on-Spark to compute approximate statistics for building a boxplot. Use *smaller* values to get more precise statistics (matplotlib-only). Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Notes ----- There are behavior differences between pandas-on-Spark and pandas. * pandas-on-Spark computes approximate statistics - expect differences between pandas and pandas-on-Spark boxplots, especially regarding 1st and 3rd quartiles. * The `whis` argument is only supported as a single number. * pandas-on-Spark doesn't support the following argument(s) (matplotlib-only). * `bootstrap` argument is not supported * `autorange` argument is not supported Examples -------- Draw a box plot from a DataFrame with four columns of randomly generated data. For Series: .. 
plotly:: >>> data = np.random.randn(25, 4) >>> df = ps.DataFrame(data, columns=list('ABCD')) >>> df['A'].plot.box() # doctest: +SKIP This is an unsupported function for DataFrame type """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="box", **kwds) elif isinstance(self.data, DataFrame): return unsupported_function(class_name="pd.DataFrame", method_name="box")() def hist(self, bins=10, **kwds): """ Draw one histogram of the DataFrame’s columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`plotting.backend.plot`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- bins : integer or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. **kwds All other plotting keyword arguments to be passed to plotting backend. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- Basic plot. For Series: .. plotly:: >>> s = ps.Series([1, 3, 2]) >>> s.plot.hist() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = pd.DataFrame( ... np.random.randint(1, 7, 6000), ... columns=['one']) >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) >>> df = ps.from_pandas(df) >>> df.plot.hist(bins=12, alpha=0.5) # doctest: +SKIP """ return self(kind="hist", bins=bins, **kwds) def kde(self, bw_method=None, ind=None, **kwargs): """ Generate Kernel Density Estimate plot using Gaussian kernels. Parameters ---------- bw_method : scalar The method used to calculate the estimator bandwidth. See KernelDensity in PySpark for more information. ind : NumPy array or integer, optional Evaluation points for the estimated PDF. If None (default), 1000 equally spaced points are used. If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. **kwargs : optional Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- A scalar bandwidth should be specified. Using a small bandwidth value can lead to over-fitting, while using a large bandwidth value may result in under-fitting: .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(bw_method=0.3) # doctest: +SKIP .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(bw_method=3) # doctest: +SKIP The `ind` parameter determines the evaluation points for the plot of the estimated KDF: .. plotly:: >>> s = ps.Series([1, 2, 2.5, 3, 3.5, 4, 5]) >>> s.plot.kde(ind=[1, 2, 3, 4, 5], bw_method=0.3) # doctest: +SKIP For DataFrame, it works in the same way as Series: .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(bw_method=0.3) # doctest: +SKIP .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(bw_method=3) # doctest: +SKIP .. plotly:: >>> df = ps.DataFrame({ ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], ... 
'y': [4, 4, 4.5, 5, 5.5, 6, 6], ... }) >>> df.plot.kde(ind=[1, 2, 3, 4, 5, 6], bw_method=0.3) # doctest: +SKIP """ return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs) density = kde def area(self, x=None, y=None, **kwds): """ Draw a stacked area plot. An area plot displays quantitative data visually. This function wraps the plotly area function. Parameters ---------- x : label or position, optional Coordinates for the X axis. By default uses the index. y : label or position, optional Column to plot. By default uses all columns. stacked : bool, default True Area plots are stacked by default. Set to False to create a unstacked plot (matplotlib-only). **kwds : optional Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- For Series .. plotly:: >>> df = ps.DataFrame({ ... 'sales': [3, 2, 3, 9, 10, 6], ... 'signups': [5, 5, 6, 12, 14, 13], ... 'visits': [20, 42, 28, 62, 81, 50], ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', ... freq='M')) >>> df.sales.plot.area() # doctest: +SKIP For DataFrame .. plotly:: >>> df = ps.DataFrame({ ... 'sales': [3, 2, 3, 9, 10, 6], ... 'signups': [5, 5, 6, 12, 14, 13], ... 'visits': [20, 42, 28, 62, 81, 50], ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', ... freq='M')) >>> df.plot.area() # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="area", **kwds) elif isinstance(self.data, DataFrame): return self(kind="area", x=x, y=y, **kwds) def pie(self, **kwds): """ Generate a pie plot. A pie plot is a proportional representation of the numerical data in a column. This function wraps :meth:`plotly.express.pie` for the specified column. Parameters ---------- y : int or label, optional Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed (matplotlib-only). **kwds Keyword arguments to pass on to :meth:`pandas-on-Spark.Series.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). Examples -------- For Series: .. plotly:: >>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97], ... 'radius': [2439.7, 6051.8, 6378.1]}, ... index=['Mercury', 'Venus', 'Earth']) >>> df.mass.plot.pie() # doctest: +SKIP For DataFrame: .. plotly:: >>> df = ps.DataFrame({'mass': [0.330, 4.87, 5.97], ... 'radius': [2439.7, 6051.8, 6378.1]}, ... index=['Mercury', 'Venus', 'Earth']) >>> df.plot.pie(y='mass') # doctest: +SKIP """ from pyspark.pandas import DataFrame, Series if isinstance(self.data, Series): return self(kind="pie", **kwds) else: # pandas will raise an error if y is None and subplots if not True if ( isinstance(self.data, DataFrame) and kwds.get("y", None) is None and not kwds.get("subplots", False) ): raise ValueError( "pie requires either y column or 'subplots=True' (matplotlib-only)" ) return self(kind="pie", **kwds) def scatter(self, x, y, **kwds): """ Create a scatter plot with varying marker point size and color. The coordinates of each point are defined by two dataframe columns and filled circles are used to represent each point. This kind of plot is useful to see complex correlations between two variables. 
Points could be for instance natural 2D coordinates like longitude and latitude in a map or, in general, any pair of metrics that can be plotted against each other. Parameters ---------- x : int or str The column name or column position to be used as horizontal coordinates for each point. y : int or str The column name or column position to be used as vertical coordinates for each point. s : scalar or array_like, optional (matplotlib-only). c : str, int or array_like, optional (matplotlib-only). **kwds: Optional Keyword arguments to pass on to :meth:`pyspark.pandas.DataFrame.plot`. Returns ------- :class:`plotly.graph_objs.Figure` Return an custom object when ``backend!=plotly``. Return an ndarray when ``subplots=True`` (matplotlib-only). See Also -------- plotly.express.scatter : Scatter plot using multiple input data formats (plotly). matplotlib.pyplot.scatter : Scatter plot using multiple input data formats (matplotlib). Examples -------- Let's see how to draw a scatter plot using coordinates from the values in a DataFrame's columns. .. plotly:: >>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], ... [6.4, 3.2, 1], [5.9, 3.0, 2]], ... columns=['length', 'width', 'species']) >>> df.plot.scatter(x='length', y='width') # doctest: +SKIP And now with dark scheme: .. plotly:: >>> df = ps.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], ... [6.4, 3.2, 1], [5.9, 3.0, 2]], ... columns=['length', 'width', 'species']) >>> fig = df.plot.scatter(x='length', y='width') >>> fig.update_layout(template="plotly_dark") # doctest: +SKIP """ return self(kind="scatter", x=x, y=y, **kwds) def hexbin(self, **kwds): return unsupported_function(class_name="pd.DataFrame", method_name="hexbin")()
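A minimal usage sketch for the plotting accessor defined above (illustrative, not part of the module): it assumes a running Spark session, `pyspark.pandas` importable as `ps`, and the default plotly backend installed; the data values are made up for the example.

```python
import pyspark.pandas as ps

psdf = ps.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})

# __call__ resolves the backend via _get_plot_backend() and dispatches kind="bar".
fig = psdf.plot.bar(x="lab", y="val")

# Changing the option makes the next call resolve a different backend module.
ps.set_option("plotting.backend", "matplotlib")
ax = psdf.plot.bar(x="lab", y="val")
```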
apache-2.0
yanchen036/tensorflow
tensorflow/contrib/labeled_tensor/python/ops/ops.py
7
46402
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-core ops for LabeledTensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import types import numpy as np from six import string_types from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc from tensorflow.contrib.labeled_tensor.python.ops import core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import numerics from tensorflow.python.ops import random_ops from tensorflow.python.training import input # pylint: disable=redefined-builtin @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis, tc.Optional(string_types)) def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None): with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope: temp_axes = core.Axes([axis] + list( labeled_tensor.axes.remove(axis.name).values())) transposed = core.transpose(labeled_tensor, temp_axes.keys()) indexed = core.LabeledTensor( array_ops.gather(transposed.tensor, indexer), temp_axes) return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, tc.Union(slice, collections.Hashable, list)), tc.Optional(string_types)) def select(labeled_tensor, selection, name=None): """Slice out a subset of the tensor. Args: labeled_tensor: The input tensor. selection: A dictionary mapping an axis name to a scalar, slice or list of values to select. Currently supports two types of selections: (a) Any number of scalar and/or slice selections. (b) Exactly one list selection, without any scalars or slices. name: Optional op name. Returns: The selection as a `LabeledTensor`. Raises: ValueError: If the tensor doesn't have an axis in the selection or if that axis lacks labels. KeyError: If any labels in a selection are not found in the original axis. NotImplementedError: If you attempt to combine a list selection with scalar selection or another list selection. """ with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) slices = {} indexers = {} for axis_name, value in selection.items(): if axis_name not in labeled_tensor.axes: raise ValueError( 'The tensor does not have an axis named %s. Its axes are: %r' % (axis_name, labeled_tensor.axes.keys())) axis = labeled_tensor.axes[axis_name] if axis.labels is None: raise ValueError( 'The axis named %s does not have labels. 
The axis is: %r' % (axis_name, axis)) if isinstance(value, slice): # TODO(shoyer): consider deprecating using slices in favor of lists if value.start is None: start = None else: start = axis.index(value.start) if value.stop is None: stop = None else: # For now, follow the pandas convention of making labeled slices # inclusive of both bounds. stop = axis.index(value.stop) + 1 if value.step is not None: raise NotImplementedError('slicing with a step is not yet supported') slices[axis_name] = slice(start, stop) # Needs to be after checking for slices, since slice objects claim to be # instances of collections.Hashable but hash() on them fails. elif isinstance(value, collections.Hashable): slices[axis_name] = axis.index(value) elif isinstance(value, list): if indexers: raise NotImplementedError( 'select does not yet support more than one list selection at ' 'the same time') indexer = [axis.index(v) for v in value] indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64) else: # If type checking is working properly, this shouldn't be possible. raise TypeError('cannot handle arbitrary types') if indexers and slices: raise NotImplementedError( 'select does not yet support combined scalar and list selection') # For now, handle array selection separately, because tf.gather_nd does # not support gradients yet. Later, using gather_nd will let us combine # these paths. if indexers: (axis_name, indexer), = indexers.items() axis = core.Axis(axis_name, selection[axis_name]) return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope) else: return core.slice_function(labeled_tensor, slices, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts( tc.Collection(core.LabeledTensorLike), string_types, tc.Optional(string_types)) def concat(labeled_tensors, axis_name, name=None): """Concatenate tensors along a dimension. See tf.concat. Args: labeled_tensors: A list of input LabeledTensors. axis_name: The name of the axis along which to concatenate. name: Optional op name. Returns: The concatenated tensor. The coordinate labels for the concatenation dimension are also concatenated, if they are available for every tensor. Raises: ValueError: If fewer than one tensor inputs is provided, if the tensors have incompatible axes, or if `axis_name` isn't the name of an axis. """ with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] if len(labeled_tensors) < 1: raise ValueError('concat expects at least 1 tensor, but received %s' % labeled_tensors) # All tensors must have these axes. axes_0 = labeled_tensors[0].axes axis_names = list(axes_0.keys()) if axis_name not in axis_names: raise ValueError('%s not in %s' % (axis_name, axis_names)) shared_axes = axes_0.remove(axis_name) tensors = [labeled_tensors[0].tensor] concat_axis_list = [axes_0[axis_name]] for labeled_tensor in labeled_tensors[1:]: current_shared_axes = labeled_tensor.axes.remove(axis_name) if current_shared_axes != shared_axes: # TODO(shoyer): add more specific checks about what went wrong, # including raising AxisOrderError when appropriate raise ValueError('Mismatched shared axes: the first tensor ' 'had axes %r but this tensor has axes %r.' % (shared_axes, current_shared_axes)) # Accumulate the axis labels, if they're available. 
concat_axis_list.append(labeled_tensor.axes[axis_name]) tensors.append(labeled_tensor.tensor) concat_axis = core.concat_axes(concat_axis_list) concat_dimension = axis_names.index(axis_name) concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope) values = list(axes_0.values()) concat_axes = (values[:concat_dimension] + [concat_axis] + values[concat_dimension + 1:]) return core.LabeledTensor(concat_tensor, concat_axes) # TODO(shoyer): rename pack/unpack to stack/unstack @tc.returns(core.LabeledTensor) @tc.accepts( tc.Collection(core.LabeledTensorLike), tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types)) def pack(labeled_tensors, new_axis, axis_position=0, name=None): """Pack tensors along a new axis. See tf.pack. Args: labeled_tensors: The input tensors, which must have identical axes. new_axis: The name of the new axis, or a tuple containing the name and coordinate labels. axis_position: Optional integer position at which to insert the new axis. name: Optional op name. Returns: The packed tensors as a single LabeledTensor, with `new_axis` in the given `axis_position`. Raises: ValueError: If fewer than one input tensors is provided, or if the tensors don't have identical axes. """ with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] if len(labeled_tensors) < 1: raise ValueError('pack expects at least 1 tensors, but received %s' % labeled_tensors) axes_0 = labeled_tensors[0].axes for t in labeled_tensors: if t.axes != axes_0: raise ValueError('Non-identical axes. Expected %s but got %s' % (axes_0, t.axes)) pack_op = array_ops.stack( [t.tensor for t in labeled_tensors], axis=axis_position, name=scope) axes = list(axes_0.values()) axes.insert(axis_position, new_axis) return core.LabeledTensor(pack_op, axes) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts(core.LabeledTensorLike, tc.Optional(string_types), tc.Optional(string_types)) def unpack(labeled_tensor, axis_name=None, name=None): """Unpack the tensor. See tf.unpack. Args: labeled_tensor: The input tensor. axis_name: Optional name of axis to unpack. By default, the first axis is used. name: Optional op name. Returns: The list of unpacked LabeledTensors. Raises: ValueError: If `axis_name` is not an axis on the input. """ with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) axis_names = list(labeled_tensor.axes.keys()) if axis_name is None: axis_name = axis_names[0] if axis_name not in axis_names: raise ValueError('%s not in %s' % (axis_name, axis_names)) axis = axis_names.index(axis_name) unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope) axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis] return [core.LabeledTensor(t, axes) for t in unpack_ops] @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Collection(string_types), tc.Collection(tc.Union(string_types, core.AxisLike)), tc.Optional(string_types)) def reshape(labeled_tensor, existing_axes, new_axes, name=None): """Reshape specific axes of a LabeledTensor. Non-indicated axes remain in their original locations. Args: labeled_tensor: The input tensor. existing_axes: List of axis names found on the input tensor. These must appear sequentially in the list of axis names on the input. In other words, they must be a valid slice of `list(labeled_tensor.axes.keys())`. 
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects providing new axes with which to replace `existing_axes` in the reshaped result. At most one element of `new_axes` may be a string, indicating an axis with unknown size. name: Optional op name. Returns: The reshaped LabeledTensor. Raises: ValueError: If `existing_axes` are not all axes on the input, or if more than one of `new_axes` has unknown size. AxisOrderError: If `existing_axes` are not a slice of axis names on the input. """ with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) original_axis_names = list(labeled_tensor.axes.keys()) existing_axes = list(existing_axes) if not set(existing_axes) <= set(original_axis_names): raise ValueError('existing_axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (existing_axes, original_axis_names)) start = original_axis_names.index(existing_axes[0]) stop = original_axis_names.index(existing_axes[-1]) + 1 if existing_axes != original_axis_names[start:stop]: # We could support existing_axes that aren't a slice by using transpose, # but that could lead to unpredictable performance consequences because # transposes are not free in TensorFlow. If we did transpose # automatically, the user might never realize that their data is being # produced with the wrong order. (The later will occur with some frequency # because of how broadcasting automatically choose axis order.) # So for now we've taken the strict approach. raise core.AxisOrderError( 'existing_axes %r are not a slice of axis names %r on the input ' 'labeled tensor. Use `transpose` or `impose_axis_order` to reorder ' 'axes on the input explicitly.' % (existing_axes, original_axis_names)) if sum(isinstance(axis, string_types) for axis in new_axes) > 1: raise ValueError( 'at most one axis in new_axes can have unknown size. All other ' 'axes must have an indicated integer size or labels: %r' % new_axes) original_values = list(labeled_tensor.axes.values()) axis_size = lambda axis: -1 if axis.size is None else axis.size shape = [axis_size(axis) for axis in original_values[:start]] for axis_ref in new_axes: if isinstance(axis_ref, string_types): shape.append(-1) else: axis = core.as_axis(axis_ref) shape.append(axis_size(axis)) shape.extend(axis_size(axis) for axis in original_values[stop:]) reshaped_tensor = array_ops.reshape( labeled_tensor.tensor, shape, name=scope) axes = original_values[:start] + list(new_axes) + original_values[stop:] return core.LabeledTensor(reshaped_tensor, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, string_types, string_types, tc.Optional(string_types)) def rename_axis(labeled_tensor, existing_name, new_name, name=None): """Rename an axis of LabeledTensor. Args: labeled_tensor: The input tensor. existing_name: Name for an existing axis on the input. new_name: Desired replacement name. name: Optional op name. Returns: LabeledTensor with renamed axis. Raises: ValueError: If `existing_name` is not an axis on the input. 
""" with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope: if existing_name not in labeled_tensor.axes: raise ValueError('existing_name %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (existing_name, labeled_tensor.axes.keys())) new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value) return reshape(labeled_tensor, [existing_name], [new_axis], name=scope) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts(string_types, collections.Callable, int, bool, tc.Collection(core.LabeledTensorLike), bool, tc.Optional(string_types)) def _batch_helper(default_name, batch_fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name=None): with ops.name_scope(name, default_name, labeled_tensors) as scope: labeled_tensors = [ core.convert_to_labeled_tensor(lt) for lt in labeled_tensors ] batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope) # TODO(shoyer): Remove this when they sanitize the TF API. if not isinstance(batch_ops, list): assert isinstance(batch_ops, ops.Tensor) batch_ops = [batch_ops] if allow_smaller_final_batch: batch_size = None @tc.returns(core.Axes) @tc.accepts(core.Axes) def output_axes(axes): if enqueue_many: if 'batch' not in axes or list(axes.keys()).index('batch') != 0: raise ValueError( 'When enqueue_many is True, input tensors must have an axis ' 'called "batch" as their first dimension, ' 'but axes were %s' % axes) culled_axes = axes.remove('batch') return core.Axes([('batch', batch_size)] + list(culled_axes.values())) else: return core.Axes([('batch', batch_size)] + list(axes.values())) output_labeled_tensors = [] for i, tensor in enumerate(batch_ops): axes = output_axes(labeled_tensors[i].axes) output_labeled_tensors.append(core.LabeledTensor(tensor, axes)) return output_labeled_tensors @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts( tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool, tc.Optional(string_types)) def batch(labeled_tensors, batch_size, num_threads=1, capacity=32, enqueue_many=False, allow_smaller_final_batch=False, name=None): """Rebatch a tensor. See tf.batch. Args: labeled_tensors: The input tensors. batch_size: The output batch size. num_threads: See tf.batch. capacity: See tf.batch. enqueue_many: If true, the input tensors must contain a 'batch' axis as their first axis. If false, the input tensors must not contain a 'batch' axis. See tf.batch. allow_smaller_final_batch: See tf.batch. name: Optional op name. Returns: The rebatched tensors. If enqueue_many is false, the output tensors will have a new 'batch' axis as their first axis. Raises: ValueError: If enqueue_many is True and the first axis of the tensors isn't "batch". """ def fn(tensors, scope): return input.batch( tensors, batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=enqueue_many, allow_smaller_final_batch=allow_smaller_final_batch, name=scope) return _batch_helper('lt_batch', fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name) @tc.returns(tc.List(core.LabeledTensor)) @tc.accepts( tc.Collection(core.LabeledTensorLike), int, int, int, bool, int, tc.Optional(int), bool, tc.Optional(string_types)) def shuffle_batch(labeled_tensors, batch_size, num_threads=1, capacity=32, enqueue_many=False, min_after_dequeue=0, seed=None, allow_smaller_final_batch=False, name=None): """Rebatch a tensor, with shuffling. See tf.batch. Args: labeled_tensors: The input tensors. batch_size: The output batch size. num_threads: See tf.batch. 
capacity: See tf.batch. enqueue_many: If true, the input tensors must contain a 'batch' axis as their first axis. If false, the input tensors must not contain a 'batch' axis. See tf.batch. min_after_dequeue: Minimum number of elements in the queue after a dequeue, used to ensure mixing. seed: Optional random seed. allow_smaller_final_batch: See tf.batch. name: Optional op name. Returns: The rebatched tensors. If enqueue_many is false, the output tensors will have a new 'batch' axis as their first axis. Raises: ValueError: If enqueue_many is True and the first axis of the tensors isn't "batch". """ def fn(tensors, scope): return input.shuffle_batch( tensors, batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=enqueue_many, min_after_dequeue=min_after_dequeue, seed=seed, allow_smaller_final_batch=allow_smaller_final_batch, name=scope) return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many, labeled_tensors, allow_smaller_final_batch, name) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, int), tc.Optional(int), tc.Optional(string_types)) def random_crop(labeled_tensor, shape_map, seed=None, name=None): """Randomly crops a tensor to a given size. See tf.random_crop. Args: labeled_tensor: The input tensor. shape_map: A dictionary mapping axis names to the size of the random crop for that dimension. seed: An optional random seed. name: An optional op name. Returns: A tensor of the same rank as `labeled_tensor`, cropped randomly in the selected dimensions. Raises: ValueError: If the shape map contains an axis name not in the input tensor. """ with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) for axis_name in shape_map: if axis_name not in labeled_tensor.axes: raise ValueError('Selection axis %s not in axes %s' % (axis_name, labeled_tensor.axes)) shape = [] axes = [] for axis in labeled_tensor.axes.values(): if axis.name in shape_map: size = shape_map[axis.name] shape.append(size) # We lose labels for the axes we crop, leaving just the size. axes.append((axis.name, size)) else: shape.append(len(axis)) axes.append(axis) crop_op = random_ops.random_crop( labeled_tensor.tensor, shape, seed=seed, name=scope) return core.LabeledTensor(crop_op, axes) # TODO(shoyer): Allow the user to select the axis over which to map. @tc.returns(core.LabeledTensor) @tc.accepts(collections.Callable, core.LabeledTensorLike, tc.Optional(string_types)) def map_fn(fn, labeled_tensor, name=None): """Map on the list of tensors unpacked from labeled_tensor. See tf.map_fn. Args: fn: The function to apply to each unpacked LabeledTensor. It should have type LabeledTensor -> LabeledTensor. labeled_tensor: The input tensor. name: Optional op name. Returns: A tensor that packs the results of applying fn to the list of tensors unpacked from labeled_tensor. """ with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) unpack_lts = unpack(labeled_tensor) # TODO(ericmc): Fix this upstream. if labeled_tensor.dtype == dtypes.string: # We must construct the full graph here, because functional_ops.map_fn # doesn't work for string-valued tensors. # Constructing the full graph may be slow. 
map_lts = [fn(t) for t in unpack_lts] return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope) else: # Figure out what the axis labels should be, but use tf.map_fn to # construct the graph because it's efficient. # It may be slow to construct the full graph, so we infer the labels from # the first element. # TODO(ericmc): This builds a subgraph which then gets thrown away. # Find a more elegant solution. first_map_lt = fn(unpack_lts[0]) final_axes = list(labeled_tensor.axes.values())[:1] + list( first_map_lt.axes.values()) @tc.returns(ops.Tensor) @tc.accepts(ops.Tensor) def tf_fn(tensor): original_axes = list(labeled_tensor.axes.values())[1:] tensor_lt = core.LabeledTensor(tensor, original_axes) return fn(tensor_lt).tensor map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor) map_lt = core.LabeledTensor(map_op, final_axes) return core.identity(map_lt, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(collections.Callable, core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def foldl(fn, labeled_tensor, initial_value, name=None): """Left fold on the list of tensors unpacked from labeled_tensor. See tf.foldl. Args: fn: The function to apply to each unpacked LabeledTensor. It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor. Its arguments are (accumulated_value, next_value). labeled_tensor: The input tensor. initial_value: The initial value of the accumulator. name: Optional op name. Returns: The accumulated value. """ with ops.name_scope(name, 'lt_foldl', [labeled_tensor, initial_value]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) initial_value = core.convert_to_labeled_tensor(initial_value) @tc.returns(ops.Tensor) @tc.accepts(ops.Tensor, ops.Tensor) def tf_fn(accumulator, next_element): accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes) next_element_lt = core.LabeledTensor( next_element, list(labeled_tensor.axes.values())[1:]) return fn(accumulator_lt, next_element_lt).tensor foldl_op = functional_ops.foldl( tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor) foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes) return core.identity(foldl_lt, name=scope) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(tc.Collection(string_types)), tc.Optional(string_types)) def squeeze(labeled_tensor, axis_names=None, name=None): """Remove size-1 dimensions. See tf.squeeze. Args: labeled_tensor: The input tensor. axis_names: The names of the dimensions to remove, or None to remove all size-1 dimensions. name: Optional op name. Returns: A tensor with the specified dimensions removed. Raises: ValueError: If the named axes are not in the tensor, or if they are not size-1. 
""" with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if axis_names is None: axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1] for axis_name in axis_names: if axis_name not in labeled_tensor.axes: raise ValueError('axis %s is not in tensor axes %s' % (axis_name, labeled_tensor.axes)) elif len(labeled_tensor.axes[axis_name]) != 1: raise ValueError( 'cannot squeeze axis with size greater than 1: (%s, %s)' % (axis_name, labeled_tensor.axes[axis_name])) squeeze_dimensions = [] axes = [] for i, axis in enumerate(labeled_tensor.axes.values()): if axis.name in axis_names: squeeze_dimensions.append(i) else: axes.append(axis) if squeeze_dimensions: squeeze_op = array_ops.squeeze( labeled_tensor.tensor, squeeze_dimensions, name=scope) else: squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope) return core.LabeledTensor(squeeze_op, axes) # pylint: disable=invalid-name ReduceAxis = tc.Union(string_types, tc.Tuple(string_types, collections.Hashable)) ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis))) # pylint: enable=invalid-name @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def matmul(a, b, name=None): """Matrix multiply two tensors with rank 1 or 2. If both tensors have rank 2, a matrix-matrix product is performed. If one tensor has rank 1 and the other has rank 2, then a matrix-vector product is performed. If both tensors have rank 1, then a vector dot-product is performed. (This behavior matches that of `numpy.dot`.) Both tensors must share exactly one dimension in common, which is the dimension the operation is summed along. The inputs will be automatically transposed if necessary as part of the matmul op. We intend to eventually support `matmul` on higher rank input, and also eventually support summing over any number shared dimensions (via an `axis` argument), but neither of these features has been implemented yet. Args: a: First LabeledTensor. b: Second LabeledTensor. name: Optional op name. Returns: LabeledTensor with the result of matrix multiplication. Axes are ordered by the current axis_order_scope, if set, or in or order of appearance on the inputs. Raises: NotImplementedError: If inputs have rank >2 or share multiple axes. ValueError: If the inputs have rank 0 or do not share any axes. """ with ops.name_scope(name, 'lt_matmul', [a, b]) as scope: a = core.convert_to_labeled_tensor(a) b = core.convert_to_labeled_tensor(b) if len(a.axes) > 2 or len(b.axes) > 2: # We could pass batched inputs to tf.matmul to make this work, but we # would also need to use tf.tile and/or tf.transpose. These are more # expensive than doing reshapes, so it's not clear if it's a good idea to # do this automatically. raise NotImplementedError( 'matmul currently requires inputs with rank 2 or less, but ' 'inputs have ranks %r and %r' % (len(a.axes), len(b.axes))) if not a.axes or not b.axes: raise ValueError( 'matmul currently requires inputs with at least rank 1, but ' 'inputs have ranks %r and %r' % (len(a.axes), len(b.axes))) shared_axes = set(a.axes) & set(b.axes) if len(shared_axes) > 1: raise NotImplementedError( 'matmul does not yet support summing over multiple shared axes: %r. ' 'Use transpose and reshape to create a single shared axis to sum ' 'over.' 
% shared_axes) if not shared_axes: raise ValueError('there must have exactly one axis in common between ' 'input to matmul: %r, %r' % (a.axes.keys(), b.axes.keys())) shared_axis, = shared_axes if a.axes[shared_axis] != b.axes[shared_axis]: raise ValueError('axis %r does not match on input arguments: %r vs %r' % (shared_axis, a.axes[shared_axis].value, b.axes[shared_axis].value)) result_axes = [] for axes in [a.axes, b.axes]: for axis in axes.values(): if axis.name != shared_axis: result_axes.append(axis) axis_scope_order = core.get_axis_order() if axis_scope_order is not None: result_axis_names = [axis.name for axis in result_axes] new_axis_names = [ name for name in axis_scope_order if name in result_axis_names ] if new_axis_names != result_axis_names: # switch a and b b, a = a, b # result_axes is a list of length 1 or 2 result_axes = result_axes[::-1] squeeze_dims = [] if len(a.axes) == 1: a_tensor = array_ops.reshape(a.tensor, (1, -1)) squeeze_dims.append(0) transpose_a = False else: a_tensor = a.tensor transpose_a = list(a.axes.keys()).index(shared_axis) == 0 if len(b.axes) == 1: b_tensor = array_ops.reshape(b.tensor, (-1, 1)) squeeze_dims.append(1) transpose_b = False else: b_tensor = b.tensor transpose_b = list(b.axes.keys()).index(shared_axis) == 1 result_op = math_ops.matmul( a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b) if squeeze_dims: result_op = array_ops.squeeze(result_op, squeeze_dims) result_op = array_ops.identity(result_op, name=scope) return core.LabeledTensor(result_op, result_axes) @tc.returns(types.FunctionType) @tc.accepts(string_types, collections.Callable) def define_reduce_op(op_name, reduce_fn): """Define a reduction op for labeled tensors. Args: op_name: string name of the TensorFlow op. reduce_fn: function to call to evaluate the op on a tf.Tensor. Returns: Function defining the given reduction op that acts on a LabeledTensor. """ default_name = 'lt_%s' % op_name @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types)) def op(labeled_tensor, axes=None, name=None): """Computes the given reduction across the given axes of a LabeledTensor. See `tf.{op_name}` for full details. Args: labeled_tensor: The input tensor. axes: A set of axes or None. If None, all axes will be reduced. Axes must all be strings, in which case those dimensions will be removed, or pairs of (name, None) or (name, label), in which case those dimensions will be kept. name: Optional op name. Returns: The reduced LabeledTensor. Raises: ValueError: if any of the axes to reduce over are not found on `labeled_tensor`. """ with ops.name_scope(name, default_name, [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if axes is None: axes = labeled_tensor.axes.keys() if isinstance(axes, (string_types, tuple)): axes = [axes] reduction_axes = {} axes_to_squeeze = [] for a in axes: if isinstance(a, string_types): # We squeeze out this axis. reduction_axes[a] = a axes_to_squeeze.append(a) else: # We keep this axis, with the user-provided labels. (axis_name, label) = a if label is not None: # The input was a single label, so make it a list so it can be # turned into an Axis. 
label = [label] reduction_axes[axis_name] = (axis_name, label) for axis_name in reduction_axes: if axis_name not in labeled_tensor.axes: raise ValueError('Axis %s not in axes %s' % (axis_name, labeled_tensor.axes)) intermediate_axes = [] reduction_dimensions = [] for i, axis in enumerate(labeled_tensor.axes.values()): if axis.name in reduction_axes: intermediate_axes.append(reduction_axes[axis.name]) reduction_dimensions.append(i) else: intermediate_axes.append(axis) reduce_op = reduce_fn( labeled_tensor.tensor, reduction_dimensions, keepdims=True) reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes) return squeeze(reduce_lt, axes_to_squeeze, name=scope) op.__doc__ = op.__doc__.format(op_name=op_name) op.__name__ = op_name return op reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all) reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any) reduce_logsumexp = define_reduce_op('reduce_logsumexp', math_ops.reduce_logsumexp) reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max) reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean) reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min) reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod) reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Union(int, ops.Tensor)), tc.Optional(string_types)) def tile(labeled_tensor, multiples, name=None): """Constructs a tensor by tiling a given tensor. Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled tensors would no longer be unique.) See lt.tile. Args: labeled_tensor: The input tensor. multiples: A mapping where the keys are axis names and the values are the integer number of times to tile along that axis. Only axes with a multiple different than 1 need be included. name: Optional op name. Returns: A tensor with the indicated axes tiled. Raises: ValueError: If the tiled axes are not axes in the input tensor, or if any axes in multiples have tick labels. """ with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()): raise ValueError('tile axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (multiples.keys(), labeled_tensor.axes)) labeled_axes = [ name for name in multiples if labeled_tensor.axes[name].labels is not None ] if labeled_axes: raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes) multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes] tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope) new_axes = [ axis.name if axis.labels is None else axis for axis in labeled_tensor.axes.values() ] return core.LabeledTensor(tile_op, new_axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)), string_types, tc.Optional(string_types)) def pad(labeled_tensor, paddings, mode='CONSTANT', name=None): """Pads a tensor. See tf.pad. Args: labeled_tensor: The input tensor. paddings: A mapping where the keys are axis names and the values are tuples where the first element is the padding to insert at the beginning of the axis and the second is the padding to insert at the end of the axis. mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC". name: Optional op name. 
Returns: A tensor with the indicated axes padded, optionally with those axes extended with the provided labels. Raises: ValueError: If the padded axes are not axes in the input tensor. """ with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()): raise ValueError('pad axes %r are not contained in the set of axis ' 'names %r on the input labeled tensor' % (paddings.keys(), labeled_tensor.axes)) new_axes = [] padding_pairs = [] for name, axis in labeled_tensor.axes.items(): if name in paddings: padding_before, padding_after = paddings[name] axis_before = core.Axis(name, padding_before) axis_after = core.Axis(name, padding_after) new_axes.append(core.concat_axes([axis_before, axis, axis_after])) padding_pairs.append((len(axis_before), len(axis_after))) else: new_axes.append(axis) padding_pairs.append((0, 0)) pad_op = array_ops.pad(labeled_tensor.tensor, padding_pairs, mode, name=scope) return core.LabeledTensor(pad_op, new_axes) @tc.returns(core.LabeledTensor) @tc.accepts( tc.Union(np.ndarray, list, tuple, core.Scalar), tc.Optional(dtypes.DType), tc.Optional( tc.Union(core.Axes, tc.Collection( tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types)) def constant(value, dtype=None, axes=None, name=None): """Creates a constant tensor. If `axes` includes any strings, shape is inferred from `value`. Otherwise, the sizes of the given `axes` are used to set `shape` for `tf.constant`. See tf.constant for more details. Args: value: The input tensor. dtype: The type of the returned tensor. axes: Optional Axes, list of strings or list of objects coercible to Axis objects. By default, axes are assumed to be an empty list (i.e., `value` is treated as a scalar). name: Optional op name. Returns: The tensor with elements set to zero. """ with ops.name_scope(name, 'lt_constant', [value]) as scope: if axes is None: axes = [] if isinstance(axes, core.Axes): axes = axes.values() if any(isinstance(ax, string_types) for ax in axes): # need to infer shape shape = None else: # axes already indicate shape axes = [core.as_axis(a) for a in axes] shape = [a.size for a in axes] op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope) return core.LabeledTensor(op, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def zeros_like(labeled_tensor, dtype=None, name=None): """Creates an identical tensor with all elements set to zero. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: The tensor with elements set to zero. """ with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def ones_like(labeled_tensor, dtype=None, name=None): """Creates an identical tensor with all elements set to one. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: The tensor with elements set to one. 
""" with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType), tc.Optional(string_types)) def cast(labeled_tensor, dtype=None, name=None): """Casts a labeled tensor to a new type. Args: labeled_tensor: The input tensor. dtype: The type of the returned tensor. name: Optional op name. Returns: A labeled tensor with the new dtype. """ with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types)) def verify_tensor_all_finite(labeled_tensor, message, name=None): """Asserts a tensor doesn't contain NaNs or Infs. See tf.verify_tensor_all_finite. Args: labeled_tensor: The input tensor. message: Message to log on failure. name: Optional op name. Returns: The input tensor. """ with ops.name_scope(name, 'lt_verify_tensor_all_finite', [labeled_tensor]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) op = numerics.verify_tensor_all_finite( labeled_tensor.tensor, msg=message, name=scope) return core.LabeledTensor(op, labeled_tensor.axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def boolean_mask(labeled_tensor, mask, name=None): """Apply a boolean mask to a labeled tensor. Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks. The mask is applied to the first axis of `labeled_tensor`. Labels on the first axis are removed, because True indices in `mask` may not be known dynamically. Args: labeled_tensor: The input tensor. mask: The type of the returned tensor. name: Optional op name. Returns: The masked labeled tensor. Raises: ValueError: if the first axis of the mask """ with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope: labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor) mask = core.convert_to_labeled_tensor(mask) if len(mask.axes) > 1: raise NotImplementedError( "LabeledTensor's boolean_mask currently only supports 1D masks") mask_axis = list(mask.axes.values())[0] lt_axis = list(labeled_tensor.axes.values())[0] if mask_axis != lt_axis: raise ValueError('the first axis of the labeled tensor and the mask ' 'are not equal:\n%r\n%r' % (lt_axis, mask_axis)) op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope) # TODO(shoyer): attempt to infer labels for the masked values, by calling # tf.contrib.util.constant_value on the mask? axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:] return core.LabeledTensor(op, axes) @tc.returns(core.LabeledTensor) @tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike, core.LabeledTensorLike, tc.Optional(string_types)) def where(condition, x, y, name=None): """Return elements from x or y depending on condition. See `tf.where` for more details. This function currently only implements the three argument version of where. Args: condition: LabeledTensor of type `bool`. x: LabeledTensor for values where condition is true. y: LabeledTensor for values where condition is false. name: Optional op name. 
Returns: The labeled tensor with values according to condition. Raises: ValueError: if `condition`, `x`, and `y` do not all have identical axes. """ with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope: condition = core.convert_to_labeled_tensor(condition) x = core.convert_to_labeled_tensor(x) y = core.convert_to_labeled_tensor(y) if not condition.axes == x.axes == y.axes: raise ValueError('all inputs to `where` must have equal axes') op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope) return core.LabeledTensor(op, x.axes)
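# ---------------------------------------------------------------------------
# Usage sketch (illustrative comments only, not part of the library surface).
# A minimal, hedged example of how a few of the ops defined above compose.
# It assumes this module is imported as `ops` alongside `core`, and that the
# `('x', ['a', 'b', 'c'])` axis specification is coercible by `core.as_axis`;
# both are assumptions made purely for illustration.
#
#   temperature = ops.constant([21.0, 35.0, 18.0], axes=[('x', ['a', 'b', 'c'])])
#   is_valid = ops.constant([True, False, True], axes=[('x', ['a', 'b', 'c'])])
#   cleaned = ops.where(is_valid, temperature, ops.zeros_like(temperature))
#
# `where` requires all three inputs to carry identical axes, so the labels on
# `temperature` propagate unchanged to `cleaned`.
# ---------------------------------------------------------------------------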
apache-2.0
ronny3050/MobileNet
retrain.py
1
56996
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Simple transfer learning with Inception v3 or Mobilenet models. With support for TensorBoard. This example shows how to take a Inception v3 or Mobilenet model trained on ImageNet images, and train a new top layer that can recognize other classes of images. The top layer receives as input a 2048-dimensional vector (1001-dimensional for Mobilenet) for each image. We train a softmax layer on top of this representation. Assuming the softmax layer contains N labels, this corresponds to learning N + 2048*N (or 1001*N) model parameters corresponding to the learned biases and weights. Here's an example, which assumes you have a folder containing class-named subfolders, each full of images for each label. The example folder flower_photos should have a structure like this: ~/flower_photos/daisy/photo1.jpg ~/flower_photos/daisy/photo2.jpg ... ~/flower_photos/rose/anotherphoto77.jpg ... ~/flower_photos/sunflower/somepicture.jpg The subfolder names are important, since they define what label is applied to each image, but the filenames themselves don't matter. Once your images are prepared, you can run the training with a command like this: ```bash bazel build tensorflow/examples/image_retraining:retrain && \ bazel-bin/tensorflow/examples/image_retraining/retrain \ --image_dir ~/flower_photos ``` Or, if you have a pip installation of tensorflow, `retrain.py` can be run without bazel: ```bash python tensorflow/examples/image_retraining/retrain.py \ --image_dir ~/flower_photos ``` You can replace the image_dir argument with any folder containing subfolders of images. The label for each image is taken from the name of the subfolder it's in. This produces a new model file that can be loaded and run by any TensorFlow program, for example the label_image sample code. By default this script will use the high accuracy, but comparatively large and slow Inception v3 model architecture. It's recommended that you start with this to validate that you have gathered good training data, but if you want to deploy on resource-limited platforms, you can try the `--architecture` flag with a Mobilenet model. For example: ```bash python tensorflow/examples/image_retraining/retrain.py \ --image_dir ~/flower_photos --architecture mobilenet_1.0_224 ``` There are 32 different Mobilenet models to choose from, with a variety of file size and latency options. The first number can be '1.0', '0.75', '0.50', or '0.25' to control the size, and the second controls the input image size, either '224', '192', '160', or '128', with smaller sizes running faster. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html for more information on Mobilenet. 
To use with TensorBoard: By default, this script will log summaries to /tmp/retrain_logs directory Visualize the summaries with this command: tensorboard --logdir /tmp/retrain_logs """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse from datetime import datetime import hashlib import os.path import random import re import sys import tarfile from sklearn.model_selection import train_test_split import numpy as np from six.moves import urllib import tensorflow as tf from tensorflow.python.framework import graph_util from tensorflow.python.framework import tensor_shape from tensorflow.python.platform import gfile from tensorflow.python.util import compat FLAGS = None # These are all parameters that are tied to the particular model architecture # we're using for Inception v3. These include things like tensor names and their # sizes. If you want to adapt this script to work with another model, you will # need to update these to reflect the values in the network you're using. MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M def create_image_lists(image_dir, testing_percentage, validation_percentage): """Builds a list of training images from the file system. Analyzes the sub folders in the image directory, splits them into stable training, testing, and validation sets, and returns a data structure describing the lists of images for each label and their paths. Args: image_dir: String path to a folder containing subfolders of images. testing_percentage: Integer percentage of the images to reserve for tests. validation_percentage: Integer percentage of images reserved for validation. Returns: A dictionary containing an entry for each label subfolder, with images split into training, testing, and validation sets within each label. """ if not gfile.Exists(image_dir): tf.logging.error("Image directory '" + image_dir + "' not found.") return None result = {} sub_dirs = [x[0] for x in gfile.Walk(image_dir)] # The root directory comes first, so skip it. is_root_dir = True for sub_dir in sub_dirs: if is_root_dir: is_root_dir = False continue extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] file_list = [] dir_name = os.path.basename(sub_dir) if dir_name == image_dir: continue tf.logging.info("Looking for images in '" + dir_name + "'") for extension in extensions: file_glob = os.path.join(image_dir, dir_name, '*.' + extension) file_list.extend(gfile.Glob(file_glob)) if not file_list: tf.logging.warning('No files found') continue if len(file_list) < 20: tf.logging.warning( 'WARNING: Folder has less than 20 images, which may cause issues.') elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS: tf.logging.warning( 'WARNING: Folder {} has more than {} images. Some images will ' 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS)) label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower()) training_images = [] testing_images = [] validation_images = [] file_names = [os.path.basename(file_name) for file_name in file_list] training_images, validation_images = train_test_split(file_names, test_size=validation_percentage/100.0) ## for file_name in file_list: ## base_name = os.path.basename(file_name) ## # We want to ignore anything after '_nohash_' in the file name when ## # deciding which set to put an image in, the data set creator has a way of ## # grouping photos that are close variations of each other. For example ## # this is used in the plant disease data set to group multiple pictures of ## # the same leaf. 
## hash_name = re.sub(r'_nohash_.*$', '', file_name) ## # This looks a bit magical, but we need to decide whether this file should ## # go into the training, testing, or validation sets, and we want to keep ## # existing files in the same set even if more files are subsequently ## # added. ## # To do that, we need a stable way of deciding based on just the file name ## # itself, so we do a hash of that and then use that to generate a ## # probability value that we use to assign it. ## hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest() ## percentage_hash = ((int(hash_name_hashed, 16) % ## (MAX_NUM_IMAGES_PER_CLASS + 1)) * ## (100.0 / MAX_NUM_IMAGES_PER_CLASS)) ## if percentage_hash < validation_percentage: ## validation_images.append(base_name) #### elif percentage_hash < (testing_percentage + validation_percentage): #### testing_images.append(base_name) ## else: ## training_images.append(base_name) result[label_name] = { 'dir': dir_name, 'training': training_images, 'testing': testing_images, 'validation': validation_images, } return result def get_image_path(image_lists, label_name, index, image_dir, category): """"Returns a path to an image for a label at the given index. Args: image_lists: Dictionary of training images for each label. label_name: Label string we want to get an image for. index: Int offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of set to pull images from - training, testing, or validation. Returns: File system path string to an image that meets the requested parameters. """ if label_name not in image_lists: tf.logging.fatal('Label does not exist %s.', label_name) label_lists = image_lists[label_name] if category not in label_lists: tf.logging.fatal('Category does not exist %s.', category) category_list = label_lists[category] if not category_list: tf.logging.fatal('Label %s has no images in the category %s.', label_name, category) mod_index = index % len(category_list) base_name = category_list[mod_index] sub_dir = label_lists['dir'] full_path = os.path.join(image_dir, sub_dir, base_name) return full_path def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category, architecture): """"Returns a path to a bottleneck file for a label at the given index. Args: image_lists: Dictionary of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. bottleneck_dir: Folder string holding cached files of bottleneck values. category: Name string of set to pull images from - training, testing, or validation. architecture: The name of the model architecture. Returns: File system path string to an image that meets the requested parameters. """ return get_image_path(image_lists, label_name, index, bottleneck_dir, category) + '_' + architecture + '.txt' def create_model_graph(model_info): """"Creates a graph from saved GraphDef file and returns a Graph object. Args: model_info: Dictionary containing information about the model architecture. Returns: Graph holding the trained Inception network, and various tensors we'll be manipulating. 
""" with tf.Graph().as_default() as graph: model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name']) with gfile.FastGFile(model_path, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) bottleneck_tensor, resized_input_tensor = (tf.import_graph_def( graph_def, name='', return_elements=[ model_info['bottleneck_tensor_name'], model_info['resized_input_tensor_name'], ])) return graph, bottleneck_tensor, resized_input_tensor def run_bottleneck_on_image(sess, image_data, image_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Runs inference on an image to extract the 'bottleneck' summary layer. Args: sess: Current active TensorFlow Session. image_data: String of raw JPEG data. image_data_tensor: Input data layer in the graph. decoded_image_tensor: Output of initial image resizing and preprocessing. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: Layer before the final softmax. Returns: Numpy array of bottleneck values. """ # First decode the JPEG image, resize it, and rescale the pixel values. resized_input_values = sess.run(decoded_image_tensor, {image_data_tensor: image_data}) # Then run it through the recognition network. bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: resized_input_values}) bottleneck_values = np.squeeze(bottleneck_values) return bottleneck_values def maybe_download_and_extract(data_url): """Download and extract model tar file. If the pretrained model we're using doesn't already exist, this function downloads it from the TensorFlow.org website and unpacks it into a directory. Args: data_url: Web location of the tar file containing the pretrained model. """ dest_directory = FLAGS.model_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = data_url.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress) print() statinfo = os.stat(filepath) tf.logging.info('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory) def ensure_dir_exists(dir_name): """Makes sure the folder exists on disk. Args: dir_name: Path string to the folder we want to create. 
""" if not os.path.exists(dir_name): os.makedirs(dir_name) bottleneck_path_2_bottleneck_values = {} def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Create a single bottleneck file.""" tf.logging.info('Creating bottleneck at ' + bottleneck_path) image_path = get_image_path(image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): tf.logging.fatal('File does not exist %s', image_path) image_data = gfile.FastGFile(image_path, 'rb').read() try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) except Exception as e: raise RuntimeError('Error during processing file %s (%s)' % (image_path, str(e))) bottleneck_string = ','.join(str(x) for x in bottleneck_values) with open(bottleneck_path, 'w') as bottleneck_file: bottleneck_file.write(bottleneck_string) def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture): """Retrieves or calculates bottleneck values for an image. If a cached version of the bottleneck data exists on-disk, return that, otherwise calculate the data and save it to disk for future use. Args: sess: The current active TensorFlow Session. image_lists: Dictionary of training images for each label. label_name: Label string we want to get an image for. index: Integer offset of the image we want. This will be modulo-ed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of which set to pull images from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: The tensor to feed loaded jpeg data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The output tensor for the bottleneck values. architecture: The name of the model architecture. Returns: Numpy array of values produced by the bottleneck layer for the image. 
""" label_lists = image_lists[label_name] sub_dir = label_lists['dir'] sub_dir_path = os.path.join(bottleneck_dir, sub_dir) ensure_dir_exists(sub_dir_path) bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category, architecture) if not os.path.exists(bottleneck_path): create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) with open(bottleneck_path, 'r') as bottleneck_file: bottleneck_string = bottleneck_file.read() did_hit_error = False try: bottleneck_values = [float(x) for x in bottleneck_string.split(',')] except ValueError: tf.logging.warning('Invalid float found, recreating bottleneck') did_hit_error = True if did_hit_error: create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) with open(bottleneck_path, 'r') as bottleneck_file: bottleneck_string = bottleneck_file.read() # Allow exceptions to propagate here, since they shouldn't happen after a # fresh creation bottleneck_values = [float(x) for x in bottleneck_string.split(',')] return bottleneck_values def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture): """Ensures all the training, testing, and validation bottlenecks are cached. Because we're likely to read the same image multiple times (if there are no distortions applied during training) it can speed things up a lot if we calculate the bottleneck layer values once for each image during preprocessing, and then just read those cached values repeatedly during training. Here we go through all the images we've found, calculate those values, and save them off. Args: sess: The current active TensorFlow Session. image_lists: Dictionary of training images for each label. image_dir: Root folder string of the subfolders containing the training images. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: Input tensor for jpeg data from file. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The penultimate output layer of the graph. architecture: The name of the model architecture. Returns: Nothing. """ how_many_bottlenecks = 0 ensure_dir_exists(bottleneck_dir) for label_name, label_lists in image_lists.items(): for category in ['training', 'testing', 'validation']: category_list = label_lists[category] for index, unused_base_name in enumerate(category_list): get_or_create_bottleneck( sess, image_lists, label_name, index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture) how_many_bottlenecks += 1 if how_many_bottlenecks % 100 == 0: tf.logging.info( str(how_many_bottlenecks) + ' bottleneck files created.') def get_random_cached_bottlenecks(sess, image_lists, how_many, category, bottleneck_dir, image_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture): """Retrieves bottleneck values for cached images. If no distortions are being applied, this function can retrieve the cached bottleneck values directly from disk for images. It picks a random set of images from the specified category. Args: sess: Current TensorFlow Session. 
image_lists: Dictionary of training images for each label. how_many: If positive, a random sample of this size will be chosen. If negative, all bottlenecks will be retrieved. category: Name string of which set to pull from - training, testing, or validation. bottleneck_dir: Folder string holding cached files of bottleneck values. image_dir: Root folder string of the subfolders containing the training images. jpeg_data_tensor: The layer to feed jpeg image data into. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. architecture: The name of the model architecture. Returns: List of bottleneck arrays, their corresponding ground truths, and the relevant filenames. """ class_count = len(image_lists.keys()) bottlenecks = [] ground_truths = [] filenames = [] if how_many >= 0: # Retrieve a random sample of bottlenecks. for unused_i in range(how_many): label_index = random.randrange(class_count) label_name = list(image_lists.keys())[label_index] image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture) ground_truth = np.zeros(class_count, dtype=np.float32) ground_truth[label_index] = 1.0 bottlenecks.append(bottleneck) ground_truths.append(ground_truth) filenames.append(image_name) else: # Retrieve all bottlenecks. for label_index, label_name in enumerate(image_lists.keys()): for image_index, image_name in enumerate( image_lists[label_name][category]): image_name = get_image_path(image_lists, label_name, image_index, image_dir, category) bottleneck = get_or_create_bottleneck( sess, image_lists, label_name, image_index, image_dir, category, bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor, architecture) ground_truth = np.zeros(class_count, dtype=np.float32) ground_truth[label_index] = 1.0 bottlenecks.append(bottleneck) ground_truths.append(ground_truth) filenames.append(image_name) return bottlenecks, ground_truths, filenames def get_random_distorted_bottlenecks( sess, image_lists, how_many, category, image_dir, input_jpeg_tensor, distorted_image, resized_input_tensor, bottleneck_tensor): """Retrieves bottleneck values for training images, after distortions. If we're training with distortions like crops, scales, or flips, we have to recalculate the full model for every image, and so we can't use cached bottleneck values. Instead we find random images for the requested category, run them through the distortion graph, and then the full graph to get the bottleneck results for each. Args: sess: Current TensorFlow Session. image_lists: Dictionary of training images for each label. how_many: The integer number of bottleneck values to return. category: Name string of which set of images to fetch - training, testing, or validation. image_dir: Root folder string of the subfolders containing the training images. input_jpeg_tensor: The input layer we feed the image data to. distorted_image: The output node of the distortion graph. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The bottleneck output layer of the CNN graph. 
Returns: List of bottleneck arrays and their corresponding ground truths. """ class_count = len(image_lists.keys()) bottlenecks = [] ground_truths = [] for unused_i in range(how_many): label_index = random.randrange(class_count) label_name = list(image_lists.keys())[label_index] image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1) image_path = get_image_path(image_lists, label_name, image_index, image_dir, category) if not gfile.Exists(image_path): tf.logging.fatal('File does not exist %s', image_path) jpeg_data = gfile.FastGFile(image_path, 'rb').read() # Note that we materialize the distorted_image_data as a numpy array before # sending running inference on the image. This involves 2 memory copies and # might be optimized in other implementations. distorted_image_data = sess.run(distorted_image, {input_jpeg_tensor: jpeg_data}) bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: distorted_image_data}) bottleneck_values = np.squeeze(bottleneck_values) ground_truth = np.zeros(class_count, dtype=np.float32) ground_truth[label_index] = 1.0 bottlenecks.append(bottleneck_values) ground_truths.append(ground_truth) return bottlenecks, ground_truths def should_distort_images(flip_left_right, random_crop, random_scale, random_brightness): """Whether any distortions are enabled, from the input flags. Args: flip_left_right: Boolean whether to randomly mirror images horizontally. random_crop: Integer percentage setting the total margin used around the crop box. random_scale: Integer percentage of how much to vary the scale by. random_brightness: Integer range to randomly multiply the pixel values by. Returns: Boolean value indicating whether any distortions should be applied. """ return (flip_left_right or (random_crop != 0) or (random_scale != 0) or (random_brightness != 0)) def add_input_distortions(flip_left_right, random_crop, random_scale, random_brightness, input_width, input_height, input_depth, input_mean, input_std): """Creates the operations to apply the specified distortions. During training it can help to improve the results if we run the images through simple distortions like crops, scales, and flips. These reflect the kind of variations we expect in the real world, and so can help train the model to cope with natural data more effectively. Here we take the supplied parameters and construct a network of operations to apply them to an image. Cropping ~~~~~~~~ Cropping is done by placing a bounding box at a random position in the full image. The cropping parameter controls the size of that box relative to the input image. If it's zero, then the box is the same size as the input and no cropping is performed. If the value is 50%, then the crop box will be half the width and height of the input. In a diagram it looks like this: < width > +---------------------+ | | | width - crop% | | < > | | +------+ | | | | | | | | | | | | | | +------+ | | | | | +---------------------+ Scaling ~~~~~~~ Scaling is a lot like cropping, except that the bounding box is always centered and its size varies randomly within the given range. For example if the scale percentage is zero, then the bounding box is the same size as the input and no scaling is applied. If it's 50%, then the bounding box will be in a random range between half the width and height and full size. Args: flip_left_right: Boolean whether to randomly mirror images horizontally. random_crop: Integer percentage setting the total margin used around the crop box. 
random_scale: Integer percentage of how much to vary the scale by. random_brightness: Integer range to randomly multiply the pixel values by. graph. input_width: Horizontal size of expected input image to model. input_height: Vertical size of expected input image to model. input_depth: How many channels the expected input image should have. input_mean: Pixel value that should be zero in the image for the graph. input_std: How much to divide the pixel values by before recognition. Returns: The jpeg input layer and the distorted result tensor. """ jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) margin_scale = 1.0 + (random_crop / 100.0) resize_scale = 1.0 + (random_scale / 100.0) margin_scale_value = tf.constant(margin_scale) resize_scale_value = tf.random_uniform(tensor_shape.scalar(), minval=1.0, maxval=resize_scale) scale_value = tf.multiply(margin_scale_value, resize_scale_value) precrop_width = tf.multiply(scale_value, input_width) precrop_height = tf.multiply(scale_value, input_height) precrop_shape = tf.stack([precrop_height, precrop_width]) precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32) precropped_image = tf.image.resize_bilinear(decoded_image_4d, precrop_shape_as_int) precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0]) cropped_image = tf.random_crop(precropped_image_3d, [input_height, input_width, input_depth]) if flip_left_right: flipped_image = tf.image.random_flip_left_right(cropped_image) else: flipped_image = cropped_image brightness_min = 1.0 - (random_brightness / 100.0) brightness_max = 1.0 + (random_brightness / 100.0) brightness_value = tf.random_uniform(tensor_shape.scalar(), minval=brightness_min, maxval=brightness_max) brightened_image = tf.multiply(flipped_image, brightness_value) offset_image = tf.subtract(brightened_image, input_mean) mul_image = tf.multiply(offset_image, 1.0 / input_std) distort_result = tf.expand_dims(mul_image, 0, name='DistortResult') return jpeg_data, distort_result def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def get_center_loss(features, labels, alpha, num_classes): nrof_features = features.get_shape()[1] centers = tf.get_variable('centers', [num_classes, nrof_features], dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) labels = tf.argmax(labels,1) label = tf.reshape(labels, [-1]) centers_batch = tf.gather(centers, label) diff = (1 - alpha) * (centers_batch - features) centers = tf.scatter_sub(centers, label, diff) with tf.control_dependencies([centers]): loss = tf.reduce_mean(tf.square(features - centers_batch)) return loss, centers def add_final_training_ops(class_count, labels, final_tensor_name, bottleneck_tensor, bottleneck_tensor_size): """Adds a new softmax and fully-connected layer for training. 
We need to retrain the top layer to identify our new classes, so this function adds the right operations to the graph, along with some variables to hold the weights, and then sets up all the gradients for the backward pass. The set up for the softmax and fully-connected layers is based on: https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html Args: class_count: Integer of how many categories of things we're trying to recognize. final_tensor_name: Name string for the new final node that produces results. bottleneck_tensor: The output of the main CNN graph. bottleneck_tensor_size: How many entries in the bottleneck vector. Returns: The tensors for the training and cross entropy results, and tensors for the bottleneck input and ground truth input. """ with tf.name_scope('input'): bottleneck_input = tf.placeholder_with_default( bottleneck_tensor, shape=[None, bottleneck_tensor_size], name='BottleneckInputPlaceholder') ground_truth_input = tf.placeholder(tf.int32, [None, class_count], name='GroundTruthInput') # Organizing the following ops as `final_training_ops` so they're easier # to see in TensorBoard layer_name = 'final_training_ops' with tf.name_scope(layer_name): with tf.name_scope('weights'): initial_value = tf.truncated_normal( [bottleneck_tensor_size, class_count], stddev=0.001) layer_weights = tf.Variable(initial_value, name='final_weights') variable_summaries(layer_weights) with tf.name_scope('biases'): layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases') variable_summaries(layer_biases) with tf.name_scope('Wx_plus_b'): logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases tf.summary.histogram('pre_activations', logits) final_tensor = tf.nn.softmax(logits, name=final_tensor_name) tf.summary.histogram('activations', final_tensor) with tf.name_scope('center_loss'): center_loss, centers = get_center_loss(bottleneck_input, ground_truth_input, FLAGS.center_loss_alpha, class_count) with tf.name_scope('cross_entropy'): cross_entropy = tf.nn.softmax_cross_entropy_with_logits( labels=ground_truth_input, logits=logits) with tf.name_scope('total'): cross_entropy_mean = tf.reduce_mean(cross_entropy) total_loss = cross_entropy_mean + FLAGS.center_loss_factor * center_loss tf.summary.scalar('center_loss', center_loss) tf.summary.scalar('cross_entropy', cross_entropy_mean) tf.summary.scalar('total_loss', total_loss) with tf.name_scope('train'): optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) train_step = optimizer.minimize(total_loss) return (train_step, total_loss, bottleneck_input, ground_truth_input, final_tensor) def add_evaluation_step(result_tensor, ground_truth_tensor): """Inserts the operations we need to evaluate the accuracy of our results. Args: result_tensor: The new final node that produces results. ground_truth_tensor: The node we feed ground truth data into. Returns: Tuple of (evaluation step, prediction). 
""" with tf.name_scope('accuracy'): with tf.name_scope('correct_prediction'): prediction = tf.argmax(result_tensor, 1) correct_prediction = tf.equal( prediction, tf.argmax(ground_truth_tensor, 1)) with tf.name_scope('accuracy'): evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) tf.summary.scalar('accuracy', evaluation_step) return evaluation_step, prediction def save_graph_to_file(sess, graph, graph_file_name): output_graph_def = graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) with gfile.FastGFile(graph_file_name, 'wb') as f: f.write(output_graph_def.SerializeToString()) return def prepare_file_system(): # Setup the directory we'll write summaries to for TensorBoard if tf.gfile.Exists(FLAGS.summaries_dir): tf.gfile.DeleteRecursively(FLAGS.summaries_dir) tf.gfile.MakeDirs(FLAGS.summaries_dir) if FLAGS.intermediate_store_frequency > 0: ensure_dir_exists(FLAGS.intermediate_output_graphs_dir) return def create_model_info(architecture): """Given the name of a model architecture, returns information about it. There are different base image recognition pretrained models that can be retrained using transfer learning, and this function translates from the name of a model to the attributes that are needed to download and train with it. Args: architecture: Name of a model architecture. Returns: Dictionary of information about the model, or None if the name isn't recognized Raises: ValueError: If architecture name is unknown. """ architecture = architecture.lower() if architecture == 'inception_v3': # pylint: disable=line-too-long data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' # pylint: enable=line-too-long bottleneck_tensor_name = 'pool_3/_reshape:0' bottleneck_tensor_size = 2048 input_width = 299 input_height = 299 input_depth = 3 resized_input_tensor_name = 'Mul:0' model_file_name = 'classify_image_graph_def.pb' input_mean = 128 input_std = 128 elif architecture.startswith('mobilenet_'): parts = architecture.split('_') if len(parts) != 3 and len(parts) != 4: tf.logging.error("Couldn't understand architecture name '%s'", architecture) return None version_string = parts[1] if (version_string != '1.0' and version_string != '0.75' and version_string != '0.50' and version_string != '0.25'): tf.logging.error( """"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25', but found '%s' for architecture '%s'""", version_string, architecture) return None size_string = parts[2] if (size_string != '224' and size_string != '192' and size_string != '160' and size_string != '128'): tf.logging.error( """The Mobilenet input size should be '224', '192', '160', or '128', but found '%s' for architecture '%s'""", size_string, architecture) return None if len(parts) == 3: is_quantized = False else: if parts[3] != 'quantized': tf.logging.error( "Couldn't understand architecture suffix '%s' for '%s'", parts[3], architecture) return None is_quantized = True data_url = 'http://download.tensorflow.org/models/mobilenet_v1_' data_url += version_string + '_' + size_string + '_frozen.tgz' bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0' bottleneck_tensor_size = 1001 input_width = int(size_string) input_height = int(size_string) input_depth = 3 resized_input_tensor_name = 'input:0' if is_quantized: model_base_name = 'quantized_graph.pb' else: model_base_name = 'frozen_graph.pb' model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string model_file_name = os.path.join(model_dir_name, 
model_base_name) input_mean = 127.5 input_std = 127.5 else: tf.logging.error("Couldn't understand architecture name '%s'", architecture) raise ValueError('Unknown architecture', architecture) return { 'data_url': data_url, 'bottleneck_tensor_name': bottleneck_tensor_name, 'bottleneck_tensor_size': bottleneck_tensor_size, 'input_width': input_width, 'input_height': input_height, 'input_depth': input_depth, 'resized_input_tensor_name': resized_input_tensor_name, 'model_file_name': model_file_name, 'input_mean': input_mean, 'input_std': input_std, } def add_jpeg_decoding(input_width, input_height, input_depth, input_mean, input_std): """Adds operations that perform JPEG decoding and resizing to the graph.. Args: input_width: Desired width of the image fed into the recognizer graph. input_height: Desired width of the image fed into the recognizer graph. input_depth: Desired channels of the image fed into the recognizer graph. input_mean: Pixel value that should be zero in the image for the graph. input_std: How much to divide the pixel values by before recognition. Returns: Tensors for the node to feed JPEG data into, and the output of the preprocessing steps. """ jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) resize_shape = tf.stack([input_height, input_width]) resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32) resized_image = tf.image.resize_bilinear(decoded_image_4d, resize_shape_as_int) offset_image = tf.subtract(resized_image, input_mean) mul_image = tf.multiply(offset_image, 1.0 / input_std) return jpeg_data, mul_image def get_numerical_labels(image_lists): numerical_labels = [] cnt = 0 prev = '' for i in image_lists.keys(): numerical_labels.append(cnt) cnt = cnt + 1 return numerical_labels def main(_): # Needed to make sure the logging output is visible. # See https://github.com/tensorflow/tensorflow/issues/3047 tf.logging.set_verbosity(tf.logging.INFO) # Prepare necessary directories that can be used during training prepare_file_system() # Gather information about the model architecture we'll be using. model_info = create_model_info(FLAGS.architecture) if not model_info: tf.logging.error('Did not recognize architecture flag') return -1 # Set up the pre-trained graph. maybe_download_and_extract(model_info['data_url']) graph, bottleneck_tensor, resized_image_tensor = ( create_model_graph(model_info)) # Look at the folder structure, and create lists of all the images. image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage) f = open( 'image_lists.py', 'w' ) f.write( 'dict = ' + repr(image_lists) + '\n' ) f.close() class_count = len(image_lists.keys()) if class_count == 0: tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir) return -1 if class_count == 1: tf.logging.error('Only one valid folder of images found at ' + FLAGS.image_dir + ' - multiple classes are needed for classification.') return -1 # See if the command-line flags mean we're applying any distortions. do_distort_images = should_distort_images( FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) numerical_labels = get_numerical_labels(image_lists) with tf.Session(graph=graph) as sess: # Set up the image decoding sub-graph. 
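    # add_jpeg_decoding() (defined above) returns two tensors: a string
    # placeholder that accepts raw JPEG bytes, and the decoded image after it
    # has been resized to the architecture's expected input size and
    # normalized with its input_mean / input_std. Feeding the placeholder and
    # fetching the second tensor is how the bottleneck-caching code turns
    # image files on disk into network-ready inputs.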
jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding( model_info['input_width'], model_info['input_height'], model_info['input_depth'], model_info['input_mean'], model_info['input_std']) if do_distort_images: # We will be applying distortions, so setup the operations we'll need. (distorted_jpeg_data_tensor, distorted_image_tensor) = add_input_distortions( FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness, model_info['input_width'], model_info['input_height'], model_info['input_depth'], model_info['input_mean'], model_info['input_std']) else: # We'll make sure we've calculated the 'bottleneck' image summaries and # cached them on disk. cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture) # Add the new layer that we'll be training. (train_step, total_loss, bottleneck_input, ground_truth_input, final_tensor) = add_final_training_ops( len(image_lists.keys()), get_numerical_labels(image_lists), FLAGS.final_tensor_name, bottleneck_tensor, model_info['bottleneck_tensor_size']) # Create the operations we need to evaluate the accuracy of our new layer. evaluation_step, prediction = add_evaluation_step( final_tensor, ground_truth_input) # Merge all the summaries and write them out to the summaries_dir merged = tf.summary.merge_all() train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph) validation_writer = tf.summary.FileWriter( FLAGS.summaries_dir + '/validation') # Set up all our weights to their initial default values. init = tf.global_variables_initializer() sess.run(init) # Run the training for as many cycles as requested on the command line. for i in range(FLAGS.how_many_training_steps): # Get a batch of input bottleneck values, either calculated fresh every # time with distortions applied, or from the cache stored on disk. if do_distort_images: (train_bottlenecks, train_ground_truth) = get_random_distorted_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.image_dir, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor) else: (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks( sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture) # Feed the bottlenecks and ground truth into the graph, and run a training # step. Capture training summaries for TensorBoard with the `merged` op. train_summary, _ = sess.run( [merged, train_step], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth}) train_writer.add_summary(train_summary, i) # Every so often, print out how well the graph is training. 
is_last_step = (i + 1 == FLAGS.how_many_training_steps) if (i % FLAGS.eval_step_interval) == 0 or is_last_step: train_accuracy, total_loss_values = sess.run( [evaluation_step, total_loss], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth}) tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i, train_accuracy * 100)) tf.logging.info('%s: Step %d: Total loss = %f' % (datetime.now(), i, total_loss_values)) validation_bottlenecks, validation_ground_truth, _ = ( get_random_cached_bottlenecks( sess, image_lists, FLAGS.validation_batch_size, 'validation', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)) # Run a validation step and capture training summaries for TensorBoard # with the `merged` op. validation_summary, validation_accuracy = sess.run( [merged, evaluation_step], feed_dict={bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth}) validation_writer.add_summary(validation_summary, i) tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' % (datetime.now(), i, validation_accuracy * 100, len(validation_bottlenecks))) # Store intermediate results intermediate_frequency = FLAGS.intermediate_store_frequency if (intermediate_frequency > 0 and (i % intermediate_frequency == 0) and i > 0): intermediate_file_name = (FLAGS.intermediate_output_graphs_dir + 'intermediate_' + str(i) + '.pb') tf.logging.info('Save intermediate result to : ' + intermediate_file_name) save_graph_to_file(sess, graph, intermediate_file_name) # We've completed all our training, so run a final test evaluation on # some new images we haven't used before. if(FLAGS.testing_percentage > 0): test_bottlenecks, test_ground_truth, test_filenames = ( get_random_cached_bottlenecks( sess, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)) test_accuracy, predictions = sess.run( [evaluation_step, prediction], feed_dict={bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth}) tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))) if FLAGS.print_misclassified_test_images: tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===') for i, test_filename in enumerate(test_filenames): if predictions[i] != test_ground_truth[i].argmax(): tf.logging.info('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]])) # Write out the trained graph and labels with the weights stored as # constants. save_graph_to_file(sess, graph, FLAGS.output_graph) with gfile.FastGFile(FLAGS.output_labels, 'w') as f: f.write('\n'.join(image_lists.keys()) + '\n') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--image_dir', type=str, default='', help='Path to folders of labeled images.' ) parser.add_argument( '--output_graph', type=str, default='tmp/output_graph.pb', help='Where to save the trained graph.' ) parser.add_argument( '--intermediate_output_graphs_dir', type=str, default='', help='Where to save the intermediate graphs.' ) parser.add_argument( '--intermediate_store_frequency', type=int, default=0, help="""\ How many steps to store intermediate graph. If "0" then will not store.\ """ ) parser.add_argument( '--center_loss', default=False, help="""\ Use center loss along with softmax? 
""" ) parser.add_argument( '--output_labels', type=str, default='tmp/output_labels.txt', help='Where to save the trained graph\'s labels.' ) parser.add_argument( '--summaries_dir', type=str, default='tmp/retrain_logs', help='Where to save summary logs for TensorBoard.' ) parser.add_argument( '--how_many_training_steps', type=int, default=4000, help='How many training steps to run before ending.' ) parser.add_argument( '--learning_rate', type=float, default=0.01, help='How large a learning rate to use when training.' ) parser.add_argument( '--testing_percentage', type=int, default=10, help='What percentage of images to use as a test set.' ) parser.add_argument( '--validation_percentage', type=int, default=10, help='What percentage of images to use as a validation set.' ) parser.add_argument( '--eval_step_interval', type=int, default=10, help='How often to evaluate the training results.' ) parser.add_argument( '--train_batch_size', type=int, default=100, help='How many images to train on at a time.' ) parser.add_argument( '--test_batch_size', type=int, default=-1, help="""\ How many images to test on. This test set is only used once, to evaluate the final accuracy of the model after training completes. A value of -1 causes the entire test set to be used, which leads to more stable results across runs.\ """ ) parser.add_argument( '--validation_batch_size', type=int, default=100, help="""\ How many images to use in an evaluation batch. This validation set is used much more often than the test set, and is an early indicator of how accurate the model is during training. A value of -1 causes the entire validation set to be used, which leads to more stable results across training iterations, but may be slower on large training sets.\ """ ) parser.add_argument( '--print_misclassified_test_images', default=False, help="""\ Whether to print out a list of all misclassified test images.\ """, action='store_true' ) parser.add_argument( '--model_dir', type=str, default='tmp/imagenet', help="""\ Path to classify_image_graph_def.pb, imagenet_synset_to_human_label_map.txt, and imagenet_2012_challenge_label_map_proto.pbtxt.\ """ ) parser.add_argument( '--bottleneck_dir', type=str, default='tmp/bottleneck', help='Path to cache bottleneck layer values as files.' 
) parser.add_argument( '--final_tensor_name', type=str, default='final_result', help="""\ The name of the output classification layer in the retrained graph.\ """ ) parser.add_argument( '--flip_left_right', default=False, help="""\ Whether to randomly flip half of the training images horizontally.\ """, action='store_true' ) parser.add_argument( '--random_crop', type=int, default=0, help="""\ A percentage determining how much of a margin to randomly crop off the training images.\ """ ) parser.add_argument( '--random_scale', type=int, default=0, help="""\ A percentage determining how much to randomly scale up the size of the training images by.\ """ ) parser.add_argument( '--random_brightness', type=int, default=0, help="""\ A percentage determining how much to randomly multiply the training image input pixels up or down by.\ """ ) parser.add_argument( '--center_loss_alpha', type=float, default=0, help="""\ A percentage determining how much to randomly multiply the training image input pixels up or down by.\ """ ) parser.add_argument( '--center_loss_factor', type=float, default=0, help="""\ A percentage determining how much to randomly multiply the training image input pixels up or down by.\ """ ) parser.add_argument( '--architecture', type=str, default='inception_v3', help="""\ Which model architecture to use. 'inception_v3' is the most accurate, but also the slowest. For faster or smaller models, chose a MobileNet with the form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example, 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224 pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much less accurate, but smaller and faster network that's 920 KB on disk and takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html for more information on Mobilenet.\ """) FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
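# ---------------------------------------------------------------------------
# Inference sketch (illustrative comments only; nothing below is executed).
# A hedged example of how the frozen graph written by save_graph_to_file()
# might be used for prediction. The tensor names assume the defaults above:
# 'final_result' is the default --final_tensor_name, and 'input:0' is the
# resized-input tensor name reported by create_model_info() for the MobileNet
# architectures (Inception v3 uses 'Mul:0' instead). `image` is assumed to be
# a numpy array with a leading batch dimension, already resized and
# normalized to the chosen architecture's input_width/input_height/
# input_mean/input_std.
#
#   with tf.gfile.FastGFile('tmp/output_graph.pb', 'rb') as f:
#       graph_def = tf.GraphDef()
#       graph_def.ParseFromString(f.read())
#       tf.import_graph_def(graph_def, name='')
#   labels = [line.strip() for line in tf.gfile.GFile('tmp/output_labels.txt')]
#   with tf.Session() as sess:
#       probs = sess.run('final_result:0', {'input:0': image})
#       print(labels[probs[0].argmax()])
# ---------------------------------------------------------------------------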
mit
nhejazi/scikit-learn
sklearn/linear_model/least_angle.py
6
58438
""" Least Angle Regression algorithm. See the documentation on the Generalized Linear Model for a complete discussion. """ from __future__ import print_function # Author: Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux # # License: BSD 3 clause from math import log import sys import warnings import numpy as np from scipy import linalg, interpolate from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel from ..base import RegressorMixin from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated from ..model_selection import check_cv from ..exceptions import ConvergenceWarning from ..externals.joblib import Parallel, delayed from ..externals.six.moves import xrange from ..externals.six import string_types solve_triangular_args = {'check_finite': False} def lars_path(X, y, Xy=None, Gram=None, max_iter=500, alpha_min=0, method='lar', copy_X=True, eps=np.finfo(np.float).eps, copy_Gram=True, verbose=0, return_path=True, return_n_iter=False, positive=False): """Compute Least Angle Regression or Lasso path using LARS algorithm [1] The optimization objective for the case method='lasso' is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 in the case of method='lars', the objective function is only known in the form of an implicit equation (see discussion in [1]) Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ----------- X : array, shape: (n_samples, n_features) Input data. y : array, shape: (n_samples) Input targets. Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \ optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features. max_iter : integer, optional (default=500) Maximum number of iterations to perform, set to infinity for no limit. alpha_min : float, optional (default=0) Minimum correlation along the path. It corresponds to the regularization parameter alpha parameter in the Lasso. method : {'lar', 'lasso'}, optional (default='lar') Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. copy_X : bool, optional (default=True) If ``False``, ``X`` is overwritten. eps : float, optional (default=``np.finfo(np.float).eps``) The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_Gram : bool, optional (default=True) If ``False``, ``Gram`` is overwritten. verbose : int (default=0) Controls output verbosity. return_path : bool, optional (default=True) If ``return_path==True`` returns the entire path, else returns only the last point of the path. return_n_iter : bool, optional (default=False) Whether to return the number of iterations. positive : boolean (default=False) Restrict coefficients to be >= 0. When using this option together with method 'lasso' the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha (neither will they when using method 'lar' ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent lasso_path function. 
Returns -------- alphas : array, shape: [n_alphas + 1] Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter``, ``n_features`` or the number of nodes in the path with ``alpha >= alpha_min``, whichever is smaller. active : array, shape [n_alphas] Indices of active variables at the end of the path. coefs : array, shape (n_features, n_alphas + 1) Coefficients along the path n_iter : int Number of iterations run. Returned only if return_n_iter is set to True. See also -------- lasso_path LassoLars Lars LassoLarsCV LarsCV sklearn.decomposition.sparse_encode References ---------- .. [1] "Least Angle Regression", Effron et al. http://statweb.stanford.edu/~tibs/ftp/lars.pdf .. [2] `Wikipedia entry on the Least-angle regression <https://en.wikipedia.org/wiki/Least-angle_regression>`_ .. [3] `Wikipedia entry on the Lasso <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_ """ n_features = X.shape[1] n_samples = y.size max_features = min(max_iter, n_features) if return_path: coefs = np.zeros((max_features + 1, n_features)) alphas = np.zeros(max_features + 1) else: coef, prev_coef = np.zeros(n_features), np.zeros(n_features) alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas? n_iter, n_active = 0, 0 active, indices = list(), np.arange(n_features) # holds the sign of covariance sign_active = np.empty(max_features, dtype=np.int8) drop = False # will hold the cholesky factorization. Only lower part is # referenced. # We are initializing this to "zeros" and not empty, because # it is passed to scipy linalg functions and thus if it has NaNs, # even if they are in the upper part that it not used, we # get errors raised. # Once we support only scipy > 0.12 we can use check_finite=False and # go back to "empty" L = np.zeros((max_features, max_features), dtype=X.dtype) swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,)) solve_cholesky, = get_lapack_funcs(('potrs',), (X,)) if Gram is None or Gram is False: Gram = None if copy_X: # force copy. setting the array to be fortran-ordered # speeds up the calculation of the (partial) Gram matrix # and allows to easily swap columns X = X.copy('F') elif isinstance(Gram, string_types) and Gram == 'auto' or Gram is True: if Gram is True or X.shape[0] > X.shape[1]: Gram = np.dot(X.T, X) else: Gram = None elif copy_Gram: Gram = Gram.copy() if Xy is None: Cov = np.dot(X.T, y) else: Cov = Xy.copy() if verbose: if verbose > 1: print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") else: sys.stdout.write('.') sys.stdout.flush() tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning equality_tolerance = np.finfo(np.float32).eps while True: if Cov.size: if positive: C_idx = np.argmax(Cov) else: C_idx = np.argmax(np.abs(Cov)) C_ = Cov[C_idx] if positive: C = C_ else: C = np.fabs(C_) else: C = 0. 
if return_path: alpha = alphas[n_iter, np.newaxis] coef = coefs[n_iter] prev_alpha = alphas[n_iter - 1, np.newaxis] prev_coef = coefs[n_iter - 1] alpha[0] = C / n_samples if alpha[0] <= alpha_min + equality_tolerance: # early stopping if abs(alpha[0] - alpha_min) > equality_tolerance: # interpolation factor 0 <= ss < 1 if n_iter > 0: # In the first iteration, all alphas are zero, the formula # below would make ss a NaN ss = ((prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])) coef[:] = prev_coef + ss * (coef - prev_coef) alpha[0] = alpha_min if return_path: coefs[n_iter] = coef break if n_iter >= max_iter or n_active >= n_features: break if not drop: ########################################################## # Append x_j to the Cholesky factorization of (Xa * Xa') # # # # ( L 0 ) # # L -> ( ) , where L * w = Xa' x_j # # ( w z ) and z = ||x_j|| # # # ########################################################## if positive: sign_active[n_active] = np.ones_like(C_) else: sign_active[n_active] = np.sign(C_) m, n = n_active, C_idx + n_active Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) indices[n], indices[m] = indices[m], indices[n] Cov_not_shortened = Cov Cov = Cov[1:] # remove Cov[0] if Gram is None: X.T[n], X.T[m] = swap(X.T[n], X.T[m]) c = nrm2(X.T[n_active]) ** 2 L[n_active, :n_active] = \ np.dot(X.T[n_active], X.T[:n_active].T) else: # swap does only work inplace if matrix is fortran # contiguous ... Gram[m], Gram[n] = swap(Gram[m], Gram[n]) Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) c = Gram[n_active, n_active] L[n_active, :n_active] = Gram[n_active, :n_active] # Update the cholesky decomposition for the Gram matrix if n_active: linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) diag = max(np.sqrt(np.abs(c - v)), eps) L[n_active, n_active] = diag if diag < 1e-7: # The system is becoming too ill-conditioned. # We have degenerate vectors in our active set. # We'll 'drop for good' the last regressor added. # Note: this case is very rare. It is no longer triggered by # the test suite. The `equality_tolerance` margin added in 0.16 # to get early stopping to work consistently on all versions of # Python including 32 bit Python under Windows seems to make it # very difficult to trigger the 'drop for good' strategy. warnings.warn('Regressors in active set degenerate. ' 'Dropping a regressor, after %i iterations, ' 'i.e. alpha=%.3e, ' 'with an active set of %i regressors, and ' 'the smallest cholesky pivot element being %.3e.' ' Reduce max_iter or increase eps parameters.' % (n_iter, alpha, n_active, diag), ConvergenceWarning) # XXX: need to figure a 'drop for good' way Cov = Cov_not_shortened Cov[0] = 0 Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) continue active.append(indices[n_active]) n_active += 1 if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '', n_active, C)) if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]: # alpha is increasing. This is because the updates of Cov are # bringing in too much numerical error that is greater than # than the remaining correlation with the # regressors. Time to bail out warnings.warn('Early stopping the lars path, as the residues ' 'are small and the current value of alpha is no ' 'longer well controlled. %i iterations, alpha=%.3e, ' 'previous alpha=%.3e, with an active set of %i ' 'regressors.' 
% (n_iter, alpha, prev_alpha, n_active), ConvergenceWarning) break # least squares solution least_squares, info = solve_cholesky(L[:n_active, :n_active], sign_active[:n_active], lower=True) if least_squares.size == 1 and least_squares == 0: # This happens because sign_active[:n_active] = 0 least_squares[...] = 1 AA = 1. else: # is this really needed ? AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active])) if not np.isfinite(AA): # L is too ill-conditioned i = 0 L_ = L[:n_active, :n_active].copy() while not np.isfinite(AA): L_.flat[::n_active + 1] += (2 ** i) * eps least_squares, info = solve_cholesky( L_, sign_active[:n_active], lower=True) tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) AA = 1. / np.sqrt(tmp) i += 1 least_squares *= AA if Gram is None: # equiangular direction of variables in the active set eq_dir = np.dot(X.T[:n_active].T, least_squares) # correlation between each unactive variables and # eqiangular vector corr_eq_dir = np.dot(X.T[n_active:], eq_dir) else: # if huge number of features, this takes 50% of time, I # think could be avoided if we just update it using an # orthogonal (QR) decomposition of X corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32)) if positive: gamma_ = min(g1, C / AA) else: g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32)) gamma_ = min(g1, g2, C / AA) # TODO: better names for these variables: z drop = False z = -coef[active] / (least_squares + tiny32) z_pos = arrayfuncs.min_pos(z) if z_pos < gamma_: # some coefficients have changed sign idx = np.where(z == z_pos)[0][::-1] # update the sign, important for LAR sign_active[idx] = -sign_active[idx] if method == 'lasso': gamma_ = z_pos drop = True n_iter += 1 if return_path: if n_iter >= coefs.shape[0]: del coef, alpha, prev_alpha, prev_coef # resize the coefs and alphas array add_features = 2 * max(1, (max_features - n_active)) coefs = np.resize(coefs, (n_iter + add_features, n_features)) coefs[-add_features:] = 0 alphas = np.resize(alphas, n_iter + add_features) alphas[-add_features:] = 0 coef = coefs[n_iter] prev_coef = coefs[n_iter - 1] alpha = alphas[n_iter, np.newaxis] prev_alpha = alphas[n_iter - 1, np.newaxis] else: # mimic the effect of incrementing n_iter on the array references prev_coef = coef prev_alpha[0] = alpha[0] coef = np.zeros_like(coef) coef[active] = prev_coef[active] + gamma_ * least_squares # update correlations Cov -= gamma_ * corr_eq_dir # See if any coefficient has changed sign if drop and method == 'lasso': # handle the case when idx is not length of 1 [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in idx] n_active -= 1 m, n = idx, n_active # handle the case when idx is not length of 1 drop_idx = [active.pop(ii) for ii in idx] if Gram is None: # propagate dropped variable for ii in idx: for i in range(ii, n_active): X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) # yeah this is stupid indices[i], indices[i + 1] = indices[i + 1], indices[i] # TODO: this could be updated residual = y - np.dot(X[:, :n_active], coef[active]) temp = np.dot(X.T[n_active], residual) Cov = np.r_[temp, Cov] else: for ii in idx: for i in range(ii, n_active): indices[i], indices[i + 1] = indices[i + 1], indices[i] Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) # Cov_n = Cov_j + x_j * X + increment(betas) TODO: # will this still work with multiple drops ? # recompute covariance. 
Probably could be done better # wrong as Xy is not swapped with the rest of variables # TODO: this could be updated residual = y - np.dot(X, coef) temp = np.dot(X.T[drop_idx], residual) Cov = np.r_[temp, Cov] sign_active = np.delete(sign_active, idx) sign_active = np.append(sign_active, 0.) # just to maintain size if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx, n_active, abs(temp))) if return_path: # resize coefs in case of early stop alphas = alphas[:n_iter + 1] coefs = coefs[:n_iter + 1] if return_n_iter: return alphas, active, coefs.T, n_iter else: return alphas, active, coefs.T else: if return_n_iter: return alpha, active, coef, n_iter else: return alpha, active, coef ############################################################################### # Estimator classes class Lars(LinearModel, RegressorMixin): """Least Angle Regression model a.k.a. LAR Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. n_nonzero_coefs : int, optional Target number of non-zero coefficients. Use ``np.inf`` for no limit. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. fit_path : boolean If True the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \ whichever is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. coef_path_ : array, shape (n_features, n_alphas + 1) \ | list of n_targets such arrays The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). intercept_ : float | array, shape (n_targets,) Independent term in decision function. 
n_iter_ : array-like or int The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> reg = linear_model.Lars(n_nonzero_coefs=1) >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True, n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] See also -------- lars_path, LarsCV sklearn.decomposition.sparse_encode """ method = 'lar' def __init__(self, fit_intercept=True, verbose=False, normalize=True, precompute='auto', n_nonzero_coefs=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True, positive=False): self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.precompute = precompute self.n_nonzero_coefs = n_nonzero_coefs self.positive = positive self.eps = eps self.copy_X = copy_X self.fit_path = fit_path def _get_gram(self, precompute, X, y): if (not hasattr(precompute, '__array__')) and ( (precompute is True) or (precompute == 'auto' and X.shape[0] > X.shape[1]) or (precompute == 'auto' and y.shape[1] > 1)): precompute = np.dot(X.T, X) return precompute def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None): """Auxiliary method to fit the model using X, y as training data""" n_features = X.shape[1] X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) if y.ndim == 1: y = y[:, np.newaxis] n_targets = y.shape[1] Gram = self._get_gram(self.precompute, X, y) self.alphas_ = [] self.n_iter_ = [] self.coef_ = np.empty((n_targets, n_features)) if fit_path: self.active_ = [] self.coef_path_ = [] for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, active, coef_path, n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=True, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.active_.append(active) self.n_iter_.append(n_iter_) self.coef_path_.append(coef_path) self.coef_[k] = coef_path[:, -1] if n_targets == 1: self.alphas_, self.active_, self.coef_path_, self.coef_ = [ a[0] for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)] self.n_iter_ = self.n_iter_[0] else: for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, _, self.coef_[k], n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=False, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.n_iter_.append(n_iter_) if n_targets == 1: self.alphas_ = self.alphas_[0] self.n_iter_ = self.n_iter_[0] self._set_intercept(X_offset, y_offset, X_scale) return self def fit(self, X, y, Xy=None): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \ optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object returns an instance of self. 
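
        Examples
        --------
        An illustrative sketch of fitting several targets jointly; the toy
        data below is arbitrary and chosen only for exposition:

        >>> import numpy as np
        >>> from sklearn import linear_model
        >>> X = np.array([[-1., 1.], [0., 0.], [1., 1.]])
        >>> Y = np.array([[-1., 1.], [0., 0.], [-1., 1.]])
        >>> reg = linear_model.Lars(n_nonzero_coefs=1).fit(X, Y)
        >>> reg.coef_.shape   # one row of coefficients per target
        (2, 2)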
""" X, y = check_X_y(X, y, y_numeric=True, multi_output=True) alpha = getattr(self, 'alpha', 0.) if hasattr(self, 'n_nonzero_coefs'): alpha = 0. # n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, Xy=Xy) return self class LassoLars(Lars): """Lasso model fit with Least Angle Regression a.k.a. Lars It is a Linear Model trained with an L1 prior as regularizer. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- alpha : float Constant that multiplies the penalty term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by :class:`LinearRegression`. For numerical reasons, using ``alpha = 0`` with the LassoLars object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. fit_path : boolean If ``True`` the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \ nodes in the path with correlation greater than ``alpha``, whichever \ is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. 
coef_path_ : array, shape (n_features, n_alphas + 1) or list If a list is passed it's expected to be one of n_targets such arrays. The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). intercept_ : float | array, shape (n_targets,) Independent term in decision function. n_iter_ : array-like or int. The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> reg = linear_model.LassoLars(alpha=0.01) >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True, fit_path=True, max_iter=500, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -0.963257...] See also -------- lars_path lasso_path Lasso LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ method = 'lasso' def __init__(self, alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True, positive=False): self.alpha = alpha self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.positive = positive self.precompute = precompute self.copy_X = copy_X self.eps = eps self.fit_path = fit_path ############################################################################### # Cross-validated estimator classes def _check_copy_and_writeable(array, copy=False): if copy or not array.flags.writeable: return array.copy() return array def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None, copy=True, method='lars', verbose=False, fit_intercept=True, normalize=True, max_iter=500, eps=np.finfo(np.float).eps, positive=False): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied; if False, they may be overwritten. method : 'lar' | 'lasso' Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. verbose : integer, optional Sets the amount of verbosity fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. See reservations for using this option in combination with method 'lasso' for expected small values of alpha in the doc of LassoLarsCV and LassoLarsIC. normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. 
If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Returns -------- alphas : array, shape (n_alphas,) Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever is smaller. active : list Indices of active variables at the end of the path. coefs : array, shape (n_features, n_alphas) Coefficients along the path residues : array, shape (n_alphas, n_samples) Residues of the prediction on the test data """ X_train = _check_copy_and_writeable(X_train, copy) y_train = _check_copy_and_writeable(y_train, copy) X_test = _check_copy_and_writeable(X_test, copy) y_test = _check_copy_and_writeable(y_test, copy) if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] alphas, active, coefs = lars_path( X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False, method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps, positive=positive) if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] return alphas, active, coefs, residues.T class LarsCV(Lars): """Cross-validated Least Angle Regression model Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount max_iter : integer, optional Maximum number of iterations to perform. normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix cannot be passed as argument since we will use only subsets of X. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. 
max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. See also -------- lars_path, LassoLars, LassoLarsCV """ method = 'lar' def __init__(self, fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.max_iter = max_iter self.cv = cv self.max_n_alphas = max_n_alphas self.n_jobs = n_jobs super(LarsCV, self).__init__(fit_intercept=fit_intercept, verbose=verbose, normalize=normalize, precompute=precompute, n_nonzero_coefs=500, eps=eps, copy_X=copy_X, fit_path=True, positive=positive) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=self.copy_X) y = as_float_array(y, copy=self.copy_X) # init cross-validation generator cv = check_cv(self.cv, classifier=False) # As we use cross-validation, the Gram matrix is not precomputed here Gram = self.precompute if hasattr(Gram, '__array__'): warnings.warn("Parameter 'precompute' cannot be an array in " "%s. Automatically switch to 'auto' instead." 
% self.__class__.__name__) Gram = 'auto' cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_lars_path_residues)( X[train], y[train], X[test], y[test], Gram=Gram, copy=False, method=self.method, verbose=max(0, self.verbose - 1), normalize=self.normalize, fit_intercept=self.fit_intercept, max_iter=self.max_iter, eps=self.eps, positive=self.positive) for train, test in cv.split(X, y)) all_alphas = np.concatenate(list(zip(*cv_paths))[0]) # Unique also sorts all_alphas = np.unique(all_alphas) # Take at most max_n_alphas values stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) all_alphas = all_alphas[::stride] mse_path = np.empty((len(all_alphas), len(cv_paths))) for index, (alphas, active, coefs, residues) in enumerate(cv_paths): alphas = alphas[::-1] residues = residues[::-1] if alphas[0] != 0: alphas = np.r_[0, alphas] residues = np.r_[residues[0, np.newaxis], residues] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] residues = np.r_[residues, residues[-1, np.newaxis]] this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) this_residues **= 2 mse_path[:, index] = np.mean(this_residues, axis=-1) mask = np.all(np.isfinite(mse_path), axis=-1) all_alphas = all_alphas[mask] mse_path = mse_path[mask] # Select the alpha that minimizes left-out error i_best_alpha = np.argmin(mse_path.mean(axis=-1)) best_alpha = all_alphas[i_best_alpha] # Store our parameters self.alpha_ = best_alpha self.cv_alphas_ = all_alphas self.mse_path_ = mse_path # Now compute the full model # it will call a lasso internally when self if LassoLarsCV # as self.method == 'lasso' self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha, Xy=None, fit_path=True) return self @property @deprecated("Attribute alpha is deprecated in 0.19 and " "will be removed in 0.21. See ``alpha_`` instead") def alpha(self): # impedance matching for the above Lars.fit (should not be documented) return self.alpha_ @property @deprecated("Attribute ``cv_mse_path_`` is deprecated in 0.18 and " "will be removed in 0.20. Use ``mse_path_`` instead") def cv_mse_path_(self): return self.mse_path_ class LassoLarsCV(LarsCV): """Cross-validated Lasso, using the LARS algorithm The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount max_iter : integer, optional Maximum number of iterations to perform. normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | 'auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix cannot be passed as argument since we will use only subsets of X. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. 
Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsCV only makes sense for problems where a sparse solution is expected and/or reached. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. Notes ----- The object solves the same problem as the LassoCV object. However, unlike the LassoCV, it find the relevant alphas values by itself. In general, because of this property, it will be more stable. However, it is more fragile to heavily multicollinear datasets. It is more efficient than the LassoCV if only a small number of features are selected compared to the total number, for instance if there are very few samples compared to the number of features. 
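    Examples
    --------
    An illustrative sketch on synthetic data; the toy data and the ``cv=3``
    setting below are arbitrary choices made only for exposition:

    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 10)
    >>> y = X[:, 0] - X[:, 1] + 0.1 * rng.randn(50)
    >>> reg = linear_model.LassoLarsCV(cv=3).fit(X, y)
    >>> alpha = reg.alpha_   # regularization strength selected by CV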
See also -------- lars_path, LassoLars, LarsCV, LassoCV """ method = 'lasso' def __init__(self, fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.fit_intercept = fit_intercept self.verbose = verbose self.max_iter = max_iter self.normalize = normalize self.precompute = precompute self.cv = cv self.max_n_alphas = max_n_alphas self.n_jobs = n_jobs self.eps = eps self.copy_X = copy_X self.positive = positive # XXX : we don't use super(LarsCV, self).__init__ # to avoid setting n_nonzero_coefs class LassoLarsIC(LassoLars): """Lasso model fit with Lars using BIC or AIC for model selection The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- criterion : 'bic' | 'aic' The type of criterion to use. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. Can be used for early stopping. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsIC only makes sense for problems where a sparse solution is expected and/or reached. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. alpha_ : float the alpha parameter chosen by the information criterion n_iter_ : int number of iterations run by lars_path to find the grid of alphas. 
criterion_ : array, shape (n_alphas,) The value of the information criteria ('aic', 'bic') across all alphas. The alpha which has the smallest information criterion is chosen. This value is larger by a factor of ``n_samples`` compared to Eqns. 2.15 and 2.16 in (Zou et al, 2007). Examples -------- >>> from sklearn import linear_model >>> reg = linear_model.LassoLarsIC(criterion='bic') >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True, max_iter=500, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(reg.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] Notes ----- The estimation of the number of degrees of freedom is given by: "On the degrees of freedom of the lasso" Hui Zou, Trevor Hastie, and Robert Tibshirani Ann. Statist. Volume 35, Number 5 (2007), 2173-2192. https://en.wikipedia.org/wiki/Akaike_information_criterion https://en.wikipedia.org/wiki/Bayesian_information_criterion See also -------- lars_path, LassoLars, LassoLarsCV """ def __init__(self, criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.criterion = criterion self.fit_intercept = fit_intercept self.positive = positive self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.copy_X = copy_X self.precompute = precompute self.eps = eps self.fit_path = True def fit(self, X, y, copy_X=True): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) training data. y : array-like, shape (n_samples,) target values. Will be cast to X's dtype if necessary copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True) X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data( X, y, self.fit_intercept, self.normalize, self.copy_X) max_iter = self.max_iter Gram = self.precompute alphas_, active_, coef_path_, self.n_iter_ = lars_path( X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method='lasso', verbose=self.verbose, max_iter=max_iter, eps=self.eps, return_n_iter=True, positive=self.positive) n_samples = X.shape[0] if self.criterion == 'aic': K = 2 # AIC elif self.criterion == 'bic': K = log(n_samples) # BIC else: raise ValueError('criterion should be either bic or aic') R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals mean_squared_error = np.mean(R ** 2, axis=0) sigma2 = np.var(y) df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom for k, coef in enumerate(coef_path_.T): mask = np.abs(coef) > np.finfo(coef.dtype).eps if not np.any(mask): continue # get the number of degrees of freedom equal to: # Xc = X[:, mask] # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs df[k] = np.sum(mask) self.alphas_ = alphas_ eps64 = np.finfo('float64').eps self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) + K * df) # Eqns. 2.15--16 in (Zou et al, 2007) n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean, Xstd) return self
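

# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  The helper
# name ``_demo_lars_usage`` and the toy data below are assumptions made purely
# for exposition: the sketch shows how the pieces defined above fit together,
# with ``lars_path`` computing a full Lasso path and ``LassoLarsIC`` picking a
# single alpha on that path via BIC.
def _demo_lars_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    # only the first two features carry signal
    y = X[:, 0] - 2 * X[:, 1] + 0.01 * rng.randn(20)
    # full regularization path: ``alphas`` decrease along the path and
    # ``coefs`` has one column per knot, shape (n_features, n_alphas + 1)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # information-criterion based selection of a single alpha on the path
    reg = LassoLarsIC(criterion='bic').fit(X, y)
    return alphas, active, coefs, reg.alpha_, reg.coef_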
bsd-3-clause
madjelan/scikit-learn
sklearn/preprocessing/data.py
113
56747
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils.extmath import row_norms from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, min_max_axis, inplace_row_scale) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] def _mean_and_std(X, axis=0, with_mean=True, with_std=True): """Compute mean and std deviation for centering, scaling. Zero valued std components are reset to 1.0 to avoid NaNs when scaling. """ X = np.asarray(X) Xr = np.rollaxis(X, axis) if with_mean: mean_ = Xr.mean(axis=0) else: mean_ = None if with_std: std_ = Xr.std(axis=0) std_ = _handle_zeros_in_scale(std_) else: std_ = None return mean_, std_ def _handle_zeros_in_scale(scale): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == 0: scale = 1. elif isinstance(scale, np.ndarray): scale[scale == 0.0] = 1.0 scale[~np.isfinite(scale)] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like or CSR matrix. The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) mean_, std_ = _mean_and_std( X, axis, with_mean=with_mean, with_std=with_std) if copy: X = X.copy() # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: Xr /= std_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # std_ is very small so that mean_2 = mean_1/std_ > 0, even if # mean_1 was close to zero. The problem is thus essentially due # to the lack of precision of mean_. A solution is then to # substract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. 
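
        Examples
        --------
        A minimal illustrative fit/transform round trip; the array below is
        arbitrary and chosen only for exposition:

        >>> import numpy as np
        >>> from sklearn.preprocessing import MinMaxScaler
        >>> X = np.array([[1., 2.], [3., 4.]])
        >>> scaler = MinMaxScaler().fit(X)
        >>> X_scaled = scaler.transform(X)   # each column now spans [0, 1]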
""" X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min data_range = _handle_zeros_in_scale(data_range) self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range: tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). """ s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. Read more in the :ref:`User Guide <preprocessing_scaler>`. 
Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. std_ : array of floats with shape [n_features] The standard deviation for each feature in the training set. Set to one if the standard deviation is zero for a given feature. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : array-like or CSR matrix with shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. """ X = check_array(X, accept_sparse='csr', copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") self.mean_ = None if self.with_std: var = mean_variance_axis(X, axis=0)[1] self.std_ = np.sqrt(var) self.std_ = _handle_zeros_in_scale(self.std_) else: self.std_ = None return self else: self.mean_, self.std_ = _mean_and_std( X, axis=0, with_mean=self.with_mean, with_std=self.with_std) return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.std_ is not None: inplace_column_scale(X, 1 / self.std_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.std_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to scale along the features axis. 
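
        Examples
        --------
        An illustrative round trip on made-up data (the array below is
        arbitrary and used only for exposition):

        >>> import numpy as np
        >>> from sklearn.preprocessing import StandardScaler
        >>> X = np.array([[1., 2.], [3., 4.]])
        >>> scaler = StandardScaler().fit(X)
        >>> X_back = scaler.inverse_transform(scaler.transform(X))
        >>> np.allclose(X, X_back)
        True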
""" check_is_fitted(self, 'std_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.std_ is not None: inplace_column_scale(X, self.std_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.std_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. """ def __init__(self, copy=True): self.copy = copy def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) scales = np.maximum(np.abs(mins), np.abs(maxs)) else: scales = np.abs(X).max(axis=0) scales = np.array(scales) scales = scales.reshape(-1) self.scale_ = _handle_zeros_in_scale(scales) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : array-like or CSR matrix. The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) else: inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
""" s = MaxAbsScaler(copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the Interquartile Range (IQR). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using mean and variance. :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. http://en.wikipedia.org/wiki/Median_(statistics) http://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like with shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. 
""" if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q = np.percentile(X, (25, 75), axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, 1.0 / self.scale_) elif self.axis == 0: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like or CSR matrix. The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if sparse.issparse(X): if self.with_scaling: if X.shape[0] == 1: inplace_row_scale(X, self.scale_) else: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like. The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.RobustScaler` to perform centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. 
For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0, 0, 1], [ 1, 2, 3, 4, 6, 9], [ 1, 4, 5, 16, 20, 25]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1, 0, 1, 0], [ 1, 2, 3, 6], [ 1, 4, 5, 20]]) Attributes ---------- powers_ : array, shape (n_input_features, n_output_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. See :ref:`examples/linear_model/plot_polynomial_interpolation.py <example_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(np.bincount(c, minlength=self.n_input_features_) for c in combinations) def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array with shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. 
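        Examples
        --------
        A small illustrative check (toy input; the output column order follows
        ``powers_``):

        >>> import numpy as np
        >>> from sklearn.preprocessing import PolynomialFeatures
        >>> poly = PolynomialFeatures(degree=2).fit(np.arange(4).reshape(2, 2))
        >>> poly.powers_
        array([[0, 0],
               [1, 0],
               [0, 1],
               [2, 0],
               [1, 1],
               [0, 2]])
        >>> poly.transform(np.array([[2., 3.]]))
        array([[ 1.,  2.,  3.,  4.,  6.,  9.]])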
""" check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Normalizer` to perform normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms) X /= norms[:, np.newaxis] if axis == 0: X = X.T return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). 
Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. 
This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K) if copy: K = K.copy() K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : array or scipy.sparse matrix with shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo']) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. 
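            # In CSC layout, column j occupies data[indptr[j]:indptr[j + 1]].
            # The dummy feature contributes n_samples stored values at the
            # front of `data`, so after the shift above, prepending a 0
            # (next statement) makes the dummy column span data[0:n_samples].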
indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) X = check_array(X, accept_sparse='csc', copy=copy) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : maximum value for all features. - array : maximum value per feature. categorical_features: "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. 
Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) n_values_ : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and two samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'float'>, handle_unknown='error', n_values='auto', sparse=True) >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float, sparse=True, handle_unknown='error'): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype self.sparse = sparse self.handle_unknown = handle_unknown def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): if (np.max(X, axis=0) >= self.n_values).any(): raise ValueError("Feature out of bounds for n_values=%d" % self.n_values) n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out if self.sparse else out.toarray() def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. 
""" return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Assumes X contains only categorical features.""" X = check_array(X, dtype=np.int) if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) # We use only those catgorical features of X that are known using fit. # i.e lesser than n_values_ using mask. # This means, if self.handle_unknown is "ignore", the row_indices and # col_indices corresponding to the unknown categorical feature are # ignored. mask = (X < self.n_values_).ravel() if np.any(~mask): if self.handle_unknown not in ['error', 'ignore']: raise ValueError("handle_unknown should be either error or " "unknown got %s" % self.handle_unknown) if self.handle_unknown == 'error': raise ValueError("unknown categorical feature present %s " "during transform." % X[~mask]) column_indices = (X + indices[:-1]).ravel()[mask] row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features)[mask] data = np.ones(np.sum(mask)) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out if self.sparse else out.toarray() def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- X_out : sparse matrix if sparse=True else a 2-d array, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
bsd-3-clause
Carmezim/tensorflow
tensorflow/contrib/learn/python/learn/estimators/estimator.py
5
55283
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import copy import os import tempfile import numpy as np import six from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers from tensorflow.contrib import metrics as metrics_lib from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_args from tensorflow.contrib.framework import list_variables from tensorflow.contrib.framework import load_variable from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import monitors as monitor_lib from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn from tensorflow.contrib.learn.python.learn.estimators import constants from tensorflow.contrib.learn.python.learn.estimators import metric_key from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import tensor_signature from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.contrib.learn.python.learn.learn_io import data_feeder from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils from tensorflow.contrib.training.python.training import evaluation from tensorflow.core.framework import summary_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import resources from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import device_setter from tensorflow.python.training import monitored_session from tensorflow.python.training import saver from tensorflow.python.training import summary_io from tensorflow.python.util import compat from tensorflow.python.util import tf_decorator from tensorflow.python.util 
import tf_inspect AS_ITERABLE_DATE = '2016-09-15' AS_ITERABLE_INSTRUCTIONS = ( 'The default behavior of predict() is changing. The default value for\n' 'as_iterable will change to True, and then the flag will be removed\n' 'altogether. The behavior of this flag is described below.') SCIKIT_DECOUPLE_DATE = '2016-12-01' SCIKIT_DECOUPLE_INSTRUCTIONS = ( 'Estimator is decoupled from Scikit Learn interface by moving into\n' 'separate class SKCompat. Arguments x, y and batch_size are only\n' 'available in the SKCompat class, Estimator will only accept input_fn.\n' 'Example conversion:\n' ' est = Estimator(...) -> est = SKCompat(Estimator(...))') def _verify_input_args(x, y, input_fn, feed_fn, batch_size): """Verifies validity of co-existance of input arguments.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') else: if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided. """ _verify_input_args(x, y, input_fn, feed_fn, batch_size) if input_fn is not None: return input_fn, feed_fn df = data_feeder.setup_train_data_feeder( x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() def infer_real_valued_columns_from_input_fn(input_fn): """Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor` or `Tensor`. labels - `Tensor` of label values. Returns: List of `FeatureColumn` objects. """ with ops.Graph().as_default(): features, _ = input_fn() return layers.infer_real_valued_columns(features) def infer_real_valued_columns_from_input(x): """Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects. """ input_fn, _ = _get_input_fn( x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn) def _model_fn_args(fn): """Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of `functools.partial`). Returns: `tuple` of string argument names. 
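  For example (illustrative sketch; `my_model_fn` is a hypothetical user
  function):

    def my_model_fn(features, labels, mode, params):
      ...

    _model_fn_args(my_model_fn)
    # -> ('features', 'labels', 'mode', 'params')
    _model_fn_args(functools.partial(my_model_fn, params={}))
    # -> ('features', 'labels', 'mode')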
Raises: ValueError: if partial function has positionally bound arguments """ _, fn = tf_decorator.unwrap(fn) if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'): # Handle functools.partial and similar objects. return tuple([ arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):] if arg not in set(fn.keywords.keys()) ]) # Handle function. return tuple(tf_inspect.getargspec(fn).args) def _get_replica_device_setter(config): """Creates a replica device setter if required. Args: config: A RunConfig instance. Returns: A replica device setter, or None. """ ps_ops = [ 'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable' ] if config.task_type: worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id) else: worker_device = '/job:worker' if config.num_ps_replicas > 0: return device_setter.replica_device_setter( ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None def _make_metrics_ops(metrics, features, labels, predictions): """Add metrics based on `features`, `labels`, and `predictions`. `metrics` contains a specification for how to run metrics. It is a dict mapping friendly names to either `MetricSpec` objects, or directly to a metric function (assuming that `predictions` and `labels` are single tensors), or to `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and `labels` to `metric` (assuming `labels` is a single tensor). Users are encouraged to use `MetricSpec` objects, which are more flexible and cleaner. They also lead to clearer errors. Args: metrics: A dict mapping names to metrics specification, for example `MetricSpec` objects. features: A dict of tensors returned from an input_fn as features/inputs. labels: A single tensor or a dict of tensors returned from an input_fn as labels. predictions: A single tensor or a dict of tensors output from a model as predictions. Returns: A dict mapping the friendly given in `metrics` to the result of calling the given metric function. Raises: ValueError: If metrics specifications do not work with the type of `features`, `labels`, or `predictions` provided. Mostly, a dict is given but no pred_name specified. """ metrics = metrics or {} # If labels is a dict with a single key, unpack into a single tensor. labels_tensor_or_dict = labels if isinstance(labels, dict) and len(labels) == 1: labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} # Iterate in lexicographic order, so the graph is identical among runs. for name, metric in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue # TODO(b/31229024): Remove the rest of this loop logging.warning('Please specify metrics using MetricSpec. Using bare ' 'functions or (key, fn) tuples is deprecated and support ' 'for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): # Multi-head metrics. if len(name) != 2: raise ValueError('Invalid metric for {}. It returned a tuple with ' 'len {}, expected 2.'.format(name, len(name))) if not isinstance(predictions, dict): raise ValueError( 'Metrics passed provide (name, prediction), ' 'but predictions are not dict. ' 'Metrics: %s, Predictions: %s.' % (metrics, predictions)) # Here are two options: labels are single Tensor or a dict. 
if isinstance(labels, dict) and name[1] in labels: # If labels are dict and the prediction name is in it, apply metric. result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: # Otherwise pass the labels to the metric. result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: # Single head metrics. if isinstance(predictions, dict): raise ValueError( 'Metrics passed provide only name, no prediction, ' 'but predictions are dict. ' 'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict)) result[name] = metric(predictions, labels_tensor_or_dict) return result def _dict_to_str(dictionary): """Get a `str` representation of a `dict`. Args: dictionary: The `dict` to be represented as `str`. Returns: A `str` representing the `dictionary`. """ return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items())) def _write_dict_to_summary(output_dir, dictionary, current_global_step): """Writes a `dict` into summary file in given output directory. Args: output_dir: `str`, directory to write the summary file in. dictionary: the `dict` to be written to summary file. current_global_step: `int`, the current global step. """ logging.info('Saving dict for global step %d: %s', current_global_step, _dict_to_str(dictionary)) summary_writer = summary_io.SummaryWriterCache.get(output_dir) summary_proto = summary_pb2.Summary() for key in dictionary: if dictionary[key] is None: continue value = summary_proto.value.add() value.tag = key if (isinstance(dictionary[key], np.float32) or isinstance(dictionary[key], float)): value.simple_value = float(dictionary[key]) else: logging.warn('Skipping summary for %s, must be a float or np.float32.', key) summary_writer.add_summary(summary_proto, current_global_step) summary_writer.flush() class BaseEstimator( sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable): """Abstract BaseEstimator class to train and evaluate TensorFlow models. Users should not instantiate or subclass this class. Instead, use `Estimator`. """ __metaclass__ = abc.ABCMeta # Note that for Google users, this is overriden with # learn_runner.EstimatorConfig. # TODO(wicke): Remove this once launcher takes over config functionality _Config = run_config.RunConfig # pylint: disable=invalid-name def __init__(self, model_dir=None, config=None): """Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. config: A RunConfig instance. """ # Create a run configuration. if config is None: self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config if self._config.session_config is None: self._session_config = config_pb2.ConfigProto(allow_soft_placement=True) else: self._session_config = self._config.session_config # Model directory. if (model_dir is not None) and (self._config.model_dir is not None): if model_dir != self._config.model_dir: # TODO(b/9965722): remove this suppression after it is no longer # necessary. # pylint: disable=g-doc-exception raise ValueError( "model_dir are set both in constructor and RunConfig, but with " "different values. 
In constructor: '{}', in RunConfig: " "'{}' ".format(model_dir, self._config.model_dir)) self._model_dir = model_dir or self._config.model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if self._config.model_dir is None: self._config = self._config.replace(model_dir=self._model_dir) logging.info('Using config: %s', str(vars(self._config))) # Set device function depending if there are replicas or not. self._device_fn = _get_replica_device_setter(self._config) # Features and labels TensorSignature objects. # TODO(wicke): Rename these to something more descriptive self._features_info = None self._labels_info = None self._graph = None @property def config(self): # TODO(wicke): make RunConfig immutable, and then return it without a copy. return copy.deepcopy(self._config) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Trainable`. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`. """ if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') _verify_input_args(x, y, input_fn, None, batch_size) if x is not None: SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors) return self if max_steps is not None: try: start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return self except: # pylint: disable=bare-except pass hooks = monitor_lib.replace_monitors_with_hooks(monitors, self) if steps is not None or max_steps is not None: hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps)) loss = self._train_model(input_fn=input_fn, hooks=hooks) logging.info('Loss for final step: %s.', loss) return self @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def partial_fit( self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of labels. The training label values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. 
Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided. """ logging.warning('The current implementation of partial_fit is not optimized' ' for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None, hooks=None, log_progress=True): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Evaluable`. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`. """ _verify_input_args(x, y, input_fn, feed_fn, batch_size) if x is not None: return SKCompat(self).score(x, y, batch_size, steps, metrics) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name, checkpoint_path=checkpoint_path, hooks=hooks, log_progress=log_progress) if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('batch_size', None), ('as_iterable', True) ) def predict( self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: A numpy array of predicted classes or regression values if the constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict` of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of predictions if as_iterable is True. Raises: ValueError: If x and input_fn are both provided or both `None`. """ _verify_input_args(x, None, input_fn, None, batch_size) if x is not None and not as_iterable: return SKCompat(self).predict(x, batch_size) input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size) return self._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable) def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor. """ return load_variable(self.model_dir, name) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. 
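    Example (illustrative; the actual names depend on the model_fn):

      names = estimator.get_variable_names()
      # e.g. ['global_step', 'linear/weights', 'linear/bias']
      value = estimator.get_variable_value(names[0])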
""" return [name for name, _ in list_variables(self.model_dir)] @property def model_dir(self): return self._model_dir @deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.') def export(self, export_dir, input_fn=export._default_input_fn, # pylint: disable=protected-access input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None, checkpoint_path=None): """Exports inference graph into given dir. Args: export_dir: A string containing a directory to write the exported graph and checkpoints. input_fn: If `use_deprecated_input_fn` is true, then a function that given `Tensor` of `Example` strings, parses it into features that are then passed to the model. Otherwise, a function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: Only used if `use_deprecated_input_fn` is false. String key into the features dict returned by `input_fn` that corresponds to a the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). use_deprecated_input_fn: Determines the signature format of `input_fn`. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `Tensor` or `dict` of `Tensor`s for predictions. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `signature_fn`. Optional. If `None`, predictions will pass to `signature_fn` without filtering. default_batch_size: Default batch size of the `Example` placeholder. exports_to_keep: Number of exports to keep. checkpoint_path: the checkpoint path of the model to be exported. If it is `None` (which is default), will use the latest checkpoint in export_dir. Returns: The string path to the exported directory. NB: this functionality was added ca. 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because subclasses are not returning a value. """ # pylint: disable=protected-access return export._export_estimator( estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep, checkpoint_path=checkpoint_path) @abc.abstractproperty def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass @abc.abstractproperty def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. 
metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: A `ModelFnOps` object. """ raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator') @deprecated( '2016-09-23', 'The signature of the input_fn accepted by export is changing to be ' 'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, ' 'which makes this function useless. This will be removed after the ' 'deprecation date.') def _get_feature_ops_from_example(self, examples_batch): """Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called). """ if self._features_info is None: raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures( self._features_info, examples_batch) def _check_inputs(self, features, labels): if self._features_info is not None: logging.debug('Given features: %s, required signatures: %s.', str(features), str(self._features_info)) if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) logging.debug('Setting feature info to %s.', str(self._features_info)) if labels is not None: if self._labels_info is not None: logging.debug('Given labels: %s, required signatures: %s.', str(labels), str(self._labels_info)) if not tensor_signature.tensors_compatible(labels, self._labels_info): raise ValueError('Labels are incompatible with given information. ' 'Given labels: %s, required signatures: %s.' % (str(labels), str(self._labels_info))) else: self._labels_info = tensor_signature.create_signatures(labels) logging.debug('Setting labels info to %s', str(self._labels_info)) def _extract_metric_update_ops(self, eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} for name, metric_ops in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if len(metric_ops) == 2: value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning( 'Ignoring metric {}. It returned a list|tuple with len {}, ' 'expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return update_ops, value_ops def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None, name='', checkpoint_path=None, hooks=None, log_progress=True): # TODO(wicke): Remove this once Model and associated code are gone. 
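    # High-level flow (summary comment): bail out when the run config's
    # execution_mode excludes evaluation, resolve the checkpoint to evaluate,
    # rebuild the eval graph via _get_eval_ops, split metric ops into
    # update/value ops, run evaluation.evaluate_once, and write the results
    # to a summary file under <model_dir>/eval[_<name>].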
if (hasattr(self._config, 'execution_mode') and self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')): return None, None # Check that model has been trained (if nothing has been set explicitly). if not checkpoint_path: latest_path = saver.latest_checkpoint(self._model_dir) if not latest_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) checkpoint_path = latest_path # Setup output directory. eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) model_fn_results = self._get_eval_ops(features, labels, metrics) eval_dict = model_fn_results.eval_metric_ops update_op, eval_dict = self._extract_metric_update_ops(eval_dict) # We need to copy the hook array as we modify it, thus [:]. hooks = hooks[:] if hooks else [] if feed_fn: hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn)) if steps: hooks.append( evaluation.StopAfterNEvalsHook( steps, log_progress=log_progress)) global_step_key = 'global_step' while global_step_key in eval_dict: global_step_key = '_' + global_step_key eval_dict[global_step_key] = global_step eval_results = evaluation.evaluate_once( checkpoint_path=checkpoint_path, master=self._config.evaluation_master, scaffold=model_fn_results.scaffold, eval_ops=update_op, final_ops=eval_dict, hooks=hooks, config=self._session_config) current_global_step = eval_results[global_step_key] _write_dict_to_summary(eval_dir, eval_results, current_global_step) return eval_results, current_global_step def _get_features_from_input_fn(self, input_fn): result = input_fn() if isinstance(result, (list, tuple)): return result[0] return result def _infer_model(self, input_fn, feed_fn=None, outputs=None, as_iterable=True, iterate_batches=False): # Check that model has been trained. checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." 
% self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features = self._get_features_from_input_fn(input_fn) infer_ops = self._get_predict_ops(features) predictions = self._filter_predictions(infer_ops.predictions, outputs) mon_sess = monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, scaffold=infer_ops.scaffold, config=self._session_config)) if not as_iterable: with mon_sess: if not mon_sess.should_stop(): return mon_sess.run(predictions, feed_fn() if feed_fn else None) else: return self._predict_generator(mon_sess, predictions, feed_fn, iterate_batches) def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches): with mon_sess: while not mon_sess.should_stop(): preds = mon_sess.run(predictions, feed_fn() if feed_fn else None) if iterate_batches: yield preds elif not isinstance(predictions, dict): for pred in preds: yield pred else: first_tensor = list(preds.values())[0] if isinstance(first_tensor, sparse_tensor.SparseTensorValue): batch_length = first_tensor.dense_shape[0] else: batch_length = first_tensor.shape[0] for i in range(batch_length): yield {key: value[i] for key, value in six.iteritems(preds)} if self._is_input_constant(feed_fn, mon_sess.graph): return def _is_input_constant(self, feed_fn, graph): # If there are no queue_runners, the input `predictions` is a # constant, and we should stop after the first epoch. If, # instead, there are queue_runners, eventually they should throw # an `OutOfRangeError`. if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS): return False # data_feeder uses feed_fn to generate `OutOfRangeError`. if feed_fn is not None: return False return True def _filter_predictions(self, predictions, outputs): if not outputs: return predictions if not isinstance(predictions, dict): raise ValueError( 'outputs argument is not valid in case of non-dict predictions.') existing_keys = predictions.keys() predictions = { key: value for key, value in six.iteritems(predictions) if key in outputs } if not predictions: raise ValueError('Expected to run at least one output from %s, ' 'provided %s.' 
% (existing_keys, outputs)) return predictions def _train_model(self, input_fn, hooks): all_hooks = [] self._graph = ops.Graph() with self._graph.as_default() as g, g.device(self._device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) model_fn_ops = self._get_train_ops(features, labels) ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss) all_hooks.extend([ basic_session_run_hooks.NanTensorHook(model_fn_ops.loss), basic_session_run_hooks.LoggingTensorHook( { 'loss': model_fn_ops.loss, 'step': global_step }, every_n_iter=100) ]) all_hooks.extend(hooks) scaffold = model_fn_ops.scaffold or monitored_session.Scaffold() if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)): ops.add_to_collection( ops.GraphKeys.SAVERS, saver.Saver( sharded=True, max_to_keep=self._config.keep_checkpoint_max, defer_build=True, save_relative_paths=True)) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): saver_hook_exists = any([ isinstance(h, basic_session_run_hooks.CheckpointSaverHook) for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks + model_fn_ops.training_chief_hooks) ]) if not saver_hook_exists: chief_hooks = [ basic_session_run_hooks.CheckpointSaverHook( self._model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) ] with monitored_session.MonitoredTrainingSession( master=self._config.master, is_chief=self._config.is_chief, checkpoint_dir=self._model_dir, scaffold=scaffold, hooks=all_hooks + model_fn_ops.training_hooks, chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks, save_checkpoint_secs=0, # Saving is handled by a hook. save_summaries_steps=self._config.save_summary_steps, config=self._session_config ) as mon_sess: loss = None while not mon_sess.should_stop(): _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss]) summary_io.SummaryWriterCache.clear() return loss def _identity_feature_engineering_fn(features, labels): return features, labels class Estimator(BaseEstimator): """Estimator class is the basic TensorFlow model trainer/evaluator. """ def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): """Constructs an `Estimator` instance. Args: model_fn: Model function. Follows the signature: * Args: * `features`: single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head models). If mode is `ModeKeys.INFER`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode`: Optional. Specifies if this training, evaluation or prediction. See `ModeKeys`. * `params`: Optional `dict` of hyperparameters. Will receive what is passed to Estimator in `params` parameter. This allows to configure Estimators from hyper parameter tuning. * `config`: Optional configuration object. Will receive what is passed to Estimator in `config` parameter, or the default `config`. Allows updating things in your model_fn based on configuration such as `num_ps_replicas`. * `model_dir`: Optional directory where model parameters, graph etc are saved. Will receive what is passed to Estimator in `model_dir` parameter, or the default `model_dir`. 
Allows updating things in your model_fn that expect model_dir, such as training hooks. * Returns: `ModelFnOps` Also supports a legacy signature which returns tuple of: * predictions: `Tensor`, `SparseTensor` or dictionary of same. Can also be any type that is convertible to a `Tensor` or `SparseTensor`, or dictionary of same. * loss: Scalar loss `Tensor`. * train_op: Training update `Tensor` or `Operation`. Supports next three signatures for the function: * `(features, labels) -> (predictions, loss, train_op)` * `(features, labels, mode) -> (predictions, loss, train_op)` * `(features, labels, mode, params) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config, model_dir) -> (predictions, loss, train_op)` model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: Configuration object. params: `dict` of hyper parameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into `model_fn`. Please check `model_fn` for a definition of features and labels. Raises: ValueError: parameters of `model_fn` don't match `params`. """ super(Estimator, self).__init__(model_dir=model_dir, config=config) if model_fn is not None: # Check number of arguments of the given function matches requirements. model_fn_args = _model_fn_args(model_fn) if params is not None and 'params' not in model_fn_args: raise ValueError('Estimator\'s model_fn (%s) has less than 4 ' 'arguments, but not None params (%s) are passed.' % (model_fn, params)) if params is None and 'params' in model_fn_args: logging.warning('Estimator\'s model_fn (%s) includes params ' 'argument, but params are not passed to Estimator.', model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = ( feature_engineering_fn or _identity_feature_engineering_fn) def _call_model_fn(self, features, labels, mode): """Calls model function with support of 2, 3 or 4 arguments. Args: features: features dict. labels: labels dict. mode: ModeKeys Returns: A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a `ModelFnOps` object. Raises: ValueError: if model_fn returns invalid objects. """ features, labels = self._feature_engineering_fn(features, labels) model_fn_args = _model_fn_args(self._model_fn) kwargs = {} if 'mode' in model_fn_args: kwargs['mode'] = mode if 'params' in model_fn_args: kwargs['params'] = self.params if 'config' in model_fn_args: kwargs['config'] = self.config if 'model_dir' in model_fn_args: kwargs['model_dir'] = self.model_dir model_fn_results = self._model_fn(features, labels, **kwargs) if isinstance(model_fn_results, model_fn_lib.ModelFnOps): return model_fn_results # Here model_fn_results should be a tuple with 3 elements. if len(model_fn_results) != 3: raise ValueError('Unrecognized value returned by model_fn, ' 'please return ModelFnOps.') return model_fn_lib.ModelFnOps( mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2]) def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. 
This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. """ return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN) def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: `ModelFnOps` object. Raises: ValueError: if `metrics` don't match `labels`. """ model_fn_ops = self._call_model_fn( features, labels, model_fn_lib.ModeKeys.EVAL) features, labels = self._feature_engineering_fn(features, labels) # Custom metrics should overwrite defaults. if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops( metrics, features, labels, model_fn_ops.predictions)) if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops: model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = ( metrics_lib.streaming_mean(model_fn_ops.loss)) return model_fn_ops def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. """ labels = tensor_signature.create_placeholders_from_signatures( self._labels_info) return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER) def export_savedmodel( self, export_dir_base, serving_input_fn, default_output_alternative_key=None, assets_extra=None, as_text=False, checkpoint_path=None): """Exports inference graph as a SavedModel into given dir. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. serving_input_fn: A function that takes no argument and returns an `InputFnOps`. default_output_alternative_key: the name of the head to serve when none is specified. Not needed for single-headed models. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel. Each key should give the destination path (including the filename) relative to the assets.extra directory. The corresponding value gives the full path of the source file to be copied. For example, the simple case of copying a single file without renaming it is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`. as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If None (the default), the most recent checkpoint found within the model directory is chosen. Returns: The string path to the exported directory. Raises: ValueError: if an unrecognized export_type is requested. 
""" if serving_input_fn is None: raise ValueError('serving_input_fn must be defined.') with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) # Call the serving_input_fn and collect the input alternatives. input_ops = serving_input_fn() input_alternatives, features = ( saved_model_export_utils.get_input_alternatives(input_ops)) # TODO(b/34388557) This is a stopgap, pending recording model provenance. # Record which features are expected at serving time. It is assumed that # these are the features that were used in training. for feature_key in input_ops.features.keys(): ops.add_to_collection( constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key) # Call the model_fn and collect the output alternatives. model_fn_ops = self._call_model_fn(features, None, model_fn_lib.ModeKeys.INFER) output_alternatives, actual_default_output_alternative_key = ( saved_model_export_utils.get_output_alternatives( model_fn_ops, default_output_alternative_key)) # Build the SignatureDefs from all pairs of input and output alternatives signature_def_map = saved_model_export_utils.build_all_signature_defs( input_alternatives, output_alternatives, actual_default_output_alternative_key) if not checkpoint_path: # Locate the latest checkpoint checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) if (model_fn_ops.scaffold is not None and model_fn_ops.scaffold.saver is not None): saver_for_restore = model_fn_ops.scaffold.saver else: saver_for_restore = saver.Saver(sharded=True) with tf_session.Session('') as session: saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group( variables.local_variables_initializer(), resources.initialize_resources(resources.shared_resources()), lookup_ops.tables_initializer()) # Perform the export builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(as_text) # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) gfile.MakeDirs(dest_path) gfile.Copy(source, dest_absolute) return export_dir # For time of deprecation x,y from Estimator allow direct access. 
# pylint: disable=protected-access class SKCompat(sklearn.BaseEstimator): """Scikit learn wrapper for TensorFlow Learn Estimator.""" def __init__(self, estimator): self._estimator = estimator def fit(self, x, y, batch_size=128, steps=None, max_steps=None, monitors=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) all_monitors = [] if feed_fn: all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)] if monitors: all_monitors.extend(monitors) self._estimator.fit(input_fn=input_fn, steps=steps, max_steps=max_steps, monitors=all_monitors) return self def score(self, x, y, batch_size=128, steps=None, metrics=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._estimator._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name='score') if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results def predict(self, x, batch_size=128, outputs=None): input_fn, feed_fn = _get_input_fn( x, None, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) results = list( self._estimator._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=True, iterate_batches=True)) if not isinstance(results[0], dict): return np.concatenate([output for output in results], axis=0) return { key: np.concatenate( [output[key] for output in results], axis=0) for key in results[0] }
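# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module API): a minimal
# way to drive the SKCompat wrapper above with in-memory numpy arrays,
# assuming a TF 1.x environment. The linear model_fn and the synthetic data
# below are hypothetical and exist only for illustration; the model_fn follows
# the legacy `(features, labels) -> (predictions, loss, train_op)` signature
# documented in Estimator.__init__.
def _example_skcompat_usage():
  """Fits, scores and predicts with SKCompat on synthetic data (sketch)."""
  import numpy as np  # local imports keep the sketch self-contained
  import tensorflow as tf

  def linear_model_fn(features, labels):
    # Single linear layer producing one output per example.
    predictions = tf.contrib.layers.linear(features, 1)
    loss = tf.losses.mean_squared_error(labels, predictions)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
    return predictions, loss, train_op

  # Synthetic regression data: y is the row-sum of x.
  x_data = np.random.rand(256, 3).astype(np.float32)
  y_data = x_data.sum(axis=1, keepdims=True)

  sk_model = SKCompat(Estimator(model_fn=linear_model_fn))
  sk_model.fit(x_data, y_data, batch_size=32, steps=200)
  scores = sk_model.score(x_data, y_data, batch_size=32, steps=1)
  predictions = sk_model.predict(x_data, batch_size=32)
  return scores, predictions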
apache-2.0
josenavas/labman
labman/db/process.py
1
115578
# ---------------------------------------------------------------------------- # Copyright (c) 2017-, labman development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from datetime import date, datetime from io import StringIO from itertools import chain import re from json import dumps import numpy as np import pandas as pd from . import base from . import sql_connection from . import user as user_module from . import plate as plate_module from . import container as container_module from . import composition as composition_module from . import equipment as equipment_module from .study import Study class Process(base.LabmanObject): """Base process object Attributes ---------- id date personnel """ @staticmethod def factory(process_id): """Initializes the correct Process subclass Parameters ---------- process_id : int The process id Returns ------- An instance of a subclass of Process """ factory_classes = { # 'primer template creation': TODO, 'primer working plate creation': PrimerWorkingPlateCreationProcess, 'sample plating': SamplePlatingProcess, 'reagent creation': ReagentCreationProcess, 'gDNA extraction': GDNAExtractionProcess, '16S library prep': LibraryPrep16SProcess, 'shotgun library prep': LibraryPrepShotgunProcess, 'quantification': QuantificationProcess, 'gDNA normalization': NormalizationProcess, 'compress gDNA plates': GDNAPlateCompressionProcess, 'pooling': PoolingProcess, 'sequencing': SequencingProcess} with sql_connection.TRN as TRN: sql = """SELECT description FROM qiita.process_type JOIN qiita.process USING (process_type_id) WHERE process_id = %s""" TRN.add(sql, [process_id]) p_type = TRN.execute_fetchlast() constructor = factory_classes[p_type] if constructor._table == 'qiita.process': instance = constructor(process_id) else: sql = """SELECT {} FROM {} WHERE process_id = %s""".format( constructor._id_column, constructor._table) TRN.add(sql, [process_id]) subclass_id = TRN.execute_fetchlast() instance = constructor(subclass_id) return instance @classmethod def _common_creation_steps(cls, user, process_date=None): if process_date is None: process_date = date.today() with sql_connection.TRN as TRN: sql = """SELECT process_type_id FROM qiita.process_type WHERE description = %s""" TRN.add(sql, [cls._process_type]) pt_id = TRN.execute_fetchlast() sql = """INSERT INTO qiita.process (process_type_id, run_date, run_personnel_id) VALUES (%s, %s, %s) RETURNING process_id""" TRN.add(sql, [pt_id, process_date, user.id]) p_id = TRN.execute_fetchlast() return p_id def _get_process_attr(self, attr): """Returns the value of the given process attribute Parameters ---------- attr : str The attribute to retrieve Returns ------- Object The attribute """ with sql_connection.TRN as TRN: sql = """SELECT {} FROM qiita.process JOIN {} USING (process_id) WHERE {} = %s""".format(attr, self._table, self._id_column) TRN.add(sql, [self.id]) return TRN.execute_fetchlast() @property def date(self): return self._get_process_attr('run_date') @property def personnel(self): return user_module.User(self._get_process_attr('run_personnel_id')) @property def process_id(self): return self._get_process_attr('process_id') @property def plates(self): """The plates being extracted by this process Returns ------- plate : list of labman.db.Plate The extracted plates """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM 
qiita.container LEFT JOIN qiita.well USING (container_id) WHERE latest_upstream_process_id = %s ORDER BY plate_id""" TRN.add(sql, [self.process_id]) plate_ids = TRN.execute_fetchflatten() return [plate_module.Plate(plate_id) for plate_id in plate_ids] class _Process(Process): """Process object Not all processes have a specific subtable, so we need to override the date and personnel attributes Attributes ---------- id date personnel """ _table = 'qiita.process' _id_column = 'process_id' @property def date(self): return self._get_attr('run_date') @property def personnel(self): return user_module.User(self._get_attr('run_personnel_id')) @property def process_id(self): return self._get_attr('process_id') class SamplePlatingProcess(_Process): """Sample plating process""" _process_type = 'sample plating' @classmethod def create(cls, user, plate_config, plate_ext_id, volume=None): """Creates a new sample plating process Parameters ---------- user : labman.db.user.User User performing the plating plate_config : labman.db.PlateConfiguration The sample plate configuration plate_ext_id : str The external plate id volume : float, optional Starting well volume Returns ------- SamplePlatingProcess """ with sql_connection.TRN: volume = volume if volume else 0 # Add the row to the process table instance = cls(cls._common_creation_steps(user)) # Create the plate plate = plate_module.Plate.create(plate_ext_id, plate_config) # By definition, all well plates are blank at the beginning # so populate all the wells in the plate with BLANKS for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): well = container_module.Well.create( plate, instance, volume, i + 1, j + 1) composition_module.SampleComposition.create( instance, well, volume) return instance @property def plate(self): """The plate being plated by this process Returns ------- plate : labman.db.Plate The plate being plated """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.container LEFT JOIN qiita.well USING (container_id) LEFT JOIN qiita.plate USING (plate_id) WHERE latest_upstream_process_id = %s""" TRN.add(sql, [self.id]) plate_id = TRN.execute_fetchlast() return plate_module.Plate(plate_id) def update_well(self, row, col, content): """Updates the content of a well Parameters ---------- row: int The well row col: int The well column content: str The new contents of the well Returns ------- str The new contents of the well """ return self.plate.get_well(row, col).composition.update(content) def comment_well(self, row, col, comment): """Updates the comment of a well Parameters ---------- row: int The well row col: int The well column comment: str The comment to set on the well """ self.plate.get_well(row, col).composition.notes = comment class ReagentCreationProcess(_Process): """Reagent creation process""" _process_type = 'reagent creation' @classmethod def create(cls, user, external_id, volume, reagent_type): """Creates a new reagent creation process Parameters ---------- user : labman.db.user.User User adding the reagent to the system external_id: str The external id of the reagent volume: float Initial reagent volume reagent_type : str The type of the reagent Returns ------- ReagentCreationProcess """ with sql_connection.TRN: # Add the row to the process table instance = cls(cls._common_creation_steps(user)) # Create the tube and the composition tube = container_module.Tube.create(instance, external_id, volume) composition_module.ReagentComposition.create( instance, tube, volume, reagent_type,
external_id) return instance @property def tube(self): """The tube storing the reagent""" with sql_connection.TRN as TRN: sql = """SELECT tube_id FROM qiita.tube LEFT JOIN qiita.container USING (container_id) WHERE latest_upstream_process_id = %s""" TRN.add(sql, [self.process_id]) tube_id = TRN.execute_fetchlast() return container_module.Tube(tube_id) class PrimerWorkingPlateCreationProcess(Process): """Primer working plate creation process object Attributes ---------- primer_set master_set_order_number """ _table = 'qiita.primer_working_plate_creation_process' _id_column = 'primer_working_plate_creation_process_id' _process_type = 'primer working plate creation' @classmethod def create(cls, user, primer_set, master_set_order, creation_date=None): """Creates a new set of working primer plates Parameters ---------- user : labman.db.user.User User creating the new set of primer plates primer_set : labman.composition.PrimerSet The primer set master_set_order : str The master set order creation_date: datetime.date, optional The creation date. Default: today Returns ------- PrimerWorkingPlateCreationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=creation_date) sql = """INSERT INTO qiita.primer_working_plate_creation_process (process_id, primer_set_id, master_set_order_number) VALUES (%s, %s, %s) RETURNING primer_working_plate_creation_process_id""" TRN.add(sql, [process_id, primer_set.id, master_set_order]) instance = cls(TRN.execute_fetchlast()) creation_date = instance.date plate_name_suffix = creation_date.strftime('%Y-%m-%d') primer_set_plates = primer_set.plates check_name = '%s %s' % (primer_set_plates[0].external_id, plate_name_suffix) if plate_module.Plate.external_id_exists(check_name): # The likelihood of this happening in the real system is really # low, but better be safe than sorry plate_name_suffix = datetime.now().strftime('%Y-%m-%d %H:%M') for ps_plate in primer_set_plates: # Create a new working primer plate plate_name = '%s %s' % (ps_plate.external_id, plate_name_suffix) plate_config = ps_plate.plate_configuration work_plate = plate_module.Plate.create( plate_name, plate_config) # Add the wells to the new plate for row in ps_plate.layout: for ps_well in row: w_well = container_module.Well.create( work_plate, instance, 10, ps_well.row, ps_well.column) composition_module.PrimerComposition.create( instance, w_well, 10, ps_well.composition) return instance @property def primer_set(self): """The primer set template from which the working plates are created Returns ------- PrimerSet """ return composition_module.PrimerSet(self._get_attr('primer_set_id')) @property def master_set_order(self): """The master set order Returns ------- str """ return self._get_attr('master_set_order_number') class GDNAExtractionProcess(Process): """gDNA extraction process object Attributes ---------- kingfisher epmotion epmotion_tool extraction_kit sample_plate volume See Also -------- Process """ _table = 'qiita.gdna_extraction_process' _id_column = 'gdna_extraction_process_id' _process_type = 'gDNA extraction' @property def kingfisher(self): """The King Fisher robot used during extraction Returns ------- Equipment """ return equipment_module.Equipment( self._get_attr('kingfisher_robot_id')) @property def epmotion(self): """The EpMotion robot used during extraction Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_robot_id')) @property def epmotion_tool(self): """The 
EpMotion tool used during extraction Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_tool_id')) @property def extraction_kit(self): """The extraction kit used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('extraction_kit_id')) @property def sample_plate(self): """The source sample plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition gc JOIN qiita.gdna_composition gdc ON gc.composition_id = gdc.composition_id JOIN qiita.sample_composition ssc USING (sample_composition_id) JOIN qiita.composition sc ON ssc.composition_id = sc.composition_id JOIN qiita.well w ON sc.container_id = w.container_id WHERE gc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The elution volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() @classmethod def create(cls, user, plate, kingfisher, epmotion, epmotion_tool, extraction_kit, volume, gdna_plate_name, extraction_date=None): """Creates a new gDNA extraction process Parameters ---------- user : labman.db.user.User User performing the gDNA extraction plate: labman.db.plate.Plate The plate being extracted kingfisher: labman.db.equipment.Equipment The KingFisher used epmotion: labman.db.equipment.Equipment The EpMotion used epmotion_tool: labman.db.equipment.Equipment The EpMotion tool used extraciton_kit: labman.db.composition.ReagentComposition The extraction kit used volume : float The elution extracted gdna_plate_name : str The name for the gdna plate extraction_date : datetime.date, optional The extraction date. Default: today Returns ------- GDNAExtractionProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=extraction_date) # Add the row to the gdna_extraction_process table sql = """INSERT INTO qiita.gdna_extraction_process (process_id, epmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id) VALUES (%s, %s, %s, %s, %s) RETURNING gdna_extraction_process_id""" TRN.add(sql, [process_id, epmotion.id, epmotion_tool.id, kingfisher.id, extraction_kit.id]) instance = cls(TRN.execute_fetchlast()) # Create the extracted plate plate_config = plate.plate_configuration gdna_plate = plate_module.Plate.create( gdna_plate_name, plate_config) plate_layout = plate.layout # Add the wells to the new plate for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): plated_sample = plate_layout[i][j].composition if plated_sample.sample_composition_type != 'empty': well = container_module.Well.create( gdna_plate, instance, volume, i + 1, j + 1) composition_module.GDNAComposition.create( instance, well, volume, plated_sample) return instance class GDNAPlateCompressionProcess(Process): """Gets 1 to 4 96-well gDNA plates and remaps them in a 384-well plate The remapping schema follows this strucutre: A B A B A B A B ... C D C D C D C D ... A B A B A B A B ... C D C D C D C D ... ... 
""" _table = 'qiita.compression_process' _id_column = 'compression_process_id' _process_type = "compress gDNA plates" def _compress_plate(self, out_plate, in_plate, row_pad, col_pad, volume=1): """Compresses the 96-well in_plate into the 384-well out_plate""" with sql_connection.TRN: layout = in_plate.layout for row in layout: for well in row: if well is not None: # The row/col pair is stored in the DB starting at 1 # subtract 1 to make it start at 0 so the math works # and re-add 1 at the end out_well_row = (((well.row - 1) * 2) + row_pad) + 1 out_well_col = (((well.column - 1) * 2) + col_pad) + 1 out_well = container_module.Well.create( out_plate, self, volume, out_well_row, out_well_col) composition_module.CompressedGDNAComposition.create( self, out_well, volume, well.composition) @classmethod def create(cls, user, plates, plate_ext_id, robot): """Creates a new gDNA compression process Parameters ---------- user : labman.db.user.User User performing the plating plates: list of labman.db.plate.Plate The plates to compress plate_ext_id : str The external plate id robot: Equipment The robot performing the compression Raises ------ ValueError Returns ------- GDNAPlateCompressionProcess """ if not (1 <= len(plates) <= 4): raise ValueError( 'Cannot compress %s gDNA plates. Please provide 1 to 4 ' 'gDNA plates' % len(plates)) with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the compression_process table sql = """INSERT INTO qiita.compression_process (process_id, robot_id) VALUES (%s, %s) RETURNING compression_process_id""" TRN.add(sql, [process_id, robot.id]) instance = cls(TRN.execute_fetchlast()) # Create the output plate # Magic number 3 -> 384-well plate plate = plate_module.Plate.create( plate_ext_id, plate_module.PlateConfiguration(3)) # Compress the plates for i, in_plate in enumerate(plates): row_pad = int(np.floor(i / 2)) col_pad = i % 2 instance._compress_plate(plate, in_plate, row_pad, col_pad) return instance @property def robot(self): """The robot performing the compression""" return equipment_module.Equipment(self._get_attr('robot_id')) @property def gdna_plates(self): """The input gdna plates""" with sql_connection.TRN as TRN: # Rationale: giving the compression algorithm, we only need to look # at the 4 wells on the top left corner (1, 1), (1, 2), (2, 1) and # (2, 2), and in that order, to know which plates have been # compressed sql = """SELECT gw.plate_id FROM qiita.composition cc JOIN qiita.well cw ON cc.container_id = cw.container_id JOIN qiita.compressed_gdna_composition cgc ON cc.composition_id = cgc.composition_id JOIN qiita.gdna_composition gdnac ON cgc.gdna_composition_id = gdnac.gdna_composition_id JOIN qiita.composition gc ON gdnac.composition_id = gc.composition_id JOIN qiita.well gw ON gc.container_id = gw.container_id WHERE cc.upstream_process_id = %s AND cw.row_num IN (1, 2) AND cw.col_num IN (1, 2) ORDER BY cw.row_num, cw.col_num""" TRN.add(sql, [self.process_id]) return [plate_module.Plate(pid) for pid in TRN.execute_fetchflatten()] class LibraryPrep16SProcess(Process): """16S Library Prep process object Attributes ---------- mastermix_lots water_lots epmotions See Also -------- Process """ _table = 'qiita.library_prep_16s_process' _id_column = 'library_prep_16s_process_id' _process_type = '16S library prep' @classmethod def create(cls, user, plate, primer_plate, lib_plate_name, epmotion, epmotion_tool_tm300, epmotion_tool_tm50, master_mix, water_lot, volume, 
preparation_date=None): """Creates a new 16S library prep process Parameters ---------- user : labman.db.user.User User performing the library prep plate: labman.db.plate.Plate The plate being prepared for amplicon sequencing primer_plate: labman.db.plate.Plate The primer plate lib_plate_name: str The name of the prepared plate epmotion: labman.db.equipment.Equipment The EpMotion epmotion_tool_tm300: labman.db.equipment.Equipment The EpMotion TM300 8 tool epmotion_tool_tm50: labman.db.equipment.Equipment The EpMotion TM300 8 tool master_mix: labman.db.composition.ReagentComposition The mastermix used water_lot: labman.db.composition.ReagentComposition The water lot used volume : float The PCR total volume in the wells preparation_date : datetime.date, optional The preparation date. Default: today Returns ------- LibraryPrep16SProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps( user, process_date=preparation_date) # Add the row to the library_prep_16s_process sql = """INSERT INTO qiita.library_prep_16s_process (process_id, epmotion_robot_id, epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id, master_mix_id, water_lot_id) VALUES (%s, %s, %s, %s, %s, %s) RETURNING library_prep_16s_process_id""" TRN.add(sql, [process_id, epmotion.id, epmotion_tool_tm300.id, epmotion_tool_tm50.id, master_mix.id, water_lot.id]) instance = cls(TRN.execute_fetchlast()) # Create the library plate plate_config = plate.plate_configuration library_plate = plate_module.Plate.create(lib_plate_name, plate_config) gdna_layout = plate.layout primer_layout = primer_plate.layout for i in range(plate_config.num_rows): for j in range(plate_config.num_columns): if gdna_layout[i][j] is not None: well = container_module.Well.create( library_plate, instance, volume, i + 1, j + 1) composition_module.LibraryPrep16SComposition.create( instance, well, volume, gdna_layout[i][j].composition, primer_layout[i][j].composition) return instance @property def mastermix(self): """The master mix lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('master_mix_id')) @property def water_lot(self): """The water lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('water_lot_id')) @property def epmotion(self): """The EpMotion robot used Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('epmotion_robot_id')) @property def epmotion_tm300_tool(self): """The EpMotion tm300 tool used Returns ------- Equipment """ return equipment_module.Equipment( self._get_attr('epmotion_tm300_8_tool_id')) @property def epmotion_tm50_tool(self): """The EpMotion tm50 tool used Returns ------- Equipment """ return equipment_module.Equipment( self._get_attr('epmotion_tm50_8_tool_id')) @property def gdna_plate(self): """The input gdna plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_16s_composition l16sc ON lc.composition_id = l16sc.composition_id JOIN qiita.gdna_composition gdc USING (gdna_composition_id) JOIN qiita.composition gc ON gc.composition_id = gdc.composition_id JOIN qiita.well w ON gc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def primer_plate(self): """The primer plate Returns ------- plate """ with sql_connection.TRN as TRN: sql = """SELECT 
DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_16s_composition l16sc ON lc.composition_id = l16sc.composition_id JOIN qiita.primer_composition prc USING (primer_composition_id) JOIN qiita.composition pc ON pc.composition_id = prc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The PCR Total volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() class NormalizationProcess(Process): """Normalization process object Attributes ---------- quantification_process water_lot See Also -------- Process """ _table = 'qiita.normalization_process' _id_column = 'normalization_process_id' _process_type = 'gDNA normalization' @staticmethod def _calculate_norm_vol(dna_concs, ng=5, min_vol=2.5, max_vol=3500, resolution=2.5): """Calculates nanoliters of each sample to add to get a normalized pool Parameters ---------- dna_concs : numpy array of float The concentrations calculated via PicoGreen (ng/uL) ng : float, optional The amount of DNA to pool (ng). Default: 5 min_vol : float, optional The minimum volume to pool (nL). Default: 2.5 max_vol : float, optional The maximum volume to pool (nL). Default: 3500 resolution: float, optional Resolution to use (nL). Default: 2.5 Returns ------- sample_vols : numpy array of float The volumes to pool (nL) """ sample_vols = ng / np.nan_to_num(dna_concs) * 1000 sample_vols = np.clip(sample_vols, min_vol, max_vol) sample_vols = np.round(sample_vols / resolution) * resolution return sample_vols @classmethod def create(cls, user, quant_process, water, plate_name, total_vol=3500, ng=5, min_vol=2.5, max_vol=3500, resolution=2.5, reformat=False): """Creates a new normalization process Parameters ---------- user : labman.db.user.User User performing the gDNA extraction quant_process : QuantificationProcess The quantification process to use for normalization water: ReagentComposition The water lot used for the normalization plate_name: str The output plate name total_vol: float, optional The total volume of normalized DNA (nL). Default: 3500 ng : float, optional The amount of DNA to pool (ng). Default: 5 min_vol : float, optional The minimum volume to pool (nL). Default: 2.5 max_vol : float, optional The maximum volume to pool (nL). Default: 3500 resolution: float, optional Resolution to use. Default: 2.5 reformat: bool, optional If true, reformat the plate from the interleaved format to the column format. Useful when 384-well plate is not full to save reagents. 
Default: False Returns ------- NormalizationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the normalization_process tables func_data = { 'function': 'default', 'parameters': {'total_volume': total_vol, 'target_dna': ng, 'min_vol': min_vol, 'max_volume': max_vol, 'resolution': resolution, 'reformat': reformat}} sql = """INSERT INTO qiita.normalization_process (process_id, quantitation_process_id, water_lot_id, normalization_function_data) VALUES (%s, %s, %s, %s) RETURNING normalization_process_id""" TRN.add(sql, [process_id, quant_process.id, water.id, dumps(func_data)]) instance = cls(TRN.execute_fetchlast()) # Retrieve all the concentration values concs = quant_process.concentrations # Transform the concentrations to a numpy array np_conc = np.asarray([raw_con for _, raw_con, _ in concs]) dna_v = NormalizationProcess._calculate_norm_vol( np_conc, ng, min_vol, max_vol, resolution) water_v = total_vol - dna_v # Create the plate. 3 -> 384-well plate plate_config = plate_module.PlateConfiguration(3) plate = plate_module.Plate.create(plate_name, plate_config) for (comp, _, _), dna_vol, water_vol in zip(concs, dna_v, water_v): comp_well = comp.container row = comp_well.row column = comp_well.column if reformat: row = row - 1 column = column - 1 roffset = row % 2 row = int(row - roffset + np.floor(column / 12)) + 1 coffset = column % 2 + (row % 2) * 2 column = int(coffset * 6 + (column / 2) % 6) + 1 well = container_module.Well.create( plate, instance, total_vol, row, column) composition_module.NormalizedGDNAComposition.create( instance, well, total_vol, comp, dna_vol, water_vol) return instance @property def quantification_process(self): """The quantification process used Returns ------- QuantificationProcess """ return QuantificationProcess(self._get_attr('quantitation_process_id')) @property def water_lot(self): """The water lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('water_lot_id')) @property def compressed_plate(self): """The input compressed plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition nc JOIN qiita.normalized_gdna_composition ngc ON nc.composition_id = ngc.composition_id JOIN qiita.compressed_gdna_composition cgdnac USING (compressed_gdna_composition_id) JOIN qiita.composition cc ON cc.composition_id = cgdnac.composition_id JOIN qiita.well w ON cc.container_id = w.container_id WHERE nc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def normalization_function_data(self): """The information about the normalization function Returns ------- str """ return self._get_attr('normalization_function_data') @staticmethod def _format_picklist(dna_vols, water_vols, wells, dest_wells=None, dna_concs=None, sample_names=None, dna_plate_name='Sample', water_plate_name='Water', dna_plate_type='384PP_AQ_BP2_HT', water_plate_type='384PP_AQ_BP2_HT', dest_plate_name='NormalizedDNA', dna_plate_names=None): """Formats Echo pick list to achieve a normalized input DNA pool Parameters ---------- dna_vols: numpy array of float The volumes of dna to add water_vols: numpy array of float The volumes of water to add wells: numpy array of str The well codes in the same orientation as the DNA concentrations dest_wells: numpy array of str The well codes, in the same orientation as `wells`, in which to place each 
sample if reformatting dna_concs: numpy array of float The concentrations calculated via PicoGreen (ng/uL) sample_names: numpy array of str The sample names in the same orientation as the DNA concentrations Returns ------- picklist : str The Echo formatted pick list """ # check that arrays are the right size if dna_vols.shape != wells.shape != water_vols.shape: raise ValueError( 'dna_vols %r has a size different from wells %r or water_vols %r' % (dna_vols.shape, wells.shape, water_vols.shape)) # if destination wells not specified, use source wells if dest_wells is None: dest_wells = wells if sample_names is None: sample_names = np.empty(dna_vols.shape) * np.nan if dna_concs is None: dna_concs = np.empty(dna_vols.shape) * np.nan if dna_concs.shape != sample_names.shape != dna_vols.shape: raise ValueError( 'dna_vols %r has a size different from dna_concs %r or ' 'sample_names %r' % (dna_vols.shape, dna_concs.shape, sample_names.shape)) # header picklist = [ 'Sample\tSource Plate Name\tSource Plate Type\tSource Well' '\tConcentration\tTransfer Volume\tDestination Plate Name' '\tDestination Well'] # water additions for index, sample in np.ndenumerate(sample_names): picklist.append('\t'.join( [str(sample), water_plate_name, water_plate_type, str(wells[index]), str(dna_concs[index]), str(water_vols[index]), dest_plate_name, str(dest_wells[index])])) # DNA additions for index, sample in np.ndenumerate(sample_names): if dna_plate_names is not None: dna_plate_name = dna_plate_names[index] picklist.append('\t'.join( [str(sample), dna_plate_name, dna_plate_type, str(wells[index]), str(dna_concs[index]), str(dna_vols[index]), dest_plate_name, str(dest_wells[index])])) return '\n'.join(picklist) def generate_echo_picklist(self): """Generates Echo pick list to achieve a normalized input DNA pool Returns ------- str The echo-formatted pick list """ concentrations = { comp: conc for comp, conc, _ in self.quantification_process.concentrations} dna_vols = [] water_vols = [] wells = [] dest_wells = [] sample_names = [] dna_concs = [] layout = self.plates[0].layout for row in layout: for well in row: if well: composition = well.composition dna_vols.append(composition.dna_volume) water_vols.append(composition.water_volume) # For the source well we need to take a look at the # gdna comp c_gdna_comp = composition.compressed_gdna_composition wells.append(c_gdna_comp.container.well_id) dest_wells.append(well.well_id) # For the sample name we need to check the sample # composition sample_comp = c_gdna_comp.gdna_composition.\ sample_composition sample_names.append(sample_comp.content) # For the DNA concentrations we need to look at # the quantification process dna_concs.append(concentrations[c_gdna_comp]) # _format_picklist expects numpy arrays dna_vols = np.asarray(dna_vols) water_vols = np.asarray(water_vols) wells = np.asarray(wells) dest_wells = np.asarray(dest_wells) sample_names = np.asarray(sample_names) dna_concs = np.asarray(dna_concs) return NormalizationProcess._format_picklist( dna_vols, water_vols, wells, dest_wells=dest_wells, sample_names=sample_names, dna_concs=dna_concs) class LibraryPrepShotgunProcess(Process): """Shotgun Library Prep process object Attributes ---------- kappa_hyper_plus_kit stub_lot normalization_process See Also -------- Process """ _table = 'qiita.library_prep_shotgun_process' _id_column = 'library_prep_shotgun_process_id' _process_type = 'shotgun library prep' @classmethod def create(cls, user, plate, plate_name, kappa_hyper_plus_kit, stub_lot, volume, i5_plate, i7_plate): """Creates
a new LibraryPrepShotgunProcess Parameters ---------- user : labman.db.user.User User performing the library prep plate: labman.db.plate.Plate The normalized gDNA plate of origin plate_name: str The library kappa_hyper_plus_kit: labman.db.composition.ReagentComposition The Kappa Hyper Plus kit used stub_lot: labman.db.composition.ReagentComposition The stub lot used volume : float The initial volume in the wells i5_plate: labman.db.plate.Plate The i5 primer working plate i7_plate: labman.db.plate.Plate The i7 primer working plate Returns ------- LibraryPrepShotgunProcess The newly created process """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the library_prep_shotgun_process sql = """INSERT INTO qiita.library_prep_shotgun_process (process_id, kappa_hyper_plus_kit_id, stub_lot_id, normalization_process_id) VALUES (%s, %s, %s, ( SELECT DISTINCT normalization_process_id FROM qiita.normalization_process np JOIN qiita.container c ON np.process_id = c.latest_upstream_process_id JOIN qiita.well USING (container_id) WHERE plate_id = %s)) RETURNING library_prep_shotgun_process_id""" TRN.add(sql, [process_id, kappa_hyper_plus_kit.id, stub_lot.id, plate.id]) instance = cls(TRN.execute_fetchlast()) # Get the primer set for the plates sql = """SELECT DISTINCT shotgun_primer_set_id FROM qiita.shotgun_combo_primer_set cps JOIN qiita.primer_set_composition psc ON cps.i5_primer_set_composition_id = psc.primer_set_composition_id JOIN qiita.primer_composition pc USING (primer_set_composition_id) JOIN qiita.composition c ON pc.composition_id = c.composition_id JOIN qiita.well USING (container_id) WHERE plate_id = %s""" TRN.add(sql, [i5_plate.id]) primer_set = composition_module.ShotgunPrimerSet( TRN.execute_fetchlast()) # Get a list of wells that actually contain information wells = [well for well in chain.from_iterable(plate.layout) if well is not None] # Get the list of index pairs to use idx_combos = primer_set.get_next_combos(len(wells)) i5_layout = i5_plate.layout i7_layout = i7_plate.layout # Create the library plate lib_plate = plate_module.Plate.create( plate_name, plate.plate_configuration) for well, idx_combo in zip(wells, idx_combos): i5_well = idx_combo[0].container i7_well = idx_combo[1].container i5_comp = i5_layout[ i5_well.row - 1][i5_well.column - 1].composition i7_comp = i7_layout[ i7_well.row - 1][i7_well.column - 1].composition lib_well = container_module.Well.create( lib_plate, instance, volume, well.row, well.column) composition_module.LibraryPrepShotgunComposition.create( instance, lib_well, volume, well.composition, i5_comp, i7_comp) return instance @property def kappa_hyper_plus_kit(self): """The Kappa Hyper plus kit used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('kappa_hyper_plus_kit_id')) @property def stub_lot(self): """The stub lot used Returns ------- ReagentComposition """ return composition_module.ReagentComposition( self._get_attr('stub_lot_id')) @property def normalization_process(self): """The normalization process used Returns ------- NormalizationProcess """ return NormalizationProcess(self._get_attr('normalization_process_id')) @property def normalized_plate(self): """The input normalized plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_shotgun_composition lpsc ON lc.composition_id = lpsc.composition_id JOIN 
qiita.normalized_gdna_composition ngdnac USING (normalized_gdna_composition_id) JOIN qiita.composition nc ON ngdnac.composition_id = nc.composition_id JOIN qiita.well w ON nc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def i5_primer_plate(self): """The i5 primer plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_shotgun_composition lsc ON lc.composition_id = lsc.composition_id JOIN qiita.primer_composition prc ON lsc.i5_primer_composition_id = prc.primer_composition_id JOIN qiita.composition pc ON prc.composition_id = pc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def i7_primer_plate(self): """The i7 primer plate Returns ------- Plate """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT plate_id FROM qiita.composition lc JOIN qiita.library_prep_shotgun_composition lsc ON lc.composition_id = lsc.composition_id JOIN qiita.primer_composition prc ON lsc.i7_primer_composition_id = prc.primer_composition_id JOIN qiita.composition pc ON prc.composition_id = pc.composition_id JOIN qiita.well w ON pc.container_id = w.container_id WHERE lc.upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return plate_module.Plate(TRN.execute_fetchlast()) @property def volume(self): """The volume Returns ------- float """ with sql_connection.TRN as TRN: sql = """SELECT DISTINCT total_volume FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return TRN.execute_fetchlast() @staticmethod def _format_picklist(sample_names, sample_wells, indices, i5_vol=250, i7_vol=250, i5_plate_type='384LDV_AQ_B2_HT', i7_plate_type='384LDV_AQ_B2_HT', dest_plate_name='IndexPCRPlate'): """Formats Echo-format pick list for preparing the shotgun library Parameters ---------- sample_names: array-like of str The sample names matching index order of indices sample_wells: array-like of str The wells matching sample name order indices: pandas DataFrame The dataframe with index info matching sample_names i5_vol: int, optional The volume of i5 index to transfer. Default: 250 i7_vol: int, optional The volume of i7 index to transfer. Default: 250 i5_plate_type: str, optional The i5 plate type. Default: 384LDV_AQ_B2_HT i7_plate_type: str, optional The i7 plate type. Default: 384LDV_AQ_B2_HT dest_plate_name: str, optional The name of the destination plate. 
Default: IndexPCRPlate Returns ------- str The Echo formatted pick list """ # check that arrays are the right size if len(sample_names) != len(sample_wells) != len(indices): raise ValueError( 'sample_names (%s) has a size different from sample_wells ' '(%s) or index list (%s)' % (len(sample_names), len(sample_wells), len(indices))) # header picklist = [ 'Sample\tSource Plate Name\tSource Plate Type\tSource Well\t' 'Transfer Volume\tIndex Name\tIndex Sequence\t' 'Destination Plate Name\tDestination Well'] # i5 additions for i, (sample, well) in enumerate(zip(sample_names, sample_wells)): picklist.append('\t'.join([ str(sample), indices.iloc[i]['i5 plate'], i5_plate_type, indices.iloc[i]['i5 well'], str(i5_vol), indices.iloc[i]['i5 name'], indices.iloc[i]['i5 sequence'], dest_plate_name, well])) # i7 additions for i, (sample, well) in enumerate(zip(sample_names, sample_wells)): picklist.append('\t'.join([ str(sample), indices.iloc[i]['i7 plate'], i7_plate_type, indices.iloc[i]['i7 well'], str(i7_vol), indices.iloc[i]['i7 name'], indices.iloc[i]['i7 sequence'], dest_plate_name, well])) return '\n'.join(picklist) def generate_echo_picklist(self): """Generates Echo pick list for preparing the shotgun library Returns ------- str The echo-formatted pick list """ sample_names = [] sample_wells = [] indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {}, 'i5 well': {}, 'i7 name': {}, 'i7 plate': {}, 'i7 sequence': {}, 'i7 well': {}, 'index combo': {}, 'index combo seq': {}} for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)): if well is None: continue # Add the sample well sample_wells.append(well.well_id) # Get the sample name - we need to go back to the SampleComposition lib_comp = well.composition sample_comp = lib_comp.normalized_gdna_composition\ .compressed_gdna_composition.gdna_composition\ .sample_composition sample_names.append(sample_comp.content) # Retrieve all the information about the indices i5_comp = lib_comp.i5_composition.primer_set_composition i5_well = i5_comp.container indices['i5 name'][idx] = i5_comp.external_id indices['i5 plate'][idx] = i5_well.plate.external_id indices['i5 sequence'][idx] = i5_comp.barcode indices['i5 well'][idx] = i5_well.well_id i7_comp = lib_comp.i7_composition.primer_set_composition i7_well = i7_comp.container indices['i7 name'][idx] = i7_comp.external_id indices['i7 plate'][idx] = i7_well.plate.external_id indices['i7 sequence'][idx] = i7_comp.barcode indices['i7 well'][idx] = i7_well.well_id indices['index combo seq'][idx] = '%s%s' % ( indices['i5 sequence'][idx], indices['i7 sequence'][idx]) sample_names = np.asarray(sample_names) sample_wells = np.asarray(sample_wells) indices = pd.DataFrame(indices) return LibraryPrepShotgunProcess._format_picklist( sample_names, sample_wells, indices) class QuantificationProcess(Process): """Quantification process object Attributes ---------- concentrations See Also -------- Process """ _table = 'qiita.quantification_process' _id_column = 'quantification_process_id' _process_type = 'quantification' @staticmethod def _compute_shotgun_pico_concentration(dna_vals, size=500): """Computes molar concentration of libraries from library DNA concentration values. 
Parameters ---------- dna_vals : numpy array of float The DNA concentration in ng/uL size : int The average library molecule size in bp Returns ------- np.array of floats Array of calculated concentrations, in nanomolar units """ lib_concentration = (dna_vals / (660 * float(size))) * 10**6 return lib_concentration @staticmethod def _make_2D_array(df, data_col='Sample DNA Concentration', well_col='Well', rows=8, cols=12): """Pulls a column of data out of a dataframe and puts into array format based on well IDs in another column Parameters ---------- df: Pandas DataFrame dataframe from which to pull values data_col: str, optional name of column with data. Default: Sample DNA Concentration well_col: str, optional name of column with well IDs, in 'A1,B12' format. Default: Well rows: int, optional number of rows in array to return. Default: 8 cols: int, optional number of cols in array to return. Default: 12 Returns ------- numpy 2D array """ # initialize empty Cp array cp_array = np.empty((rows, cols), dtype=object) # fill Cp array with the post-cleaned values from the right half of the # plate for record in df.iterrows(): row = ord(str.upper(record[1][well_col][0])) - ord('A') col = int(record[1][well_col][1:]) - 1 cp_array[row, col] = record[1][data_col] return cp_array @staticmethod def _parse_pico_csv(contents, sep='\t', conc_col_name='Sample DNA Concentration'): """Reads tab-delimited pico quant Parameters ---------- contents: fp or open filehandle pico quant file sep: str sep char used in quant file conc_col_name: str name to use for concentration column output Returns ------- pico_df: pandas DataFrame object DataFrame relating well location and DNA concentration """ raw_df = pd.read_csv(contents, sep=sep, skiprows=2, skipfooter=5, engine='python') pico_df = raw_df[['Well', '[Concentration]']] pico_df = pico_df.rename(columns={'[Concentration]': conc_col_name}) # coerce oddball concentrations to np.nan pico_df[conc_col_name] = pd.to_numeric(pico_df[conc_col_name], errors='coerce') return pico_df @staticmethod def parse(contents, file_format="minipico", rows=8, cols=12): """Parses the quantification output Parameters ---------- contents : str The contents of the plate reader output file_format: str The quantification file format rows: int, optional The number of rows in the plate. Default: 8 cols: int, optional The number of cols in the plate. Default: 12 Returns ------- DataFrame """ parsers = {'minipico': QuantificationProcess._parse_pico_csv} contents_io = StringIO(contents) if file_format not in parsers: raise ValueError( 'File format %s not recognized. 
Supported file formats: %s' % (file_format, ', '.join(parsers))) df = parsers[file_format](contents_io) array = QuantificationProcess._make_2D_array(df, rows=rows, cols=cols) return array.astype(float) @classmethod def create_manual(cls, user, quantifications): """Creates a new manual quantification process Parameters ---------- user: labman.db.user.User User performing the quantification process quantifications: list of dict The quantifications in the form of {'composition': Composition, 'conenctration': float} Returns ------- QuantificationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the quantification process table sql = """INSERT INTO qiita.quantification_process (process_id) VALUES (%s) RETURNING quantification_process_id""" TRN.add(sql, [process_id]) instance = cls(TRN.execute_fetchlast()) sql = """INSERT INTO qiita.concentration_calculation (quantitated_composition_id, upstream_process_id, raw_concentration) VALUES (%s, %s, %s)""" sql_args = [] for quant in quantifications: sql_args.append([quant['composition'].composition_id, instance.id, quant['concentration']]) TRN.add(sql, sql_args, many=True) TRN.execute() return instance @classmethod def create(cls, user, plate, concentrations): """Creates a new quantification process Parameters ---------- user: labman.db.user.User User performing the quantification process plate: labman.db.plate.Plate The plate being quantified concentrations: 2D np.array The plate concentrations Returns ------- QuantificationProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the quantification process table sql = """INSERT INTO qiita.quantification_process (process_id) VALUES (%s) RETURNING quantification_process_id""" TRN.add(sql, [process_id]) instance = cls(TRN.execute_fetchlast()) sql = """INSERT INTO qiita.concentration_calculation (quantitated_composition_id, upstream_process_id, raw_concentration) VALUES (%s, %s, %s)""" sql_args = [] layout = plate.layout for p_row, c_row in zip(layout, concentrations): for well, conc in zip(p_row, c_row): if well is not None: sql_args.append([well.composition.composition_id, instance.id, conc]) if len(sql_args) == 0: raise ValueError('No concentration values have been provided') TRN.add(sql, sql_args, many=True) TRN.execute() return instance @property def concentrations(self): """The concentrations measured Returns ------- list of (Composition, float, float) """ with sql_connection.TRN as TRN: sql = """SELECT quantitated_composition_id, raw_concentration, computed_concentration FROM qiita.concentration_calculation WHERE upstream_process_id = %s ORDER BY concentration_calculation_id""" TRN.add(sql, [self._id]) return [ (composition_module.Composition.factory(comp_id), r_con, c_con) for comp_id, r_con, c_con in TRN.execute_fetchindex()] def compute_concentrations(self, dna_amount=240, min_val=1, max_val=15, blank_volume=2, size=500): """Compute the normalized concentrations Parameters ---------- dna_amount: float, optional (Amplicon) Total amount of DNA, in ng. Default: 240 min_val: float, optional (Amplicon) Minimum amount of DNA to normalize to (nM). Default: 1 max_val: float, optional (Amplicon) Maximum value. Wells above this number will be excluded (nM). Default: 15 blank_volume: float, optional (Amplicon) Amount to pool for the blanks (nM). Default: 2. size: int, optional (Shotgun) The average library molecule size, in bp. 
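            Default: 500

        Notes
        -----
        For amplicon (16S) plates the stored values are per-sample pooling
        volumes, ``dna_amount / concentration``; values for wells whose
        concentration falls below ``min_val`` are set to ``min_val``, wells
        above ``max_val`` are set to 0 so they are not pooled, and blanks are
        set to ``blank_volume``. For shotgun plates the stored values are
        molar concentrations computed by
        ``_compute_shotgun_pico_concentration``.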
""" concentrations = self.concentrations layout = concentrations[0][0].container.plate.layout res = None if isinstance(concentrations[0][0], composition_module.LibraryPrep16SComposition): # Amplicon sample_concs = np.zeros_like(layout, dtype=float) is_blank = np.zeros_like(layout, dtype=bool) for comp, r_conc, _ in concentrations: well = comp.container row = well.row - 1 col = well.column - 1 sample_concs[row][col] = r_conc sc = comp.gdna_composition.sample_composition is_blank[row][col] = sc.sample_composition_type == 'blank' res = QuantificationProcess._compute_amplicon_pool_values( sample_concs, dna_amount) res[sample_concs < min_val] = min_val # If there is any sample whose concentration is above the # user-defined max_value, the decision is to not pool that sample. # To not pool the sample, define it's volume to 0 and it will not # get pooled. res[sample_concs > max_val] = 0 res[is_blank] = blank_volume elif isinstance(concentrations[0][0], composition_module.LibraryPrepShotgunComposition): # Shotgun sample_concs = np.zeros_like(layout, dtype=float) for comp, r_conc, _ in concentrations: well = comp.container row = well.row - 1 col = well.column - 1 sample_concs[row][col] = r_conc res = QuantificationProcess._compute_shotgun_pico_concentration( sample_concs, size) # No need for else, because if it is not one of the above types # we don't need to do anything if res is not None: sql_args = [] for p_row, c_row in zip(layout, res): for well, conc in zip(p_row, c_row): if well is not None: sql_args.append([conc, self.id, well.composition.composition_id]) sql = """UPDATE qiita.concentration_calculation SET computed_concentration = %s WHERE upstream_process_id = %s AND quantitated_composition_id = %s""" with sql_connection.TRN as TRN: TRN.add(sql, sql_args, many=True) TRN.execute() @staticmethod def _compute_amplicon_pool_values(sample_concs, dna_amount=240): """Computes amplicon pooling values Parameters ---------- sample_concs: 2D array of float nM sample concentrations dna_amount: float, optional Total amount of DNA, in ng. Default: 240 Returns ------- np.array of floats A 2D array of floats """ return float(dna_amount) / sample_concs class PoolingProcess(Process): """Pooling process object Attributes ---------- quantification_process robot See Also -------- Process """ _table = 'qiita.pooling_process' _id_column = 'pooling_process_id' _process_type = 'pooling' @staticmethod def estimate_pool_conc_vol(sample_vols, sample_concs): """Estimates the molarity and volume of a pool. 
Parameters ---------- sample_concs : numpy array of float The concentrations calculated via PicoGreen (nM) sample_vols : numpy array of float The calculated pooling volumes (nL) Returns ------- pool_conc : float The estimated actual concentration of the pool, in nM total_vol : float The total volume of the pool, in nL """ # scalar to adjust nL to L for molarity calculations nl_scalar = 1e-9 # calc total pool pmols total_pmols = np.multiply(sample_concs, sample_vols) * nl_scalar # calc total pool vol in nanoliters total_vol = sample_vols.sum() # pool pM is total pmols divided by total liters # (total vol in nL * 1 L / 10^9 nL) pool_conc = total_pmols.sum() / (total_vol * nl_scalar) return (pool_conc, total_vol) @staticmethod def compute_shotgun_pooling_values_eqvol(sample_concs, total_vol=60.0): """Computes molar concentration of libraries from concentration values, using an even volume per sample Parameters ---------- sample_concs : numpy array of float The concentrations calculated via PicoGreen (nM) total_vol : float, optional The total volume to pool (uL). Default: 60 Returns ------- np.array of floats A 2D array of floats """ per_sample_vol = (total_vol / sample_concs.size) * 1000.0 sample_vols = np.zeros(sample_concs.shape) + per_sample_vol return sample_vols @staticmethod def compute_shotgun_pooling_values_minvol( sample_concs, sample_fracs=None, floor_vol=100, floor_conc=40, total_nmol=.01): """Computes pooling volumes for samples based on concentration estimates of nM concentrations (`sample_concs`), taking a minimum volume of samples below a threshold. Reads in concentration values in nM. Samples below a minimum concentration (`floor_conc`, default 40 nM) will be included, but at a decreased volume (`floor_vol`, default 100 nL) to avoid overdiluting the pool. Samples can be assigned a target molar fraction in the pool by passing a np.array (`sample_fracs`, same shape as `sample_concs`) with fractional values per sample. By default, will aim for equal molar pooling. Finally, total pooling size is determined by a target nanomolar quantity (`total_nmol`, default .01). For a perfect 384 sample library, in which you had all samples at a concentration of exactly 400 nM and wanted a total volume of 60 uL, this would be 0.024 nmol. For a Novaseq, we expect to need 150 uL at 4 nM, or about 0.0006 nmol. Taking into account sample loss on the pippin prep (1/2) and molar loss due to exclusion of primer dimers (1/2), figure we need 4 times that or 0.0024. Parameters ---------- sample_concs: 2D array of float nM sample concentrations sample_fracs: 2D of float, optional fractional value for each sample (default 1/N) floor_vol: float, optional volume (nL) at which samples below floor_conc will be pooled. Default: 100 floor_conc: float, optional minimum value (nM) for pooling at real estimated value. Default: 40 total_nmol : float, optional total number of nM to have in pool. 
Default: 0.01 Returns ------- sample_vols: np.array of floats the volumes in nL per each sample pooled """ if sample_fracs is None: sample_fracs = np.ones(sample_concs.shape) / sample_concs.size # calculate volumetric fractions including floor val sample_vols = (total_nmol * sample_fracs) / sample_concs # convert L to nL sample_vols *= 10**9 # drop volumes for samples below floor concentration to floor_vol sample_vols[sample_concs < floor_conc] = floor_vol return sample_vols @staticmethod def compute_shotgun_pooling_values_floor( sample_concs, sample_fracs=None, min_conc=10, floor_conc=50, total_nmol=.01): """Computes pooling volumes for samples based on concentration estimates of nM concentrations (`sample_concs`). Reads in concentration values in nM. Samples must be above a minimum concentration threshold (`min_conc`, default 10 nM) to be included. Samples above this threshold but below a given floor concentration (`floor_conc`, default 50 nM) will be pooled as if they were at the floor concentration, to avoid overdiluting the pool. Samples can be assigned a target molar fraction in the pool by passing a np.array (`sample_fracs`, same shape as `sample_concs`) with fractional values per sample. By default, will aim for equal molar pooling. Finally, total pooling size is determined by a target nanomolar quantity (`total_nmol`, default .01). For a perfect 384 sample library, in which you had all samples at a concentration of exactly 400 nM and wanted a total volume of 60 uL, this would be 0.024 nmol. Parameters ---------- sample_concs: 2D array of float nM calculated by compute_qpcr_concentration sample_fracs: 2D of float, optional fractional value for each sample (default 1/N) min_conc: float, optional minimum nM concentration to be included in pool. Default: 10 floor_conc: float, optional minimum value for pooling for samples above min_conc. Default: 50 total_nmol : float, optional total number of nM to have in pool. 
Default 0.01 Returns ------- sample_vols: np.array of floats the volumes in nL per each sample pooled """ if sample_fracs is None: sample_fracs = np.ones(sample_concs.shape) / sample_concs.size # get samples above threshold sample_fracs_pass = sample_fracs.copy() sample_fracs_pass[sample_concs <= min_conc] = 0 # renormalize to exclude lost samples sample_fracs_pass *= 1/sample_fracs_pass.sum() # floor concentration value sample_concs_floor = sample_concs.copy() sample_concs_floor[sample_concs < floor_conc] = floor_conc # calculate volumetric fractions including floor val sample_vols = (total_nmol * sample_fracs_pass) / sample_concs_floor # convert L to nL sample_vols *= 10**9 return sample_vols @classmethod def create(cls, user, quantification_process, pool_name, volume, input_compositions, func_data, robot=None, destination=None): """Creates a new pooling process Parameters ---------- user: labman.db.user.User User performing the pooling process quantification_process: labman.db.process.QuantificationProcess The quantification process this pooling is based on pool_name: str The name of the new pool volume: float The initial volume input_compositions: list of dicts The input compositions for the pool {'composition': Composition, 'input_volume': float, 'percentage_of_output': float} func_data : dict Dictionary with the pooling function information robot: labman.equipment.Equipment, optional The robot performing the pooling, if not manual destination: str The EpMotion destination tube Returns ------- PoolingProcess """ with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) # Add the row to the pooling process table sql = """INSERT INTO qiita.pooling_process (process_id, quantification_process_id, robot_id, destination, pooling_function_data) VALUES (%s, %s, %s, %s, %s) RETURNING pooling_process_id""" r_id = robot.id if robot is not None else None if r_id is None: destination = None TRN.add(sql, [process_id, quantification_process.id, r_id, destination, dumps(func_data)]) instance = cls(TRN.execute_fetchlast()) # Create the new pool tube = container_module.Tube.create(instance, pool_name, volume) pool = composition_module.PoolComposition.create( instance, tube, volume) # Link the pool with its contents sql = """INSERT INTO qiita.pool_composition_components (output_pool_composition_id, input_composition_id, input_volume, percentage_of_output) VALUES (%s, %s, %s, %s)""" sql_args = [] for in_comp in input_compositions: # The wet lab pointed out that we don't need to pool the ones # that have a value below 0.001 if in_comp['input_volume'] < 0.001: continue sql_args.append([pool.id, in_comp['composition'].composition_id, in_comp['input_volume'], in_comp['percentage_of_output']]) TRN.add(sql, sql_args, many=True) TRN.execute() return instance @property def quantification_process(self): """The quantification process used Returns ------- QuantificationProcess """ return QuantificationProcess( self._get_attr('quantification_process_id')) @property def robot(self): """The robot used Returns ------- Equipment """ return equipment_module.Equipment(self._get_attr('robot_id')) @property def destination(self): """The EpMotion destination tube Returns ------- str """ return self._get_attr('destination') @property def components(self): """The components of the pool Returns ------- list of (Composition, float) """ with sql_connection.TRN as TRN: sql = """SELECT input_composition_id, input_volume FROM qiita.pool_composition_components JOIN 
qiita.pool_composition ON output_pool_composition_id = pool_composition_id JOIN qiita.composition USING (composition_id) WHERE upstream_process_id = %s ORDER BY pool_composition_components_id""" TRN.add(sql, [self.process_id]) return [(composition_module.Composition.factory(comp_id), vol) for comp_id, vol in TRN.execute_fetchindex()] @property def pool(self): """The generated pool composition Returns ------- PoolComposition """ with sql_connection.TRN as TRN: sql = """SELECT composition_id FROM qiita.composition WHERE upstream_process_id = %s""" TRN.add(sql, [self.process_id]) return composition_module.Composition.factory( TRN.execute_fetchlast()) @property def pooling_function_data(self): """The information about the pooling process Returns ------- dict """ return self._get_attr('pooling_function_data') @staticmethod def _format_picklist(vol_sample, max_vol_per_well=60000, dest_plate_shape=None): """Format the contents of an echo pooling pick list Parameters ---------- vol_sample : 2d numpy array of floats The per well sample volume, in nL max_vol_per_well : floats, optional Maximum destination well volume, in nL. Default: 60000 dest_plate_shape: list of 2 elements The destination plate shape """ if dest_plate_shape is None: dest_plate_shape = [16, 24] contents = ['Source Plate Name,Source Plate Type,Source Well,' 'Concentration,Transfer Volume,Destination Plate Name,' 'Destination Well'] # Write the sample transfer volumes rows, cols = vol_sample.shape # replace NaN values with 0s to leave a trail of unpooled wells pool_vols = np.nan_to_num(vol_sample) running_tot = 0 d = 1 for i in range(rows): for j in range(cols): well_name = "%s%d" % (chr(ord('A') + i), j+1) # Machine will round, so just give it enough info to do the # correct rounding. val = "%.2f" % pool_vols[i][j] # test to see if we will exceed total vol per well if running_tot + pool_vols[i][j] > max_vol_per_well: d += 1 running_tot = pool_vols[i][j] else: running_tot += pool_vols[i][j] dest = "%s%d" % (chr(ord('A') + int(np.floor(d/dest_plate_shape[0]))), (d % dest_plate_shape[1])) contents.append(",".join(['1', '384LDV_AQ_B2_HT', well_name, "", val, 'NormalizedDNA', dest])) return "\n".join(contents) def generate_echo_picklist(self, max_vol_per_well=30000): """Generates Echo pick list for pooling the shotgun library Parameters ---------- max_vol_per_well : floats, optional Maximum destination well volume, in nL. 
Default: 30000 Returns ------- str The echo-formatted pick list """ vol_sample = np.zeros((16, 24)) for comp, vol in self.components: well = comp.container vol_sample[well.row - 1][well.column - 1] = vol return PoolingProcess._format_picklist(vol_sample) def generate_epmotion_file(self): """Generates an EpMotion file to perform the pooling Returns ------- str The EpMotion-formatted pool file contents """ contents = ['Rack,Source,Rack,Destination,Volume,Tool'] destination = self.destination for comp, vol in self.components: source = comp.container.well_id val = "%.3f" % vol # Hard-coded values - never changes according to the wet lab contents.append( ",".join(['1', source, '1', destination, val, '1'])) return "\n".join(contents) def generate_pool_file(self): """Generates the correct pool file based on the pool contents Returns ------- str The contents of the pool file """ comp = self.components[0][0] if isinstance(comp, composition_module.LibraryPrep16SComposition): return self.generate_epmotion_file() elif isinstance(comp, composition_module.LibraryPrepShotgunComposition): return self.generate_echo_picklist() else: # This error should only be shown to programmers raise ValueError( "Can't generate a pooling file for a pool containing " "compositions of type: %s" % comp.__class__.__name__) class SequencingProcess(Process): """Sequencing process object Attributes ---------- See Also -------- Process """ _table = 'qiita.sequencing_process' _id_column = 'sequencing_process_id' _process_type = 'sequencing' sequencer_lanes = { 'HiSeq4000': 8, 'HiSeq3000': 8, 'HiSeq2500': 2, 'HiSeq1500': 2, 'MiSeq': 1, 'MiniSeq': 1, 'NextSeq': 1, 'NovaSeq': 1} @staticmethod def list_sequencing_runs(): """Generates a list of sequencing runs Returns ------- list of dicts The list of sequence run information with the structure: [{'process_id': int, 'run_name': string, ...}] """ with sql_connection.TRN as TRN: sql = """SELECT * FROM qiita.sequencing_process ORDER BY process_id""" TRN.add(sql) return [dict(r) for r in TRN.execute_fetchindex()] @classmethod def create(cls, user, pools, run_name, experiment, sequencer, fwd_cycles, rev_cycles, principal_investigator, contacts=None): """Creates a new sequencing process Parameters ---------- user : labman.db.user.User User preparing the sequencing pools: list of labman.db.composition.PoolComposition The pools being sequenced, in lane order run_name: str The run name experiment: str The run experiment sequencer: labman.db.equipment.Equipment The sequencer used fwd_cycles : int The number of forward cycles rev_cycles : int The number of reverse cycles principal_investigator : labman.db.user.User The principal investigator to list in the run contacts: list of labman.db.user.User, optinal Any additional contacts to add to the Sample Sheet Returns ------- SequencingProcess Raises ------ ValueError If the number of cycles are <= 0 """ if fwd_cycles <= 0 or not isinstance(fwd_cycles, int): raise ValueError("fwd_cycles must be > 0") if rev_cycles <= 0 or not isinstance(rev_cycles, int): raise ValueError("rev_cycles must be > 0") if len(pools) > cls.sequencer_lanes[sequencer.equipment_type]: raise ValueError( 'Number of pools cannot be bigger than the number of lanes ' 'in the sequencer. Pools: %s. 
Lanes in a %s sequencer: %s' % (len(pools), sequencer.equipment_type, cls.sequencer_lanes[sequencer.equipment_type])) with sql_connection.TRN as TRN: # Add the row to the process table process_id = cls._common_creation_steps(user) assay = None pool = pools[0] CM = composition_module while assay is None: comp = pool.components[0]['composition'] if isinstance(comp, CM.LibraryPrep16SComposition): assay = 'Amplicon' elif isinstance(comp, CM.LibraryPrepShotgunComposition): assay = 'Metagenomics' elif isinstance(comp, CM.PoolComposition): pool = comp else: # This should never happen - i.e. there is no way # of creating a pool like that raise ValueError( 'Pool with unexpected composition type: %s' % comp.__class__.__name__) # Add the row to the sequencing table sql = """INSERT INTO qiita.sequencing_process (process_id, run_name, experiment, sequencer_id, fwd_cycles, rev_cycles, assay, principal_investigator) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) RETURNING sequencing_process_id""" TRN.add(sql, [process_id, run_name, experiment, sequencer.id, fwd_cycles, rev_cycles, assay, principal_investigator.id]) instance = cls(TRN.execute_fetchlast()) sql = """INSERT INTO qiita.sequencing_process_lanes (sequencing_process_id, pool_composition_id, lane_number) VALUES (%s, %s, %s)""" sql_args = [[instance.id, p.id, i + 1] for i, p in enumerate(pools)] TRN.add(sql, sql_args, many=True) if contacts: sql = """INSERT INTO qiita.sequencing_process_contacts (sequencing_process_id, contact_id) VALUES (%s, %s)""" sql_args = [[instance.id, c.id] for c in contacts] TRN.add(sql, sql_args, many=True) TRN.execute() return instance @property def pools(self): with sql_connection.TRN as TRN: sql = """SELECT pool_composition_id, lane_number FROM qiita.sequencing_process_lanes WHERE sequencing_process_id = %s ORDER BY lane_number""" TRN.add(sql, [self.id]) res = [[composition_module.PoolComposition(p), l] for p, l in TRN.execute_fetchindex()] return res @property def run_name(self): return self._get_attr('run_name') @property def experiment(self): return self._get_attr('experiment') @property def sequencer(self): return equipment_module.Equipment(self._get_attr('sequencer_id')) @property def fwd_cycles(self): return self._get_attr('fwd_cycles') @property def rev_cycles(self): return self._get_attr('rev_cycles') @property def assay(self): return self._get_attr('assay') @property def principal_investigator(self): return user_module.User(self._get_attr('principal_investigator')) @property def contacts(self): with sql_connection.TRN as TRN: sql = """SELECT contact_id FROM qiita.sequencing_process_contacts WHERE sequencing_process_id = %s ORDER BY contact_id""" TRN.add(sql, [self.id]) return [user_module.User(r[0]) for r in TRN.execute_fetchindex()] @staticmethod def _bcl_scrub_name(name): """Modifies a sample name to be BCL2fastq compatible Parameters ---------- name : str the sample name Returns ------- str the sample name, formatted for bcl2fastq """ return re.sub('[^0-9a-zA-Z\-\_]+', '_', name) @staticmethod def _reverse_complement(seq): """Reverse-complement a sequence From http://stackoverflow.com/a/25189185/7146785 Parameters ---------- seq : str The sequence to reverse-complement Returns ------- str The reverse-complemented sequence """ complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} rev_seq = "".join(complement.get(base, base) for base in reversed(seq)) return rev_seq @staticmethod def _sequencer_i5_index(sequencer, indices): """Decides if the indices should be reversed based on the sequencer """ revcomp_sequencers = 
['HiSeq4000', 'MiniSeq', 'NextSeq', 'HiSeq3000'] other_sequencers = ['HiSeq2500', 'HiSeq1500', 'MiSeq', 'NovaSeq'] if sequencer in revcomp_sequencers: return([SequencingProcess._reverse_complement(x) for x in indices]) elif sequencer in other_sequencers: return(indices) else: raise ValueError( 'Your indicated sequencer [%s] is not recognized.\nRecognized ' 'sequencers are: \n' % ' '.join(revcomp_sequencers + other_sequencers)) @staticmethod def _format_sample_sheet_data(sample_ids, i7_name, i7_seq, i5_name, i5_seq, wells=None, sample_plates=None, sample_proj='', description=None, lanes=[1], sep=',', include_header=True): """Creates the [Data] component of the Illumina sample sheet Parameters ---------- sample_ids: array-like The bcl2fastq-compatible sample ids i7_name: array-like The i7 index name, in sample_ids order i7_seq: array-like The i7 sequences, in sample_ids order i5_name: array-like The i5 index name, in sample_ids order i5_seq: array-like The i5 sequences, in sample_ids order wells: array-like, optional The source sample wells, in sample_ids order. Default: None sample_plate: str, optional The plate name. Default: '' sample_proj: str, optional The project name. Default: '' description: array-like, optional The original sample ids, in sample_ids order. Default: None lanes: array-lie, optional The lanes in which the pool will be sequenced. Default: [1] sep: str, optional The file-format separator. Default: ',' include_header: bool, optional Wheather to include the header or not. Default: true Returns ------- str The formatted [Data] component of the Illumina sample sheet Raises ------ ValueError If sample_ids, i7_name, i7_seq, i5_name and i5_seq do not have all the same length """ if sample_plates is None: sample_plates = [''] * len(sample_ids) if (len(sample_ids) != len(i7_name) != len(i7_seq) != len(i5_name) != len(i5_seq) != len(sample_plates)): raise ValueError('Sample information lengths are not all equal') if wells is None: wells = [''] * len(sample_ids) if description is None: description = [''] * len(sample_ids) data = [] for lane in lanes: for i, sample in enumerate(sample_ids): line = sep.join([str(lane), sample, sample, sample_plates[i], wells[i], i7_name[i], i7_seq[i], i5_name[i], i5_seq[i], sample_proj, description[i]]) data.append(line) data = sorted(data) if include_header: data.insert(0, sep.join([ 'Lane', 'Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description'])) return '\n'.join(data) @staticmethod def _format_sample_sheet_comments(principal_investigator=None, contacts=None, other=None, sep=','): """Formats the sample sheet comments Parameters ---------- principal_investigator: dict, optional The principal investigator information: {name: email} contacts: dict, optional The contacts information: {name: email} other: str, optional Other information to include in the sample sheet comments sep: str, optional The sample sheet separator Returns ------- str The formatted comments of the sample sheet """ comments = [] if principal_investigator is not None: comments.append('PI{0}{1}\n'.format( sep, sep.join( '{0}{1}{2}'.format(x, sep, principal_investigator[x]) for x in principal_investigator.keys()))) if contacts is not None: comments.append( 'Contact{0}{1}\nContact emails{0}{2}\n'.format( sep, sep.join(x for x in sorted(contacts.keys())), sep.join(contacts[x] for x in sorted(contacts.keys())))) if other is not None: comments.append('%s\n' % other) return ''.join(comments) @staticmethod 
def _format_sample_sheet(sample_sheet_dict, sep=','): """Formats Illumina-compatible sample sheet. Parameters ---------- sample_sheet_dict : dict dict with the sample sheet information sep: str, optional The sample sheet separator Returns ------- sample_sheet : str the sample sheet string """ template = ( '{comments}[Header]\nIEMFileVersion{sep}{IEMFileVersion}\n' 'Investigator Name{sep}{Investigator Name}\n' 'Experiment Name{sep}{Experiment Name}\nDate{sep}{Date}\n' 'Workflow{sep}{Workflow}\nApplication{sep}{Application}\n' 'Assay{sep}{Assay}\nDescription{sep}{Description}\n' 'Chemistry{sep}{Chemistry}\n\n[Reads]\n{read1}\n{read2}\n\n' '[Settings]\nReverseComplement{sep}{ReverseComplement}\n\n' '[Data]\n{data}') if sample_sheet_dict['comments']: sample_sheet_dict['comments'] = re.sub( '^', '# ', sample_sheet_dict['comments'].rstrip(), flags=re.MULTILINE) + '\n' sample_sheet = template.format(**sample_sheet_dict, **{'sep': sep}) return sample_sheet def _generate_shotgun_sample_sheet(self): """Generates Illumina compatible shotgun sample sheets Returns ------- str The illumina-formatted sample sheet """ bcl2fastq_sample_ids = [] i7_names = [] i7_sequences = [] i5_names = [] i5_sequences = [] wells = [] sample_ids = [] sample_plates = [] sequencer_type = self.sequencer.equipment_type data = [] include_header = True for pool, lane in self.pools: for component in pool.components: lp_composition = component['composition'] # Get the well information well = lp_composition.container wells.append(well.well_id) # Get the plate information sample_plates.append(well.plate.external_id) # Get the i7 index information i7_comp = lp_composition.i7_composition.primer_set_composition i7_names.append(i7_comp.external_id) i7_sequences.append(i7_comp.barcode) # Get the i5 index information i5_comp = lp_composition.i5_composition.primer_set_composition i5_names.append(i5_comp.external_id) i5_sequences.append(i5_comp.barcode) # Get the sample id sample_id = lp_composition.normalized_gdna_composition.\ compressed_gdna_composition.gdna_composition.\ sample_composition.content sample_ids.append(sample_id) # Transform te sample ids to be bcl2fastq-compatible bcl2fastq_sample_ids = [ SequencingProcess._bcl_scrub_name(sid) for sid in sample_ids] # Reverse the i5 sequences if needed based on the sequencer i5_sequences = SequencingProcess._sequencer_i5_index( sequencer_type, i5_sequences) # add the data of the curent pool data.append(SequencingProcess._format_sample_sheet_data( bcl2fastq_sample_ids, i7_names, i7_sequences, i5_names, i5_sequences, wells=wells, sample_plates=sample_plates, description=sample_ids, sample_proj=self.run_name, lanes=[lane], sep=',', include_header=include_header)) include_header = False data = '\n'.join(data) contacts = {c.name: c.email for c in self.contacts} pi = self.principal_investigator principal_investigator = {pi.name: pi.email} sample_sheet_dict = { 'comments': SequencingProcess._format_sample_sheet_comments( principal_investigator=principal_investigator, contacts=contacts), 'IEMFileVersion': '4', 'Investigator Name': pi.name, 'Experiment Name': self.experiment, 'Date': str(self.date), 'Workflow': 'GenerateFASTQ', 'Application': 'FASTQ Only', 'Assay': self.assay, 'Description': '', 'Chemistry': 'Default', 'read1': self.fwd_cycles, 'read2': self.rev_cycles, 'ReverseComplement': '0', 'data': data} return SequencingProcess._format_sample_sheet(sample_sheet_dict) def _generate_amplicon_sample_sheet(self): """Generates Illumina compatible sample sheets Returns ------- str The 
illumina-formatted sample sheet """ fixed_run_name = SequencingProcess._bcl_scrub_name(self.run_name) data = ( 'Sample_ID,Sample_Name,Sample_Plate,Sample_Well,I7_Index_ID,' 'index,Sample_Project,Description,,\n' '%s,,,,,NNNNNNNNNNNN,,,,,' % fixed_run_name) contacts = {c.name: c.email for c in self.contacts} pi = self.principal_investigator principal_investigator = {pi.name: pi.email} sample_sheet_dict = { 'comments': SequencingProcess._format_sample_sheet_comments( principal_investigator=principal_investigator, contacts=contacts), 'IEMFileVersion': '4', 'Investigator Name': pi.name, 'Experiment Name': self.experiment, 'Date': str(self.date), 'Workflow': 'GenerateFASTQ', 'Application': 'FASTQ Only', 'Assay': self.assay, 'Description': '', 'Chemistry': 'Default', 'read1': self.fwd_cycles, 'read2': self.rev_cycles, 'ReverseComplement': '0', 'data': data} return SequencingProcess._format_sample_sheet(sample_sheet_dict) def generate_sample_sheet(self): """Generates Illumina compatible sample sheets Returns ------- str The illumina-formatted sample sheet """ assay = self.assay if assay == 'Amplicon': return self._generate_amplicon_sample_sheet() elif assay == 'Metagenomics': return self._generate_shotgun_sample_sheet() def generate_prep_information(self): """Generates prep information Returns ------- dict labman.db.study.Study: str a dict of the Study and the prep """ assay = self.assay data = {} blanks = {} if assay == 'Amplicon': extra_fields = [ # 'e'/'r': equipment/reagent ('e', 'lepmotion_robot_id', 'epmotion_robot'), ('e', 'epmotion_tm300_8_tool_id', 'epmotion_tm300_8_tool'), ('e', 'epmotion_tm50_8_tool_id', 'epmotion_tm50_8_tool'), ('e', 'gepmotion_robot_id', 'gdata_robot'), ('e', 'epmotion_tool_id', 'epmotion_tool'), ('e', 'kingfisher_robot_id', 'kingfisher_robot'), ('r', 'extraction_kit_id', 'extraction_kit'), ('r', 'master_mix_id', 'master_mix'), ('r', 'water_lot_id', 'water_lot'), ] sql = """ SELECT study_id, sample_id, content, run_name, experiment, fwd_cycles, rev_cycles, principal_investigator, et.description as sequencer_description, lpp.epmotion_robot_id as lepmotion_robot_id, epmotion_tm300_8_tool_id, epmotion_tm50_8_tool_id, master_mix_id, water_lot_id, gep.epmotion_robot_id as gepmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id, p1.external_id as plate, w1.row_num as row_num, w1.col_num as col_num, p2.external_id as primer_composition, psc.barcode_seq as primer_set_composition, run_name as run_prefix, sp.sequencer_id as platform_id, sp.experiment as center_project_name -- Retrieve sequencing information FROM qiita.sequencing_process sp LEFT JOIN qiita.equipment e ON ( sequencer_id = equipment_id) LEFT JOIN qiita.equipment_type et ON ( et.equipment_type_id = e.equipment_type_id) LEFT JOIN qiita.sequencing_process_lanes spl USING ( sequencing_process_id) -- Retrieve pooling information LEFT JOIN qiita.pool_composition_components pcc1 ON ( pcc1.output_pool_composition_id = spl.pool_composition_id) LEFT JOIN qiita.pool_composition pccon ON ( pcc1.input_composition_id = pccon.composition_id) LEFT JOIN qiita.pool_composition_components pcc2 ON ( pccon.pool_composition_id = pcc2.output_pool_composition_id) -- Retrieve amplicon library prep information LEFT JOIN qiita.library_prep_16S_composition lp ON ( pcc2.input_composition_id = lp.composition_id) LEFT JOIN qiita.composition c1 ON ( lp.composition_id = c1.composition_id) LEFT JOIN qiita.library_prep_16s_process lpp ON ( lpp.process_id = c1.upstream_process_id) -- Retrieve the extracted gdna information LEFT 
JOIN qiita.gdna_composition gc USING (gdna_composition_id) LEFT JOIN qiita.composition c2 ON ( gc.composition_id = c2.composition_id) LEFT JOIN qiita.gdna_extraction_process gep ON ( gep.process_id = c2.upstream_process_id) -- Retrieve the sample information LEFT JOIN qiita.sample_composition sc USING ( sample_composition_id) LEFT JOIN qiita.composition c3 ON ( c3.composition_id = sc.composition_id) LEFT JOIN qiita.well w1 ON ( w1.container_id = c3.container_id) LEFT JOIN qiita.plate p1 ON ( w1.plate_id = p1.plate_id) LEFT JOIN qiita.composition c4 ON ( lp.primer_composition_id = c4.composition_id ) LEFT JOIN qiita.well w2 ON ( w2.container_id = c4.container_id) LEFT JOIN qiita.plate p2 ON ( w2.plate_id = p2.plate_id) LEFT JOIN qiita.primer_composition pc ON ( lp.primer_composition_id = pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition psc ON ( pc.primer_set_composition_id = psc.primer_set_composition_id) FULL JOIN qiita.study_sample USING (sample_id) WHERE sequencing_process_id = %s ORDER BY study_id, sample_id, row_num, col_num""" elif assay == 'Metagenomics': extra_fields = [ ('e', 'gepmotion_robot_id', 'gdata_robot'), ('e', 'epmotion_tool_id', 'epmotion_tool'), ('e', 'kingfisher_robot_id', 'kingfisher_robot'), ('r', 'kappa_hyper_plus_kit_id', 'kappa_hyper_plus_kit'), ('r', 'stub_lot_id', 'stub_lot'), ('r', 'extraction_kit_id', 'extraction_kit'), ('r', 'nwater_lot_id', 'normalization_water_lot'), ] sql = """ SELECT study_id, sample_id, content, run_name, experiment, fwd_cycles, rev_cycles, principal_investigator, i5.barcode_seq as i5_sequence, i7.barcode_seq as i5_sequence, et.description as sequencer_description, gep.epmotion_robot_id as gepmotion_robot_id, epmotion_tool_id, kingfisher_robot_id, extraction_kit_id, np.water_lot_id as nwater_lot_id, kappa_hyper_plus_kit_id, stub_lot_id, p1.external_id as plate, row_num, col_num, sp.sequencer_id as platform_id, sp.experiment as center_project_name -- Retrieve sequencing information FROM qiita.sequencing_process sp LEFT JOIN qiita.equipment e ON ( sequencer_id = equipment_id) LEFT JOIN qiita.equipment_type et ON ( et.equipment_type_id = e.equipment_type_id) LEFT JOIN qiita.sequencing_process_lanes USING ( sequencing_process_id) -- Retrieving pool information LEFT JOIN qiita.pool_composition_components ON ( output_pool_composition_id = pool_composition_id) -- Retrieving library prep information LEFT JOIN qiita.library_prep_shotgun_composition ON ( input_composition_id = composition_id) LEFT JOIN qiita.primer_composition i5pc ON ( i5_primer_composition_id = i5pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition i5 ON ( i5pc.primer_set_composition_id = i5.primer_set_composition_id ) LEFT JOIN qiita.primer_composition i7pc ON ( i7_primer_composition_id = i7pc.primer_composition_id) LEFT JOIN qiita.primer_set_composition i7 ON ( i7pc.primer_set_composition_id = i7.primer_set_composition_id ) -- Retrieving normalized gdna information LEFT JOIN qiita.normalized_gdna_composition ngc USING ( normalized_gdna_composition_id) LEFT JOIN qiita.composition c1 ON ( ngc.composition_id = c1.composition_id) LEFT JOIN qiita.library_prep_shotgun_process lps ON ( lps.process_id = c1.upstream_process_id) LEFT JOIN qiita.normalization_process np USING ( normalization_process_id) -- Retrieving compressed gdna information LEFT JOIN qiita.compressed_gdna_composition cgc USING ( compressed_gdna_composition_id) -- Retrieving gdna information LEFT JOIN qiita.gdna_composition gc USING (gdna_composition_id) LEFT JOIN qiita.composition c2 ON ( 
gc.composition_id = c2.composition_id) LEFT JOIN qiita.gdna_extraction_process gep ON ( gep.process_id = c2.upstream_process_id) LEFT JOIN qiita.sample_composition sc USING ( sample_composition_id) LEFT JOIN qiita.composition c3 ON ( c3.composition_id = sc.composition_id) LEFT JOIN qiita.well w1 ON ( w1.container_id = c3.container_id) LEFT JOIN qiita.plate p1 ON ( w1.plate_id = p1.plate_id) FULL JOIN qiita.study_sample USING (sample_id) WHERE sequencing_process_id = %s ORDER BY study_id, sample_id, row_num, col_num, i5.barcode_seq """ with sql_connection.TRN as TRN: # to simplify the main queries, let's get all the equipment info TRN.add("""SELECT equipment_id, external_id, notes, description FROM qiita.equipment LEFT JOIN qiita.equipment_type USING (equipment_type_id)""") equipment = {} for row in TRN.execute_fetchindex(): row = dict(row) eid = row.pop('equipment_id') equipment[eid] = row # and the reagents TRN.add("""SELECT reagent_composition_id, composition_id, external_lot_id, description FROM qiita.reagent_composition LEFT JOIN qiita.reagent_composition_type USING (reagent_composition_type_id)""") reagent = {} for row in TRN.execute_fetchindex(): row = dict(row) rid = row.pop('reagent_composition_id') reagent[rid] = row TRN.add(sql, [self.id]) for result in TRN.execute_fetchindex(): result = dict(result) study_id = result.pop('study_id') sid = result.pop('sample_id') content = result.pop('content') # format well col = result.pop('col_num') row = result.pop('row_num') well = [] while row: row, rem = divmod(row-1, 26) well[:0] = container_module.LETTERS[rem] result['well'] = ''.join(well) + str(col) # format extra fields list for t, k, nk in extra_fields: _id = result.pop(k) if _id is not None: if t == 'e': val = equipment[_id]['external_id'] else: val = reagent[_id]['external_lot_id'] else: val = '' result[nk] = val # format some final fields result['platform'] = equipment[ result.pop('platform_id')]['description'] if sid is not None and study_id is not None: study = Study(study_id) if study not in data: data[study] = {} data[study][content] = result if assay == 'Metagenomics': result['run_prefix'] = \ SequencingProcess._bcl_scrub_name(content) else: if assay == 'Metagenomics': result['run_prefix'] = \ SequencingProcess._bcl_scrub_name(content) blanks[content] = result # converting from dict to pandas and then to tsv for study, vals in data.items(): merged = {**vals, **blanks} df = pd.DataFrame.from_dict(merged, orient='index') df.sort_index(inplace=True) cols = sorted(list(df.columns)) sio = StringIO() df[cols].to_csv(sio, sep='\t', index_label='sample_name') data[study] = sio.getvalue() return data
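
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the well-label logic
# in SequencingProcess.generate_prep_information turns a 1-based row number
# into a spreadsheet-style letter prefix. A standalone version, with a plain
# alphabet standing in for container_module.LETTERS, would look roughly like:


def _example_row_col_to_well(row, col):
    """Hypothetical helper: format (row, col) as a well id, e.g. (8, 12) -> 'H12'.

    >>> _example_row_col_to_well(1, 1)
    'A1'
    >>> _example_row_col_to_well(27, 3)
    'AA3'
    """
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    label = []
    while row:
        row, rem = divmod(row - 1, 26)
        label[:0] = letters[rem]
    return ''.join(label) + str(col)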
bsd-3-clause
mrares/incubator-airflow
airflow/www/views.py
1
97991
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from past.builtins import basestring, unicode import ast import logging import os import pkg_resources import socket from functools import wraps from datetime import datetime, timedelta import dateutil.parser import copy import math import json import bleach from collections import defaultdict import inspect from textwrap import dedent import traceback import sqlalchemy as sqla from sqlalchemy import or_, desc, and_, union_all from flask import ( redirect, url_for, request, Markup, Response, current_app, render_template, make_response) from flask_admin import BaseView, expose, AdminIndexView from flask_admin.contrib.sqla import ModelView from flask_admin.actions import action from flask_admin.babel import lazy_gettext from flask_admin.tools import iterdecode from flask_login import flash from flask._compat import PY2 from jinja2.sandbox import ImmutableSandboxedEnvironment from jinja2 import escape import markdown import nvd3 from wtforms import ( Form, SelectField, TextAreaField, PasswordField, StringField, validators) from pygments import highlight, lexers from pygments.formatters import HtmlFormatter import airflow from airflow import configuration as conf from airflow import models from airflow import settings from airflow.api.common.experimental.mark_tasks import set_dag_run_state from airflow.exceptions import AirflowException from airflow.settings import Session from airflow.models import XCom, DagRun from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS from airflow.models import BaseOperator from airflow.operators.subdag_operator import SubDagOperator from airflow.utils.json import json_ser from airflow.utils.state import State from airflow.utils.db import provide_session from airflow.utils.helpers import alchemy_to_dict from airflow.utils.dates import infer_time_unit, scale_time_units from airflow.www import utils as wwwutils from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm from airflow.www.validators import GreaterEqualThan QUERY_LIMIT = 100000 CHART_LIMIT = 200000 dagbag = models.DagBag(settings.DAGS_FOLDER) login_required = airflow.login.login_required current_user = airflow.login.current_user logout_user = airflow.login.logout_user FILTER_BY_OWNER = False PAGE_SIZE = conf.getint('webserver', 'page_size') if conf.getboolean('webserver', 'FILTER_BY_OWNER'): # filter_by_owner if authentication is enabled and filter_by_owner is true FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED'] def dag_link(v, c, m, p): dag_id = bleach.clean(m.dag_id) url = url_for( 'airflow.graph', dag_id=dag_id) return Markup( '<a href="{}">{}</a>'.format(url, dag_id)) def log_url_formatter(v, c, m, p): return Markup( '<a href="{m.log_url}">' ' <span class="glyphicon glyphicon-book" aria-hidden="true">' '</span></a>').format(**locals()) def task_instance_link(v, c, m, p): dag_id = bleach.clean(m.dag_id) task_id = bleach.clean(m.task_id) url = url_for( 'airflow.task', dag_id=dag_id, task_id=task_id, 
execution_date=m.execution_date.isoformat()) url_root = url_for( 'airflow.graph', dag_id=dag_id, root=task_id, execution_date=m.execution_date.isoformat()) return Markup( """ <span style="white-space: nowrap;"> <a href="{url}">{task_id}</a> <a href="{url_root}" title="Filter on this task and upstream"> <span class="glyphicon glyphicon-filter" style="margin-left: 0px;" aria-hidden="true"></span> </a> </span> """.format(**locals())) def state_token(state): color = State.color(state) return Markup( '<span class="label" style="background-color:{color};">' '{state}</span>'.format(**locals())) def state_f(v, c, m, p): return state_token(m.state) def duration_f(v, c, m, p): if m.end_date and m.duration: return timedelta(seconds=m.duration) def datetime_f(v, c, m, p): attr = getattr(m, p) dttm = attr.isoformat() if attr else '' if datetime.utcnow().isoformat()[:4] == dttm[:4]: dttm = dttm[5:] return Markup("<nobr>{}</nobr>".format(dttm)) def nobr_f(v, c, m, p): return Markup("<nobr>{}</nobr>".format(getattr(m, p))) def label_link(v, c, m, p): try: default_params = ast.literal_eval(m.default_params) except: default_params = {} url = url_for( 'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no, **default_params) return Markup("<a href='{url}'>{m.label}</a>".format(**locals())) def pool_link(v, c, m, p): url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool return Markup("<a href='{url}'>{m.pool}</a>".format(**locals())) def pygment_html_render(s, lexer=lexers.TextLexer): return highlight( s, lexer(), HtmlFormatter(linenos=True), ) def render(obj, lexer): out = "" if isinstance(obj, basestring): out += pygment_html_render(obj, lexer) elif isinstance(obj, (tuple, list)): for i, s in enumerate(obj): out += "<div>List item #{}</div>".format(i) out += "<div>" + pygment_html_render(s, lexer) + "</div>" elif isinstance(obj, dict): for k, v in obj.items(): out += '<div>Dict item "{}"</div>'.format(k) out += "<div>" + pygment_html_render(v, lexer) + "</div>" return out def wrapped_markdown(s): return '<div class="rich_doc">' + markdown.markdown(s) + "</div>" attr_renderer = { 'bash_command': lambda x: render(x, lexers.BashLexer), 'hql': lambda x: render(x, lexers.SqlLexer), 'sql': lambda x: render(x, lexers.SqlLexer), 'doc': lambda x: render(x, lexers.TextLexer), 'doc_json': lambda x: render(x, lexers.JsonLexer), 'doc_rst': lambda x: render(x, lexers.RstLexer), 'doc_yaml': lambda x: render(x, lexers.YamlLexer), 'doc_md': wrapped_markdown, 'python_callable': lambda x: render( inspect.getsource(x), lexers.PythonLexer), } def data_profiling_required(f): """Decorator for views requiring data profiling access""" @wraps(f) def decorated_function(*args, **kwargs): if ( current_app.config['LOGIN_DISABLED'] or (not current_user.is_anonymous() and current_user.data_profiling()) ): return f(*args, **kwargs) else: flash("This page requires data profiling privileges", "error") return redirect(url_for('admin.index')) return decorated_function def fused_slots(v, c, m, p): url = ( '/admin/taskinstance/' + '?flt1_pool_equals=' + m.pool + '&flt2_state_equals=running') return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots())) def fqueued_slots(v, c, m, p): url = ( '/admin/taskinstance/' + '?flt1_pool_equals=' + m.pool + '&flt2_state_equals=queued&sort=10&desc=1') return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots())) def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag): if isinstance(tasks, list): for task in tasks: recurse_tasks(task, task_ids, dag_ids, task_id_to_dag) return if 
isinstance(tasks, SubDagOperator): subtasks = tasks.subdag.tasks dag_ids.append(tasks.subdag.dag_id) for subtask in subtasks: if subtask.task_id not in task_ids: task_ids.append(subtask.task_id) task_id_to_dag[subtask.task_id] = tasks.subdag recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag) if isinstance(tasks, BaseOperator): task_id_to_dag[tasks.task_id] = tasks.dag def get_chart_height(dag): """ TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to approximate the size of generated chart (otherwise the charts are tiny and unreadable when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height charts, that is charts that take up space based on the size of the components within. """ return 600 + len(dag.tasks) * 10 class Airflow(BaseView): def is_visible(self): return False @expose('/') @login_required def index(self): return self.render('airflow/dags.html') @expose('/chart_data') @data_profiling_required @wwwutils.gzipped # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key) def chart_data(self): from airflow import macros import pandas as pd session = settings.Session() chart_id = request.args.get('chart_id') csv = request.args.get('csv') == "true" chart = session.query(models.Chart).filter_by(id=chart_id).first() db = session.query( models.Connection).filter_by(conn_id=chart.conn_id).first() session.expunge_all() session.commit() session.close() payload = { "state": "ERROR", "error": "" } # Processing templated fields try: args = ast.literal_eval(chart.default_params) if type(args) is not type(dict()): raise AirflowException('Not a dict') except: args = {} payload['error'] += ( "Default params is not valid, string has to evaluate as " "a Python dictionary. ") request_dict = {k: request.args.get(k) for k in request.args} args.update(request_dict) args['macros'] = macros sandbox = ImmutableSandboxedEnvironment() sql = sandbox.from_string(chart.sql).render(**args) label = sandbox.from_string(chart.label).render(**args) payload['sql_html'] = Markup(highlight( sql, lexers.SqlLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) payload['label'] = label pd.set_option('display.max_colwidth', 100) hook = db.get_hook() try: df = hook.get_pandas_df( wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type)) df = df.fillna(0) except Exception as e: payload['error'] += "SQL execution failed. Details: " + str(e) if csv: return Response( response=df.to_csv(index=False), status=200, mimetype="application/text") if not payload['error'] and len(df) == CHART_LIMIT: payload['warning'] = ( "Data has been truncated to {0}" " rows. Expect incomplete results.").format(CHART_LIMIT) if not payload['error'] and len(df) == 0: payload['error'] += "Empty result set. " elif ( not payload['error'] and chart.sql_layout == 'series' and chart.chart_type != "datatable" and len(df.columns) < 3): payload['error'] += "SQL needs to return at least 3 columns. " elif ( not payload['error'] and chart.sql_layout == 'columns' and len(df.columns) < 2): payload['error'] += "SQL needs to return at least 2 columns. 
" elif not payload['error']: import numpy as np chart_type = chart.chart_type data = None if chart.show_datatable or chart_type == "datatable": data = df.to_dict(orient="split") data['columns'] = [{'title': c} for c in data['columns']] payload['data'] = data # Trying to convert time to something Highcharts likes x_col = 1 if chart.sql_layout == 'series' else 0 if chart.x_is_date: try: # From string to datetime df[df.columns[x_col]] = pd.to_datetime( df[df.columns[x_col]]) df[df.columns[x_col]] = df[df.columns[x_col]].apply( lambda x: int(x.strftime("%s")) * 1000) except Exception as e: payload['error'] = "Time conversion failed" if chart_type == 'datatable': payload['state'] = 'SUCCESS' return wwwutils.json_response(payload) else: if chart.sql_layout == 'series': # User provides columns (series, x, y) xaxis_label = df.columns[1] yaxis_label = df.columns[2] df[df.columns[2]] = df[df.columns[2]].astype(np.float) df = df.pivot_table( index=df.columns[1], columns=df.columns[0], values=df.columns[2], aggfunc=np.sum) else: # User provides columns (x, y, metric1, metric2, ...) xaxis_label = df.columns[0] yaxis_label = 'y' df.index = df[df.columns[0]] df = df.sort(df.columns[0]) del df[df.columns[0]] for col in df.columns: df[col] = df[col].astype(np.float) df = df.fillna(0) NVd3ChartClass = chart_mapping.get(chart.chart_type) NVd3ChartClass = getattr(nvd3, NVd3ChartClass) nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date) for col in df.columns: nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist()) try: nvd3_chart.buildcontent() payload['chart_type'] = nvd3_chart.__class__.__name__ payload['htmlcontent'] = nvd3_chart.htmlcontent except Exception as e: payload['error'] = str(e) payload['state'] = 'SUCCESS' payload['request_dict'] = request_dict return wwwutils.json_response(payload) @expose('/chart') @data_profiling_required def chart(self): session = settings.Session() chart_id = request.args.get('chart_id') embed = request.args.get('embed') chart = session.query(models.Chart).filter_by(id=chart_id).first() session.expunge_all() session.commit() session.close() NVd3ChartClass = chart_mapping.get(chart.chart_type) if not NVd3ChartClass: flash( "Not supported anymore as the license was incompatible, " "sorry", "danger") redirect('/admin/chart/') sql = "" if chart.show_sql: sql = Markup(highlight( chart.sql, lexers.SqlLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) return self.render( 'airflow/nvd3.html', chart=chart, title="Airflow - Chart", sql=sql, label=chart.label, embed=embed) @expose('/dag_stats') def dag_stats(self): ds = models.DagStat session = Session() ds.update() qry = ( session.query(ds.dag_id, ds.state, ds.count) ) data = {} for dag_id, state, count in qry: if dag_id not in data: data[dag_id] = {} data[dag_id][state] = count payload = {} for dag in dagbag.dags.values(): payload[dag.safe_dag_id] = [] for state in State.dag_states: try: count = data[dag.dag_id][state] except Exception: count = 0 d = { 'state': state, 'count': count, 'dag_id': dag.dag_id, 'color': State.color(state) } payload[dag.safe_dag_id].append(d) return wwwutils.json_response(payload) @expose('/task_stats') def task_stats(self): TI = models.TaskInstance DagRun = models.DagRun Dag = models.DagModel session = Session() LastDagRun = ( session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date')) .join(Dag, Dag.dag_id == DagRun.dag_id) .filter(DagRun.state != State.RUNNING) .filter(Dag.is_active == True) .group_by(DagRun.dag_id) .subquery('last_dag_run') ) 
RunningDagRun = ( session.query(DagRun.dag_id, DagRun.execution_date) .join(Dag, Dag.dag_id == DagRun.dag_id) .filter(DagRun.state == State.RUNNING) .filter(Dag.is_active == True) .subquery('running_dag_run') ) # Select all task_instances from active dag_runs. # If no dag_run is active, return task instances from most recent dag_run. LastTI = ( session.query(TI.dag_id.label('dag_id'), TI.state.label('state')) .join(LastDagRun, and_( LastDagRun.c.dag_id == TI.dag_id, LastDagRun.c.execution_date == TI.execution_date)) ) RunningTI = ( session.query(TI.dag_id.label('dag_id'), TI.state.label('state')) .join(RunningDagRun, and_( RunningDagRun.c.dag_id == TI.dag_id, RunningDagRun.c.execution_date == TI.execution_date)) ) UnionTI = union_all(LastTI, RunningTI).alias('union_ti') qry = ( session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count()) .group_by(UnionTI.c.dag_id, UnionTI.c.state) ) data = {} for dag_id, state, count in qry: if dag_id not in data: data[dag_id] = {} data[dag_id][state] = count session.commit() session.close() payload = {} for dag in dagbag.dags.values(): payload[dag.safe_dag_id] = [] for state in State.task_states: try: count = data[dag.dag_id][state] except: count = 0 d = { 'state': state, 'count': count, 'dag_id': dag.dag_id, 'color': State.color(state) } payload[dag.safe_dag_id].append(d) return wwwutils.json_response(payload) @expose('/code') @login_required def code(self): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) title = dag_id try: with open(dag.fileloc, 'r') as f: code = f.read() html_code = highlight( code, lexers.PythonLexer(), HtmlFormatter(linenos=True)) except IOError as e: html_code = str(e) return self.render( 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title, root=request.args.get('root'), demo_mode=conf.getboolean('webserver', 'demo_mode')) @expose('/dag_details') @login_required def dag_details(self): dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) title = "DAG details" session = settings.Session() TI = models.TaskInstance states = ( session.query(TI.state, sqla.func.count(TI.dag_id)) .filter(TI.dag_id == dag_id) .group_by(TI.state) .all() ) return self.render( 'airflow/dag_details.html', dag=dag, title=title, states=states, State=State) @current_app.errorhandler(404) def circles(self): return render_template( 'airflow/circles.html', hostname=socket.getfqdn()), 404 @current_app.errorhandler(500) def show_traceback(self): from airflow.utils import asciiart as ascii_ return render_template( 'airflow/traceback.html', hostname=socket.getfqdn(), nukular=ascii_.nukular, info=traceback.format_exc()), 500 @expose('/noaccess') def noaccess(self): return self.render('airflow/noaccess.html') @expose('/pickle_info') @login_required def pickle_info(self): d = {} dag_id = request.args.get('dag_id') dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values() for dag in dags: if not dag.is_subdag: d[dag.dag_id] = dag.pickle_info() return wwwutils.json_response(d) @expose('/login', methods=['GET', 'POST']) def login(self): return airflow.login.login(self, request) @expose('/logout') def logout(self): logout_user() flash('You have been logged out.') return redirect(url_for('admin.index')) @expose('/rendered') @login_required @wwwutils.action_logging def rendered(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') dttm = dateutil.parser.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = 
dagbag.get_dag(dag_id) task = copy.copy(dag.get_task(task_id)) ti = models.TaskInstance(task=task, execution_date=dttm) try: ti.render_templates() except Exception as e: flash("Error rendering template: " + str(e), "error") title = "Rendered Template" html_dict = {} for template_field in task.__class__.template_fields: content = getattr(task, template_field) if template_field in attr_renderer: html_dict[template_field] = attr_renderer[template_field](content) else: html_dict[template_field] = ( "<pre><code>" + str(content) + "</pre></code>") return self.render( 'airflow/ti_code.html', html_dict=html_dict, dag=dag, task_id=task_id, execution_date=execution_date, form=form, title=title, ) @expose('/log') @login_required @wwwutils.action_logging def log(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') execution_date = request.args.get('execution_date') dttm = dateutil.parser.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) session = Session() ti = session.query(models.TaskInstance).filter( models.TaskInstance.dag_id == dag_id, models.TaskInstance.task_id == task_id, models.TaskInstance.execution_date == dttm).first() if ti is None: logs = ["*** Task instance did not exist in the DB\n"] else: logger = logging.getLogger('airflow.task') task_log_reader = conf.get('core', 'task_log_reader') handler = next((handler for handler in logger.handlers if handler.name == task_log_reader), None) try: ti.task = dag.get_task(ti.task_id) logs = handler.read(ti) except AttributeError as e: logs = ["Task log handler {} does not support read logs.\n{}\n" \ .format(task_log_reader, e.message)] for i, log in enumerate(logs): if PY2 and not isinstance(log, unicode): logs[i] = log.decode('utf-8') return self.render( 'airflow/ti_log.html', logs=logs, dag=dag, title="Log by attempts", task_id=task_id, execution_date=execution_date, form=form) @expose('/task') @login_required @wwwutils.action_logging def task(self): TI = models.TaskInstance dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') # Carrying execution_date through, even though it's irrelevant for # this context execution_date = request.args.get('execution_date') dttm = dateutil.parser.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: flash( "Task [{}.{}] doesn't seem to exist" " at the moment".format(dag_id, task_id), "error") return redirect('/admin/') task = copy.copy(dag.get_task(task_id)) task.resolve_template_files() ti = TI(task=task, execution_date=dttm) ti.refresh_from_db() ti_attrs = [] for attr_name in dir(ti): if not attr_name.startswith('_'): attr = getattr(ti, attr_name) if type(attr) != type(self.task): ti_attrs.append((attr_name, str(attr))) task_attrs = [] for attr_name in dir(task): if not attr_name.startswith('_'): attr = getattr(task, attr_name) if type(attr) != type(self.task) and \ attr_name not in attr_renderer: task_attrs.append((attr_name, str(attr))) # Color coding the special attributes that are code special_attrs_rendered = {} for attr_name in attr_renderer: if hasattr(task, attr_name): source = getattr(task, attr_name) special_attrs_rendered[attr_name] = attr_renderer[attr_name](source) no_failed_deps_result = [( "Unknown", dedent("""\ All dependencies are met but the task instance is not running. 
In most cases this just means that the task will probably be scheduled soon unless:<br/> - The scheduler is down or under heavy load<br/> {} <br/> If this task instance does not start soon please contact your Airflow administrator for assistance.""" .format( "- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>" if ti.state == State.NONE else "")))] # Use the scheduler's context to figure out which dependencies are not met dep_context = DepContext(SCHEDULER_DEPS) failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses( dep_context=dep_context)] title = "Task Instance Details" return self.render( 'airflow/task.html', task_attrs=task_attrs, ti_attrs=ti_attrs, failed_dep_reasons=failed_dep_reasons or no_failed_deps_result, task_id=task_id, execution_date=execution_date, special_attrs_rendered=special_attrs_rendered, form=form, dag=dag, title=title) @expose('/xcom') @login_required @wwwutils.action_logging def xcom(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') # Carrying execution_date through, even though it's irrelevant for # this context execution_date = request.args.get('execution_date') dttm = dateutil.parser.parse(execution_date) form = DateTimeForm(data={'execution_date': dttm}) dag = dagbag.get_dag(dag_id) if not dag or task_id not in dag.task_ids: flash( "Task [{}.{}] doesn't seem to exist" " at the moment".format(dag_id, task_id), "error") return redirect('/admin/') session = Session() xcomlist = session.query(XCom).filter( XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.execution_date == dttm).all() attributes = [] for xcom in xcomlist: if not xcom.key.startswith('_'): attributes.append((xcom.key, xcom.value)) title = "XCom" return self.render( 'airflow/xcom.html', attributes=attributes, task_id=task_id, execution_date=execution_date, form=form, dag=dag, title=title) @expose('/run') @login_required @wwwutils.action_logging @wwwutils.notify_owner def run(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') dag = dagbag.get_dag(dag_id) task = dag.get_task(task_id) execution_date = request.args.get('execution_date') execution_date = dateutil.parser.parse(execution_date) ignore_all_deps = request.args.get('ignore_all_deps') == "true" ignore_task_deps = request.args.get('ignore_task_deps') == "true" ignore_ti_state = request.args.get('ignore_ti_state') == "true" try: from airflow.executors import GetDefaultExecutor from airflow.executors.celery_executor import CeleryExecutor executor = GetDefaultExecutor() if not isinstance(executor, CeleryExecutor): flash("Only works with the CeleryExecutor, sorry", "error") return redirect(origin) except ImportError: # in case CeleryExecutor cannot be imported it is not active either flash("Only works with the CeleryExecutor, sorry", "error") return redirect(origin) ti = models.TaskInstance(task=task, execution_date=execution_date) ti.refresh_from_db() # Make sure the task instance can be queued dep_context = DepContext( deps=QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context)) if failed_deps: failed_deps_str = ", ".join( ["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps]) flash("Could not queue task instance for execution, dependencies not met: " "{}".format(failed_deps_str), "error") return redirect(origin) executor.start() 
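        # The executor is started inline in the webserver process: the task
        # instance is queued directly and a heartbeat is triggered right away,
        # so the command reaches the Celery workers without waiting for the
        # next scheduler cycle.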
executor.queue_task_instance( ti, ignore_all_deps=ignore_all_deps, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) executor.heartbeat() flash( "Sent {} to the message queue, " "it should start any moment now.".format(ti)) return redirect(origin) @expose('/trigger') @login_required @wwwutils.action_logging @wwwutils.notify_owner def trigger(self): dag_id = request.args.get('dag_id') origin = request.args.get('origin') or "/admin/" dag = dagbag.get_dag(dag_id) if not dag: flash("Cannot find dag {}".format(dag_id)) return redirect(origin) execution_date = datetime.utcnow() run_id = "manual__{0}".format(execution_date.isoformat()) dr = DagRun.find(dag_id=dag_id, run_id=run_id) if dr: flash("This run_id {} already exists".format(run_id)) return redirect(origin) run_conf = {} dag.create_dagrun( run_id=run_id, execution_date=execution_date, state=State.RUNNING, conf=run_conf, external_trigger=True ) flash( "Triggered {}, " "it should start any moment now.".format(dag_id)) return redirect(origin) def _clear_dag_tis(self, dags, start_date, end_date, origin, recursive=False, confirmed=False): if confirmed: count = 0 for dag in dags: count += dag.clear( start_date=start_date, end_date=end_date, include_subdags=recursive) flash("{0} task instances have been cleared".format(count)) return redirect(origin) tis = [] for dag in dags: tis.extend(dag.clear( start_date=start_date, end_date=end_date, include_subdags=recursive, dry_run=True)) if not tis: flash("No task instances to clear", 'error') response = redirect(origin) else: details = "\n".join([str(t) for t in tis]) response = self.render( 'airflow/confirm.html', message=("Here's the list of task instances you are about " "to clear:"), details=details) return response @expose('/clear') @login_required @wwwutils.action_logging @wwwutils.notify_owner def clear(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') dag = dagbag.get_dag(dag_id) execution_date = request.args.get('execution_date') execution_date = dateutil.parser.parse(execution_date) confirmed = request.args.get('confirmed') == "true" upstream = request.args.get('upstream') == "true" downstream = request.args.get('downstream') == "true" future = request.args.get('future') == "true" past = request.args.get('past') == "true" recursive = request.args.get('recursive') == "true" descendants = request.args.get('descendants') == "true" dags = [dag.sub_dag( task_regex=r"^{0}$".format(task_id), include_downstream=downstream, include_upstream=upstream)] if descendants: dags.extend(dag.descendants( dagbag, task_ids=[task_id], include_downstream=downstream, include_upstream=upstream, recursive=recursive)) end_date = execution_date if not future else None start_date = execution_date if not past else None return self._clear_dag_tis(dags, start_date, end_date, origin, recursive=recursive, confirmed=confirmed) @expose('/dagrun_clear') @login_required @wwwutils.action_logging @wwwutils.notify_owner def dagrun_clear(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == "true" descendants = request.args.get('descendants') == "true" dag = dagbag.get_dag(dag_id) dags = [dag] if descendants: dags.extend(dag.descendants(dagbag, task_ids=[task_id], recursive=True)) execution_date = dateutil.parser.parse(execution_date) start_date = execution_date end_date = execution_date 
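        # Clearing a dag run targets exactly one execution date (start_date ==
        # end_date) and always recurses into subdags; dry-run confirmation is
        # handled by the shared _clear_dag_tis helper defined above.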
return self._clear_dag_tis(dags, start_date, end_date, origin, recursive=True, confirmed=confirmed) @expose('/blocked') @login_required def blocked(self): session = settings.Session() DR = models.DagRun dags = ( session.query(DR.dag_id, sqla.func.count(DR.id)) .filter(DR.state == State.RUNNING) .group_by(DR.dag_id) .all() ) payload = [] for dag_id, active_dag_runs in dags: max_active_runs = 0 if dag_id in dagbag.dags: max_active_runs = dagbag.dags[dag_id].max_active_runs payload.append({ 'dag_id': dag_id, 'active_dag_run': active_dag_runs, 'max_active_runs': max_active_runs, }) return wwwutils.json_response(payload) @expose('/dagrun_success') @login_required @wwwutils.action_logging @wwwutils.notify_owner def dagrun_success(self): dag_id = request.args.get('dag_id') execution_date = request.args.get('execution_date') confirmed = request.args.get('confirmed') == 'true' origin = request.args.get('origin') if not execution_date: flash('Invalid execution date', 'error') return redirect(origin) execution_date = dateutil.parser.parse(execution_date) dag = dagbag.get_dag(dag_id) if not dag: flash('Cannot find DAG: {}'.format(dag_id), 'error') return redirect(origin) new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS, commit=confirmed) if confirmed: flash('Marked success on {} task instances'.format(len(new_dag_state))) return redirect(origin) else: details = '\n'.join([str(t) for t in new_dag_state]) response = self.render('airflow/confirm.html', message=("Here's the list of task instances you are " "about to mark as successful:"), details=details) return response @expose('/success') @login_required @wwwutils.action_logging @wwwutils.notify_owner def success(self): dag_id = request.args.get('dag_id') task_id = request.args.get('task_id') origin = request.args.get('origin') dag = dagbag.get_dag(dag_id) task = dag.get_task(task_id) task.dag = dag execution_date = request.args.get('execution_date') execution_date = dateutil.parser.parse(execution_date) confirmed = request.args.get('confirmed') == "true" upstream = request.args.get('upstream') == "true" downstream = request.args.get('downstream') == "true" future = request.args.get('future') == "true" past = request.args.get('past') == "true" if not dag: flash("Cannot find DAG: {}".format(dag_id)) return redirect(origin) if not task: flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id)) return redirect(origin) from airflow.api.common.experimental.mark_tasks import set_state if confirmed: altered = set_state(task=task, execution_date=execution_date, upstream=upstream, downstream=downstream, future=future, past=past, state=State.SUCCESS, commit=True) flash("Marked success on {} task instances".format(len(altered))) return redirect(origin) to_be_altered = set_state(task=task, execution_date=execution_date, upstream=upstream, downstream=downstream, future=future, past=past, state=State.SUCCESS, commit=False) details = "\n".join([str(t) for t in to_be_altered]) response = self.render("airflow/confirm.html", message=("Here's the list of task instances you are " "about to mark as successful:"), details=details) return response @expose('/tree') @login_required @wwwutils.gzipped @wwwutils.action_logging def tree(self): dag_id = request.args.get('dag_id') blur = conf.getboolean('webserver', 'demo_mode') dag = dagbag.get_dag(dag_id) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_downstream=False, include_upstream=True) session = settings.Session() base_date = 
request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else 25 if base_date: base_date = dateutil.parser.parse(base_date) else: base_date = dag.latest_execution_date or datetime.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) DR = models.DagRun dag_runs = ( session.query(DR) .filter( DR.dag_id == dag.dag_id, DR.execution_date <= base_date, DR.execution_date >= min_date) .all() ) dag_runs = { dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs} dates = sorted(list(dag_runs.keys())) max_date = max(dates) if dates else None tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) task_instances = {} for ti in tis: tid = alchemy_to_dict(ti) dr = dag_runs.get(ti.execution_date) tid['external_trigger'] = dr['external_trigger'] if dr else False task_instances[(ti.task_id, ti.execution_date)] = tid expanded = [] # The default recursion traces every path so that tree view has full # expand/collapse functionality. After 5,000 nodes we stop and fall # back on a quick DFS search for performance. See PR #320. node_count = [0] node_limit = 5000 / max(1, len(dag.roots)) def recurse_nodes(task, visited): visited.add(task) node_count[0] += 1 children = [ recurse_nodes(t, visited) for t in task.upstream_list if node_count[0] < node_limit or t not in visited] # D3 tree uses children vs _children to define what is # expanded or not. The following block makes it such that # repeated nodes are collapsed by default. children_key = 'children' if task.task_id not in expanded: expanded.append(task.task_id) elif children: children_key = "_children" def set_duration(tid): if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and tid["start_date"] is not None): d = datetime.utcnow() - dateutil.parser.parse(tid["start_date"]) tid["duration"] = d.total_seconds() return tid return { 'name': task.task_id, 'instances': [ set_duration(task_instances.get((task.task_id, d))) or { 'execution_date': d.isoformat(), 'task_id': task.task_id } for d in dates], children_key: children, 'num_dep': len(task.upstream_list), 'operator': task.task_type, 'retries': task.retries, 'owner': task.owner, 'start_date': task.start_date, 'end_date': task.end_date, 'depends_on_past': task.depends_on_past, 'ui_color': task.ui_color, } data = { 'name': '[DAG]', 'children': [recurse_nodes(t, set()) for t in dag.roots], 'instances': [ dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates], } data = json.dumps(data, indent=4, default=json_ser) session.commit() session.close() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) return self.render( 'airflow/tree.html', operators=sorted( list(set([op.__class__ for op in dag.tasks])), key=lambda x: x.__name__ ), root=root, form=form, dag=dag, data=data, blur=blur) @expose('/graph') @login_required @wwwutils.gzipped @wwwutils.action_logging def graph(self): session = settings.Session() dag_id = request.args.get('dag_id') blur = conf.getboolean('webserver', 'demo_mode') dag = dagbag.get_dag(dag_id) if dag_id not in dagbag.dags: flash('DAG "{0}" seems to be missing.'.format(dag_id), "error") return redirect('/admin/') root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) arrange = request.args.get('arrange', dag.orientation) nodes = [] edges = [] for task in dag.tasks: nodes.append({ 'id': task.task_id, 'value': { 'label': task.task_id, 
'labelStyle': "fill:{0};".format(task.ui_fgcolor), 'style': "fill:{0};".format(task.ui_color), } }) def get_upstream(task): for t in task.upstream_list: edge = { 'u': t.task_id, 'v': task.task_id, } if edge not in edges: edges.append(edge) get_upstream(t) for t in dag.roots: get_upstream(t) dttm = request.args.get('execution_date') if dttm: dttm = dateutil.parser.parse(dttm) else: dttm = dag.latest_execution_date or datetime.utcnow().date() DR = models.DagRun drs = ( session.query(DR) .filter_by(dag_id=dag_id) .order_by(desc(DR.execution_date)).all() ) dr_choices = [] dr_state = None for dr in drs: dr_choices.append((dr.execution_date.isoformat(), dr.run_id)) if dttm == dr.execution_date: dr_state = dr.state class GraphForm(Form): execution_date = SelectField("DAG run", choices=dr_choices) arrange = SelectField("Layout", choices=( ('LR', "Left->Right"), ('RL', "Right->Left"), ('TB', "Top->Bottom"), ('BT', "Bottom->Top"), )) form = GraphForm( data={'execution_date': dttm.isoformat(), 'arrange': arrange}) task_instances = { ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(session, dttm, dttm)} tasks = { t.task_id: { 'dag_id': t.dag_id, 'task_type': t.task_type, } for t in dag.tasks} if not tasks: flash("No tasks found", "error") session.commit() session.close() doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else '' return self.render( 'airflow/graph.html', dag=dag, form=form, width=request.args.get('width', "100%"), height=request.args.get('height', "800"), execution_date=dttm.isoformat(), state_token=state_token(dr_state), doc_md=doc_md, arrange=arrange, operators=sorted( list(set([op.__class__ for op in dag.tasks])), key=lambda x: x.__name__ ), blur=blur, root=root or '', task_instances=json.dumps(task_instances, indent=2), tasks=json.dumps(tasks, indent=2), nodes=json.dumps(nodes, indent=2), edges=json.dumps(edges, indent=2), ) @expose('/duration') @login_required @wwwutils.action_logging def duration(self): session = settings.Session() dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else 25 if base_date: base_date = dateutil.parser.parse(base_date) else: base_date = dag.latest_execution_date or datetime.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, height=chart_height, width="1200") cum_chart = nvd3.lineChart( name="cumLineChart", x_is_date=True, height=chart_height, width="1200") y = defaultdict(list) x = defaultdict(list) cum_y = defaultdict(list) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) TF = models.TaskFail ti_fails = ( session .query(TF) .filter( TF.dag_id == dag.dag_id, TF.execution_date >= min_date, TF.execution_date <= base_date, TF.task_id.in_([t.task_id for t in dag.tasks])) .all() ) fails_totals = defaultdict(int) for tf in ti_fails: dict_key = (tf.dag_id, tf.task_id, tf.execution_date) fails_totals[dict_key] += tf.duration for ti in tis: if ti.duration: dttm = wwwutils.epoch(ti.execution_date) x[ti.task_id].append(dttm) y[ti.task_id].append(float(ti.duration)) fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date) fails_total = fails_totals[fails_dict_key] 
cum_y[ti.task_id].append(float(ti.duration + fails_total)) # determine the most relevant time unit for the set of task instance # durations for the DAG y_unit = infer_time_unit([d for t in y.values() for d in t]) cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t]) # update the y Axis on both charts to have the correct time units chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Duration ({})'.format(y_unit)) cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Duration ({})'.format(cum_y_unit)) for task in dag.tasks: if x[task.task_id]: chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(y[task.task_id], y_unit)) cum_chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(cum_y[task.task_id], cum_y_unit)) dates = sorted(list({ti.execution_date for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if dates else None session.commit() session.close() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() cum_chart.buildcontent() s_index = cum_chart.htmlcontent.rfind('});') cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] + "$( document ).trigger('chartload')" + cum_chart.htmlcontent[s_index:]) return self.render( 'airflow/duration_chart.html', dag=dag, demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, chart=chart.htmlcontent, cum_chart=cum_chart.htmlcontent ) @expose('/tries') @login_required @wwwutils.action_logging def tries(self): session = settings.Session() dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else 25 if base_date: base_date = dateutil.parser.parse(base_date) else: base_date = dag.latest_execution_date or datetime.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height, width="1200") for task in dag.tasks: y = [] x = [] for ti in task.get_task_instances(session, start_date=min_date, end_date=base_date): dttm = wwwutils.epoch(ti.execution_date) x.append(dttm) y.append(ti.try_number) if x: chart.add_serie(name=task.task_id, x=x, y=y) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) tries = sorted(list({ti.try_number for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if tries else None session.commit() session.close() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() return self.render( 'airflow/chart.html', dag=dag, demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, chart=chart.htmlcontent ) @expose('/landing_times') @login_required @wwwutils.action_logging def landing_times(self): session = settings.Session() dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) base_date = request.args.get('base_date') num_runs = request.args.get('num_runs') num_runs = int(num_runs) if num_runs else 25 if base_date: base_date = dateutil.parser.parse(base_date) else: base_date = dag.latest_execution_date or datetime.utcnow() dates = dag.date_range(base_date, num=-abs(num_runs)) min_date = dates[0] if dates else 
datetime(2000, 1, 1) root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) chart_height = get_chart_height(dag) chart = nvd3.lineChart( name="lineChart", x_is_date=True, height=chart_height, width="1200") y = {} x = {} for task in dag.tasks: y[task.task_id] = [] x[task.task_id] = [] for ti in task.get_task_instances(session, start_date=min_date, end_date=base_date): ts = ti.execution_date if dag.schedule_interval and dag.following_schedule(ts): ts = dag.following_schedule(ts) if ti.end_date: dttm = wwwutils.epoch(ti.execution_date) secs = (ti.end_date - ts).total_seconds() x[ti.task_id].append(dttm) y[ti.task_id].append(secs) # determine the most relevant time unit for the set of landing times # for the DAG y_unit = infer_time_unit([d for t in y.values() for d in t]) # update the y Axis to have the correct time units chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Landing Time ({})'.format(y_unit)) for task in dag.tasks: if x[task.task_id]: chart.add_serie(name=task.task_id, x=x[task.task_id], y=scale_time_units(y[task.task_id], y_unit)) tis = dag.get_task_instances( session, start_date=min_date, end_date=base_date) dates = sorted(list({ti.execution_date for ti in tis})) max_date = max([ti.execution_date for ti in tis]) if dates else None session.commit() session.close() form = DateTimeWithNumRunsForm(data={'base_date': max_date, 'num_runs': num_runs}) chart.buildcontent() return self.render( 'airflow/chart.html', dag=dag, chart=chart.htmlcontent, height=str(chart_height + 100) + "px", demo_mode=conf.getboolean('webserver', 'demo_mode'), root=root, form=form, ) @expose('/paused', methods=['POST']) @login_required @wwwutils.action_logging def paused(self): DagModel = models.DagModel dag_id = request.args.get('dag_id') session = settings.Session() orm_dag = session.query( DagModel).filter(DagModel.dag_id == dag_id).first() if request.args.get('is_paused') == 'false': orm_dag.is_paused = True else: orm_dag.is_paused = False session.merge(orm_dag) session.commit() session.close() dagbag.get_dag(dag_id) return "OK" @expose('/refresh') @login_required @wwwutils.action_logging def refresh(self): DagModel = models.DagModel dag_id = request.args.get('dag_id') session = settings.Session() orm_dag = session.query( DagModel).filter(DagModel.dag_id == dag_id).first() if orm_dag: orm_dag.last_expired = datetime.utcnow() session.merge(orm_dag) session.commit() session.close() dagbag.get_dag(dag_id) flash("DAG [{}] is now fresh as a daisy".format(dag_id)) return redirect(request.referrer) @expose('/refresh_all') @login_required @wwwutils.action_logging def refresh_all(self): dagbag.collect_dags(only_if_updated=False) flash("All DAGs are now up to date") return redirect('/') @expose('/gantt') @login_required @wwwutils.action_logging def gantt(self): session = settings.Session() dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) demo_mode = conf.getboolean('webserver', 'demo_mode') root = request.args.get('root') if root: dag = dag.sub_dag( task_regex=root, include_upstream=True, include_downstream=False) dttm = request.args.get('execution_date') if dttm: dttm = dateutil.parser.parse(dttm) else: dttm = dag.latest_execution_date or datetime.utcnow().date() form = DateTimeForm(data={'execution_date': dttm}) tis = [ ti for ti in dag.get_task_instances(session, dttm, dttm) if ti.start_date] tis = sorted(tis, key=lambda ti: ti.start_date) tasks = [] for ti in tis: end_date = ti.end_date if ti.end_date 
else datetime.utcnow() tasks.append({ 'startDate': wwwutils.epoch(ti.start_date), 'endDate': wwwutils.epoch(end_date), 'isoStart': ti.start_date.isoformat()[:-4], 'isoEnd': end_date.isoformat()[:-4], 'taskName': ti.task_id, 'duration': "{}".format(end_date - ti.start_date)[:-4], 'status': ti.state, 'executionDate': ti.execution_date.isoformat(), }) states = {ti.state: ti.state for ti in tis} data = { 'taskNames': [ti.task_id for ti in tis], 'tasks': tasks, 'taskStatus': states, 'height': len(tis) * 25 + 25, } session.commit() session.close() return self.render( 'airflow/gantt.html', dag=dag, execution_date=dttm.isoformat(), form=form, data=json.dumps(data, indent=2), base_date='', demo_mode=demo_mode, root=root, ) @expose('/object/task_instances') @login_required @wwwutils.action_logging def task_instances(self): session = settings.Session() dag_id = request.args.get('dag_id') dag = dagbag.get_dag(dag_id) dttm = request.args.get('execution_date') if dttm: dttm = dateutil.parser.parse(dttm) else: return ("Error: Invalid execution_date") task_instances = { ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(session, dttm, dttm)} return json.dumps(task_instances) @expose('/variables/<form>', methods=["GET", "POST"]) @login_required @wwwutils.action_logging def variables(self, form): try: if request.method == 'POST': data = request.json if data: session = settings.Session() var = models.Variable(key=form, val=json.dumps(data)) session.add(var) session.commit() return "" else: return self.render( 'airflow/variables/{}.html'.format(form) ) except: # prevent XSS form = escape(form) return ("Error: form airflow/variables/{}.html " "not found.").format(form), 404 @expose('/varimport', methods=["GET", "POST"]) @login_required @wwwutils.action_logging def varimport(self): try: out = str(request.files['file'].read()) d = json.loads(out) except Exception: flash("Missing file or syntax error.") else: for k, v in d.items(): models.Variable.set(k, v, serialize_json=isinstance(v, dict)) flash("{} variable(s) successfully updated.".format(len(d))) return redirect('/admin/variable') class HomeView(AdminIndexView): @expose("/") @login_required def index(self): session = Session() DM = models.DagModel # restrict the dags shown if filter_by_owner and current user is not superuser do_filter = FILTER_BY_OWNER and (not current_user.is_superuser()) owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower() hide_paused_dags_by_default = conf.getboolean('webserver', 'hide_paused_dags_by_default') show_paused_arg = request.args.get('showPaused', 'None') def get_int_arg(value, default=0): try: return int(value) except ValueError: return default arg_current_page = request.args.get('page', '0') arg_search_query = request.args.get('search', None) dags_per_page = PAGE_SIZE current_page = get_int_arg(arg_current_page, default=0) if show_paused_arg.strip().lower() == 'false': hide_paused = True elif show_paused_arg.strip().lower() == 'true': hide_paused = False else: hide_paused = hide_paused_dags_by_default # read orm_dags from the db sql_query = session.query(DM) if do_filter and owner_mode == 'ldapgroup': sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active, DM.owners.in_(current_user.ldap_groups) ) elif do_filter and owner_mode == 'user': sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active, DM.owners == current_user.user.username ) else: sql_query = sql_query.filter( ~DM.is_subdag, DM.is_active ) # optionally filter out "paused" dags if hide_paused: sql_query = 
sql_query.filter(~DM.is_paused) orm_dags = {dag.dag_id: dag for dag in sql_query .all()} import_errors = session.query(models.ImportError).all() for ie in import_errors: flash( "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie), "error") session.expunge_all() session.commit() session.close() # get a list of all non-subdag dags visible to everyone # optionally filter out "paused" dags if hide_paused: unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag and not dag.is_paused] else: unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if not dag.parent_dag] # optionally filter to get only dags that the user should see if do_filter and owner_mode == 'ldapgroup': # only show dags owned by someone in @current_user.ldap_groups webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags if dag.owner in current_user.ldap_groups } elif do_filter and owner_mode == 'user': # only show dags owned by @current_user.user.username webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags if dag.owner == current_user.user.username } else: webserver_dags = { dag.dag_id: dag for dag in unfiltered_webserver_dags } if arg_search_query: lower_search_query = arg_search_query.lower() # filter by dag_id webserver_dags_filtered = { dag_id: dag for dag_id, dag in webserver_dags.items() if (lower_search_query in dag_id.lower() or lower_search_query in dag.owner.lower()) } all_dag_ids = (set([dag.dag_id for dag in orm_dags.values() if lower_search_query in dag.dag_id.lower() or lower_search_query in dag.owners.lower()]) | set(webserver_dags_filtered.keys())) sorted_dag_ids = sorted(all_dag_ids) else: webserver_dags_filtered = webserver_dags sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys())) start = current_page * dags_per_page end = start + dags_per_page num_of_all_dags = len(sorted_dag_ids) page_dag_ids = sorted_dag_ids[start:end] num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page))) auto_complete_data = set() for dag in webserver_dags_filtered.values(): auto_complete_data.add(dag.dag_id) auto_complete_data.add(dag.owner) for dag in orm_dags.values(): auto_complete_data.add(dag.dag_id) auto_complete_data.add(dag.owners) return self.render( 'airflow/dags.html', webserver_dags=webserver_dags_filtered, orm_dags=orm_dags, hide_paused=hide_paused, current_page=current_page, search_query=arg_search_query if arg_search_query else '', page_size=dags_per_page, num_of_pages=num_of_pages, num_dag_from=start + 1, num_dag_to=min(end, num_of_all_dags), num_of_all_dags=num_of_all_dags, paging=wwwutils.generate_pages(current_page, num_of_pages, search=arg_search_query, showPaused=not hide_paused), dag_ids_in_page=page_dag_ids, auto_complete_data=auto_complete_data) class QueryView(wwwutils.DataProfilingMixin, BaseView): @expose('/', methods=['POST', 'GET']) @wwwutils.gzipped def query(self): session = settings.Session() dbs = session.query(models.Connection).order_by( models.Connection.conn_id).all() session.expunge_all() db_choices = list( ((db.conn_id, db.conn_id) for db in dbs if db.get_hook())) conn_id_str = request.form.get('conn_id') csv = request.form.get('csv') == "true" sql = request.form.get('sql') class QueryForm(Form): conn_id = SelectField("Layout", choices=db_choices) sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget()) data = { 'conn_id': conn_id_str, 'sql': sql, } results = None has_data = False error = False if conn_id_str: db = [db for db in dbs if db.conn_id == conn_id_str][0] hook = 
db.get_hook() try: df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type)) # df = hook.get_pandas_df(sql) has_data = len(df) > 0 df = df.fillna('') results = df.to_html( classes=[ 'table', 'table-bordered', 'table-striped', 'no-wrap'], index=False, na_rep='', ) if has_data else '' except Exception as e: flash(str(e), 'error') error = True if has_data and len(df) == QUERY_LIMIT: flash( "Query output truncated at " + str(QUERY_LIMIT) + " rows", 'info') if not has_data and error: flash('No data', 'error') if csv: return Response( response=df.to_csv(index=False), status=200, mimetype="application/text") form = QueryForm(request.form, data=data) session.commit() session.close() return self.render( 'airflow/query.html', form=form, title="Ad Hoc Query", results=results or '', has_data=has_data) class AirflowModelView(ModelView): list_template = 'airflow/model_list.html' edit_template = 'airflow/model_edit.html' create_template = 'airflow/model_create.html' column_display_actions = True page_size = PAGE_SIZE class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView): """ Modifying the base ModelView class for non edit, browse only operations """ named_filter_urls = True can_create = False can_edit = False can_delete = False column_display_pk = True class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView): column_list = ('pool', 'slots', 'used_slots', 'queued_slots') column_formatters = dict( pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots) named_filter_urls = True form_args = { 'pool': { 'validators': [ validators.DataRequired(), ] } } class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly): verbose_name_plural = "SLA misses" verbose_name = "SLA miss" column_list = ( 'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp') column_formatters = dict( task_id=task_instance_link, execution_date=datetime_f, timestamp=datetime_f, dag_id=dag_link) named_filter_urls = True column_searchable_list = ('dag_id', 'task_id',) column_filters = ( 'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date') form_widget_args = { 'email_sent': {'disabled': True}, 'timestamp': {'disabled': True}, } class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "chart" verbose_name_plural = "charts" form_columns = ( 'label', 'owner', 'conn_id', 'chart_type', 'show_datatable', 'x_is_date', 'y_log_scale', 'show_sql', 'height', 'sql_layout', 'sql', 'default_params', ) column_list = ( 'label', 'conn_id', 'chart_type', 'owner', 'last_modified', ) column_sortable_list = ( 'label', 'conn_id', 'chart_type', ('owner', 'owner.username'), 'last_modified', ) column_formatters = dict(label=label_link, last_modified=datetime_f) column_default_sort = ('last_modified', True) create_template = 'airflow/chart/create.html' edit_template = 'airflow/chart/edit.html' column_filters = ('label', 'owner.username', 'conn_id') column_searchable_list = ('owner.username', 'label', 'sql') column_descriptions = { 'label': "Can include {{ templated_fields }} and {{ macros }}", 'chart_type': "The type of chart to be displayed", 'sql': "Can include {{ templated_fields }} and {{ macros }}.", 'height': "Height of the chart, in pixels.", 'conn_id': "Source database to run the query against", 'x_is_date': ( "Whether the X axis should be casted as a date field. Expect most " "intelligible date formats to get casted properly." ), 'owner': ( "The chart's owner, mostly used for reference and filtering in " "the list view." 
), 'show_datatable': "Whether to display an interactive data table under the chart.", 'default_params': ( 'A dictionary of {"key": "values",} that define what the ' 'templated fields (parameters) values should be by default. ' 'To be valid, it needs to "eval" as a Python dict. ' 'The key values will show up in the url\'s querystring ' 'and can be altered there.' ), 'show_sql': "Whether to display the SQL statement as a collapsible " "section in the chart page.", 'y_log_scale': "Whether to use a log scale for the Y axis.", 'sql_layout': ( "Defines the layout of the SQL that the application should " "expect. Depending on the tables you are sourcing from, it may " "make more sense to pivot / unpivot the metrics." ), } column_labels = { 'sql': "SQL", 'height': "Chart Height", 'sql_layout': "SQL Layout", 'show_sql': "Display the SQL Statement", 'default_params': "Default Parameters", } form_choices = { 'chart_type': [ ('line', 'Line Chart'), ('spline', 'Spline Chart'), ('bar', 'Bar Chart'), ('column', 'Column Chart'), ('area', 'Overlapping Area Chart'), ('stacked_area', 'Stacked Area Chart'), ('percent_area', 'Percent Area Chart'), ('datatable', 'No chart, data table only'), ], 'sql_layout': [ ('series', 'SELECT series, x, y FROM ...'), ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'), ], 'conn_id': [ (c.conn_id, c.conn_id) for c in ( Session().query(models.Connection.conn_id) .group_by(models.Connection.conn_id) ) ] } def on_model_change(self, form, model, is_created=True): if model.iteration_no is None: model.iteration_no = 0 else: model.iteration_no += 1 if not model.user_id and current_user and hasattr(current_user, 'id'): model.user_id = current_user.id model.last_modified = datetime.utcnow() chart_mapping = ( ('line', 'lineChart'), ('spline', 'lineChart'), ('bar', 'multiBarChart'), ('column', 'multiBarChart'), ('area', 'stackedAreaChart'), ('stacked_area', 'stackedAreaChart'), ('percent_area', 'stackedAreaChart'), ('datatable', 'datatable'), ) chart_mapping = dict(chart_mapping) class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "known event" verbose_name_plural = "known events" form_columns = ( 'label', 'event_type', 'start_date', 'end_date', 'reported_by', 'description', ) form_args = { 'label': { 'validators': [ validators.DataRequired(), ], }, 'event_type': { 'validators': [ validators.DataRequired(), ], }, 'start_date': { 'validators': [ validators.DataRequired(), ], }, 'end_date': { 'validators': [ validators.DataRequired(), GreaterEqualThan(fieldname='start_date'), ], }, 'reported_by': { 'validators': [ validators.DataRequired(), ], } } column_list = ( 'label', 'event_type', 'start_date', 'end_date', 'reported_by', ) column_default_sort = ("start_date", True) column_sortable_list = ( 'label', ('event_type', 'event_type.know_event_type'), 'start_date', 'end_date', ('reported_by', 'reported_by.username'), ) class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView): pass # NOTE: For debugging / troubleshooting # mv = KnowEventTypeView( # models.KnownEventType, # Session, name="Known Event Types", category="Manage") # admin.add_view(mv) # class DagPickleView(SuperUserMixin, ModelView): # pass # mv = DagPickleView( # models.DagPickle, # Session, name="Pickles", category="Manage") # admin.add_view(mv) class VariableView(wwwutils.DataProfilingMixin, AirflowModelView): verbose_name = "Variable" verbose_name_plural = "Variables" list_template = 'airflow/variable_list.html' def hidden_field_formatter(view, context, model, name): 
if wwwutils.should_hide_value_for_key(model.key): return Markup('*' * 8) try: return getattr(model, name) except AirflowException: return Markup('<span class="label label-danger">Invalid</span>') form_columns = ( 'key', 'val', ) column_list = ('key', 'val', 'is_encrypted',) column_filters = ('key', 'val') column_searchable_list = ('key', 'val') column_default_sort = ('key', False) form_widget_args = { 'is_encrypted': {'disabled': True}, 'val': { 'rows': 20, } } form_args = { 'key': { 'validators': { validators.DataRequired(), }, }, } column_sortable_list = ( 'key', 'val', 'is_encrypted', ) column_formatters = { 'val': hidden_field_formatter, } # Default flask-admin export functionality doesn't handle serialized json @action('varexport', 'Export', None) def action_varexport(self, ids): V = models.Variable session = settings.Session() qry = session.query(V).filter(V.id.in_(ids)).all() session.close() var_dict = {} d = json.JSONDecoder() for var in qry: val = None try: val = d.decode(var.val) except: val = var.val var_dict[var.key] = val response = make_response(json.dumps(var_dict, sort_keys=True, indent=4)) response.headers["Content-Disposition"] = "attachment; filename=variables.json" return response def on_form_prefill(self, form, id): if wwwutils.should_hide_value_for_key(form.key.data): form.val.data = '*' * 8 class XComView(wwwutils.SuperUserMixin, AirflowModelView): verbose_name = "XCom" verbose_name_plural = "XComs" form_columns = ( 'key', 'value', 'execution_date', 'task_id', 'dag_id', ) form_extra_fields = { 'value': StringField('Value'), } column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id') column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id') class JobModelView(ModelViewOnly): verbose_name_plural = "jobs" verbose_name = "job" column_display_actions = False column_default_sort = ('start_date', True) column_filters = ( 'job_type', 'dag_id', 'state', 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat') column_formatters = dict( start_date=datetime_f, end_date=datetime_f, hostname=nobr_f, state=state_f, latest_heartbeat=datetime_f) class DagRunModelView(ModelViewOnly): verbose_name_plural = "DAG Runs" can_edit = True can_create = True column_editable_list = ('state',) verbose_name = "dag run" column_default_sort = ('execution_date', True) form_choices = { 'state': [ ('success', 'success'), ('running', 'running'), ('failed', 'failed'), ], } form_args = dict( dag_id=dict(validators=[validators.DataRequired()]) ) column_list = ( 'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger') column_filters = column_list column_searchable_list = ('dag_id', 'state', 'run_id') column_formatters = dict( execution_date=datetime_f, state=state_f, start_date=datetime_f, dag_id=dag_link) @action('new_delete', "Delete", "Are you sure you want to delete selected records?") def action_new_delete(self, ids): session = settings.Session() deleted = set(session.query(models.DagRun) .filter(models.DagRun.id.in_(ids)) .all()) session.query(models.DagRun) \ .filter(models.DagRun.id.in_(ids)) \ .delete(synchronize_session='fetch') session.commit() dirty_ids = [] for row in deleted: dirty_ids.append(row.dag_id) models.DagStat.update(dirty_ids, dirty_only=False, session=session) session.close() @action('set_running', "Set state to 'running'", None) def action_set_running(self, ids): self.set_dagrun_state(ids, State.RUNNING) @action('set_failed', "Set state to 'failed'", None) def action_set_failed(self, ids): 
self.set_dagrun_state(ids, State.FAILED) @action('set_success', "Set state to 'success'", None) def action_set_success(self, ids): self.set_dagrun_state(ids, State.SUCCESS) @provide_session def set_dagrun_state(self, ids, target_state, session=None): try: DR = models.DagRun count = 0 dirty_ids = [] for dr in session.query(DR).filter(DR.id.in_(ids)).all(): dirty_ids.append(dr.dag_id) count += 1 dr.state = target_state if target_state == State.RUNNING: dr.start_date = datetime.utcnow() else: dr.end_date = datetime.utcnow() session.commit() models.DagStat.update(dirty_ids, session=session) flash( "{count} dag runs were set to '{target_state}'".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') class LogModelView(ModelViewOnly): verbose_name_plural = "logs" verbose_name = "log" column_display_actions = False column_default_sort = ('dttm', True) column_filters = ('dag_id', 'task_id', 'execution_date') column_formatters = dict( dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link) class TaskInstanceModelView(ModelViewOnly): verbose_name_plural = "task instances" verbose_name = "task instance" column_filters = ( 'state', 'dag_id', 'task_id', 'execution_date', 'hostname', 'queue', 'pool', 'operator', 'start_date', 'end_date') named_filter_urls = True column_formatters = dict( log_url=log_url_formatter, task_id=task_instance_link, hostname=nobr_f, state=state_f, execution_date=datetime_f, start_date=datetime_f, end_date=datetime_f, queued_dttm=datetime_f, dag_id=dag_link, duration=duration_f) column_searchable_list = ('dag_id', 'task_id', 'state') column_default_sort = ('job_id', True) form_choices = { 'state': [ ('success', 'success'), ('running', 'running'), ('failed', 'failed'), ], } column_list = ( 'state', 'dag_id', 'task_id', 'execution_date', 'operator', 'start_date', 'end_date', 'duration', 'job_id', 'hostname', 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number', 'pool', 'log_url') can_delete = True page_size = PAGE_SIZE @action('set_running', "Set state to 'running'", None) def action_set_running(self, ids): self.set_task_instance_state(ids, State.RUNNING) @action('set_failed', "Set state to 'failed'", None) def action_set_failed(self, ids): self.set_task_instance_state(ids, State.FAILED) @action('set_success', "Set state to 'success'", None) def action_set_success(self, ids): self.set_task_instance_state(ids, State.SUCCESS) @action('set_retry', "Set state to 'up_for_retry'", None) def action_set_retry(self, ids): self.set_task_instance_state(ids, State.UP_FOR_RETRY) @action('delete', lazy_gettext('Delete'), lazy_gettext('Are you sure you want to delete selected records?')) def action_delete(self, ids): """ As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete(). TODO: this method should be removed once the below bug is fixed on Flask-Admin side. 
https://github.com/flask-admin/flask-admin/issues/1226 """ if 'sqlite' in conf.get('core', 'sql_alchemy_conn'): self.delete_task_instances(ids) else: super(TaskInstanceModelView, self).action_delete(ids) @provide_session def set_task_instance_state(self, ids, target_state, session=None): try: TI = models.TaskInstance count = len(ids) for id in ids: task_id, dag_id, execution_date = id.split(',') execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S') ti = session.query(TI).filter(TI.task_id == task_id, TI.dag_id == dag_id, TI.execution_date == execution_date).one() ti.state = target_state session.commit() flash( "{count} task instances were set to '{target_state}'".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to set state', 'error') @provide_session def delete_task_instances(self, ids, session=None): try: TI = models.TaskInstance count = 0 for id in ids: task_id, dag_id, execution_date = id.split(',') execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S') count += session.query(TI).filter(TI.task_id == task_id, TI.dag_id == dag_id, TI.execution_date == execution_date).delete() session.commit() flash("{count} task instances were deleted".format(**locals())) except Exception as ex: if not self.handle_view_exception(ex): raise Exception("Ooops") flash('Failed to delete', 'error') def get_one(self, id): """ As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one(). TODO: this method should be removed once the below bug is fixed on Flask-Admin side. https://github.com/flask-admin/flask-admin/issues/1226 """ task_id, dag_id, execution_date = iterdecode(id) execution_date = dateutil.parser.parse(execution_date) return self.session.query(self.model).get((task_id, dag_id, execution_date)) class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView): create_template = 'airflow/conn_create.html' edit_template = 'airflow/conn_edit.html' list_template = 'airflow/conn_list.html' form_columns = ( 'conn_id', 'conn_type', 'host', 'schema', 'login', 'password', 'port', 'extra', 'extra__jdbc__drv_path', 'extra__jdbc__drv_clsname', 'extra__google_cloud_platform__project', 'extra__google_cloud_platform__key_path', 'extra__google_cloud_platform__keyfile_dict', 'extra__google_cloud_platform__scope', ) verbose_name = "Connection" verbose_name_plural = "Connections" column_default_sort = ('conn_id', False) column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',) form_overrides = dict(_password=PasswordField, _extra=TextAreaField) form_widget_args = { 'is_extra_encrypted': {'disabled': True}, 'is_encrypted': {'disabled': True}, } # Used to customized the form, the forms elements get rendered # and results are stored in the extra field as json. All of these # need to be prefixed with extra__ and then the conn_type ___ as in # extra__{conn_type}__name. 
You can also hide form elements and rename # others from the connection_form.js file form_extra_fields = { 'extra__jdbc__drv_path': StringField('Driver Path'), 'extra__jdbc__drv_clsname': StringField('Driver Class'), 'extra__google_cloud_platform__project': StringField('Project Id'), 'extra__google_cloud_platform__key_path': StringField('Keyfile Path'), 'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'), 'extra__google_cloud_platform__scope': StringField('Scopes (comma seperated)'), } form_choices = { 'conn_type': models.Connection._types } def on_model_change(self, form, model, is_created): formdata = form.data if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']: extra = { key: formdata[key] for key in self.form_extra_fields.keys() if key in formdata} model.extra = json.dumps(extra) @classmethod def alert_fernet_key(cls): fk = None try: fk = conf.get('core', 'fernet_key') except: pass return fk is None @classmethod def is_secure(cls): """ Used to display a message in the Connection list view making it clear that the passwords and `extra` field can't be encrypted. """ is_secure = False try: import cryptography conf.get('core', 'fernet_key') is_secure = True except: pass return is_secure def on_form_prefill(self, form, id): try: d = json.loads(form.data.get('extra', '{}')) except Exception: d = {} for field in list(self.form_extra_fields.keys()): value = d.get(field, '') if value: field = getattr(form, field) field.data = value class UserModelView(wwwutils.SuperUserMixin, AirflowModelView): verbose_name = "User" verbose_name_plural = "Users" column_default_sort = 'username' class VersionView(wwwutils.SuperUserMixin, BaseView): @expose('/') def version(self): # Look at the version from setup.py try: airflow_version = pkg_resources.require("apache-airflow")[0].version except Exception as e: airflow_version = None logging.error(e) # Get the Git repo and git hash git_version = None try: with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f: git_version = f.readline() except Exception as e: logging.error(e) # Render information title = "Version Info" return self.render('airflow/version.html', title=title, airflow_version=airflow_version, git_version=git_version) class ConfigurationView(wwwutils.SuperUserMixin, BaseView): @expose('/') def conf(self): raw = request.args.get('raw') == "true" title = "Airflow Configuration" subtitle = conf.AIRFLOW_CONFIG if conf.getboolean("webserver", "expose_config"): with open(conf.AIRFLOW_CONFIG, 'r') as f: config = f.read() table = [(section, key, value, source) for section, parameters in conf.as_dict(True, True).items() for key, (value, source) in parameters.items()] else: config = ( "# You Airflow administrator chose not to expose the " "configuration, most likely for security reasons.") table = None if raw: return Response( response=config, status=200, mimetype="application/text") else: code_html = Markup(highlight( config, lexers.IniLexer(), # Lexer call HtmlFormatter(noclasses=True)) ) return self.render( 'airflow/config.html', pre_subtitle=settings.HEADER + " v" + airflow.__version__, code_html=code_html, title=title, subtitle=subtitle, table=table) class DagModelView(wwwutils.SuperUserMixin, ModelView): column_list = ('dag_id', 'owners') column_editable_list = ('is_paused',) form_excluded_columns = ('is_subdag', 'is_active') column_searchable_list = ('dag_id',) column_filters = ( 'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag', 'last_scheduler_run', 'last_expired') form_widget_args = 
{ 'last_scheduler_run': {'disabled': True}, 'fileloc': {'disabled': True}, 'is_paused': {'disabled': True}, 'last_pickled': {'disabled': True}, 'pickle_id': {'disabled': True}, 'last_loaded': {'disabled': True}, 'last_expired': {'disabled': True}, 'pickle_size': {'disabled': True}, 'scheduler_lock': {'disabled': True}, 'owners': {'disabled': True}, } column_formatters = dict( dag_id=dag_link, ) can_delete = False can_create = False page_size = PAGE_SIZE list_template = 'airflow/list_dags.html' named_filter_urls = True def get_query(self): """ Default filters for model """ return ( super(DagModelView, self) .get_query() .filter(or_(models.DagModel.is_active, models.DagModel.is_paused)) .filter(~models.DagModel.is_subdag) ) def get_count_query(self): """ Default filters for model """ return ( super(DagModelView, self) .get_count_query() .filter(models.DagModel.is_active) .filter(~models.DagModel.is_subdag) )
apache-2.0
yuxiang-zhou/menpofit
menpofit/aps/base.py
2
39189
from __future__ import division import warnings import numpy as np from scipy.stats import multivariate_normal from menpo.base import name_of_callable from menpo.feature import no_op from menpo.visualize import print_dynamic, print_progress from menpo.model import GMRFModel from menpo.shape import (DirectedGraph, UndirectedGraph, Tree, PointTree, PointDirectedGraph, PointUndirectedGraph) from menpofit import checks from menpofit.base import batch from menpofit.modelinstance import OrthoPDM from menpofit.builder import (compute_features, scale_images, align_shapes, rescale_images_to_reference_shape, extract_patches, MenpoFitBuilderWarning, compute_reference_shape) class GenerativeAPS(object): r""" Class for training a multi-scale Generative Active Pictorial Structures model. Please see the references for a basic list of relevant papers. Parameters ---------- images : `list` of `menpo.image.Image` The `list` of training images. group : `str` or ``None``, optional The landmark group that will be used to train the AAM. If ``None`` and the images only have a single landmark group, then that is the one that will be used. Note that all the training images need to have the specified landmark group. appearance_graph : `list` of graphs or a single graph or ``None``, optional The graph to be used for the appearance `menpo.model.GMRFModel` training. It must be a `menpo.shape.UndirectedGraph`. If ``None``, then a `menpo.model.PCAModel` is used instead. shape_graph : `list` of graphs or a single graph or ``None``, optional The graph to be used for the shape `menpo.model.GMRFModel` training. It must be a `menpo.shape.UndirectedGraph`. If ``None``, then the shape model is built using `menpo.model.PCAModel`. deformation_graph : `list` of graphs or a single graph or ``None``, optional The graph to be used for the deformation `menpo.model.GMRFModel` training. It must be either a `menpo.shape.DirectedGraph` or a `menpo.shape.Tree`. If ``None``, then the minimum spanning tree of the data is computed. holistic_features : `closure` or `list` of `closure`, optional The features that will be extracted from the training images. Note that the features are extracted before warping the images to the reference shape. If `list`, then it must define a feature function per scale. Please refer to `menpo.feature` for a list of potential features. reference_shape : `menpo.shape.PointCloud` or ``None``, optional The reference shape that will be used for building the APS. The purpose of the reference shape is to normalise the size of the training images. The normalization is performed by rescaling all the training images so that the scale of their ground truth shapes matches the scale of the reference shape. Note that the reference shape is rescaled with respect to the `diagonal` before performing the normalisation. If ``None``, then the mean shape will be used. diagonal : `int` or ``None``, optional This parameter is used to rescale the reference shape so that the diagonal of its bounding box matches the provided value. In other words, this parameter controls the size of the model at the highest scale. If ``None``, then the reference shape does not get rescaled. scales : `float` or `tuple` of `float`, optional The scale value of each scale. They must provided in ascending order, i.e. from lowest to highest scale. If `float`, then a single scale is assumed. patch_shape : (`int`, `int`) or `list` of (`int`, `int`), optional The shape of the patches to be extracted. 
If a `list` is provided, then it defines a patch shape per scale. patch_normalisation : `list` of `callable` or a single `callable`, optional The normalisation function to be applied on the extracted patches. If `list`, then it must have length equal to the number of scales. If a single patch normalization `callable`, then this is the one applied to all scales. use_procrustes : `bool`, optional If ``True``, then Generalized Procrustes Alignment is applied before building the deformation model. precision_dtype : `numpy.dtype`, optional The data type of the appearance GMRF's precision matrix. For example, it can be set to `numpy.float32` for single precision or to `numpy.float64` for double precision. Even though the precision matrix is stored as a `scipy.sparse` matrix, this parameter has a big impact on the amount of memory required by the model. max_shape_components : `int`, `float`, `list` of those or ``None``, optional The number of shape components to keep. If `int`, then it sets the exact number of components. If `float`, then it defines the variance percentage that will be kept. If `list`, then it should define a value per scale. If a single number, then this will be applied to all scales. If ``None``, then all the components are kept. Note that the unused components will be permanently trimmed. n_appearance_components : `list` of `int` or `int` or ``None``, optional The number of appearance components used for building the appearance `menpo.shape.GMRFModel`. If `list`, then it must have length equal to the number of scales. If a single `int`, then this is the one applied to all scales. If ``None``, the covariance matrix of each edge is inverted using `np.linalg.inv`. If `int`, it is inverted using truncated SVD using the specified number of components. can_be_incremented : `bool`, optional In case you intend to incrementally update the model in the future, then this flag must be set to ``True`` from the first place. Note that if ``True``, the appearance and deformation `menpo.shape.GMRFModel` models will occupy double memory. verbose : `bool`, optional If ``True``, then the progress of building the APS will be printed. batch_size : `int` or ``None``, optional If an `int` is provided, then the training is performed in an incremental fashion on image batches of size equal to the provided value. If ``None``, then the training is performed directly on the all the images. References ---------- .. [1] E. Antonakos, J. Alabort-i-Medina, and S. Zafeiriou, "Active Pictorial Structures", Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston, MA, USA, pp. 1872-1882, June 2015. 
""" def __init__(self, images, group=None, appearance_graph=None, shape_graph=None, deformation_graph=None, holistic_features=no_op, reference_shape=None, diagonal=None, scales=(0.5, 1.0), patch_shape=(17, 17), patch_normalisation=no_op, use_procrustes=True, precision_dtype=np.float32, max_shape_components=None, n_appearance_components=None, can_be_incremented=False, verbose=False, batch_size=None): # Check parameters checks.check_diagonal(diagonal) scales = checks.check_scales(scales) n_scales = len(scales) holistic_features = checks.check_callable(holistic_features, n_scales) patch_shape = checks.check_patch_shape(patch_shape, n_scales) patch_normalisation = checks.check_callable(patch_normalisation, n_scales) max_shape_components = checks.check_max_components( max_shape_components, n_scales, 'max_shape_components') n_appearance_components = checks.check_max_components( n_appearance_components, n_scales, 'n_appearance_components') # Assign attributes self.diagonal = diagonal self.scales = scales self.holistic_features = holistic_features self.patch_shape = patch_shape self.patch_normalisation = patch_normalisation self.reference_shape = reference_shape self.use_procrustes = use_procrustes self.is_incremental = can_be_incremented self.precision_dtype = precision_dtype self.max_shape_components = max_shape_components self.n_appearance_components = n_appearance_components # Check provided graphs self.appearance_graph = checks.check_graph( appearance_graph, UndirectedGraph, 'appearance_graph', n_scales) self.shape_graph = checks.check_graph(shape_graph, UndirectedGraph, 'shape_graph', n_scales) self.deformation_graph = checks.check_graph( deformation_graph, [DirectedGraph, Tree], 'deformation_graph', n_scales) # Initialize models' lists self.shape_models = [] self.appearance_models = [] self.deformation_models = [] # Train APS self._train(images, increment=False, group=group, batch_size=batch_size, verbose=verbose) def _train(self, images, increment=False, group=None, batch_size=None, verbose=False): # If batch_size is not None, then we may have a generator, else we # assume we have a list. if batch_size is not None: # Create a generator of fixed sized batches. Will still work even # on an infinite list. image_batches = batch(images, batch_size) else: image_batches = [list(images)] for k, image_batch in enumerate(image_batches): if k == 0: if self.reference_shape is None: # If no reference shape was given, use the mean of the first # batch if batch_size is not None: warnings.warn('No reference shape was provided. The ' 'mean of the first batch will be the ' 'reference shape. 
If the batch mean is ' 'not representative of the true mean, ' 'this may cause issues.', MenpoFitBuilderWarning) self.reference_shape = compute_reference_shape( [i.landmarks[group].lms for i in image_batch], self.diagonal, verbose=verbose) # After the first batch, we are incrementing the model if k > 0: increment = True if verbose: print('Computing batch {}'.format(k)) # Train each batch self._train_batch( image_batch, increment=increment, group=group, verbose=verbose) def _train_batch(self, image_batch, increment=False, group=None, verbose=False): # Rescale to existing reference shape image_batch = rescale_images_to_reference_shape( image_batch, group, self.reference_shape, verbose=verbose) # If the deformation graph was not provided (None given), then compute # the MST if None in self.deformation_graph: graph_shapes = [i.landmarks[group].lms for i in image_batch] deformation_mst = _compute_minimum_spanning_tree( graph_shapes, root_vertex=0, prefix='- ', verbose=verbose) self.deformation_graph = [deformation_mst if g is None else g for g in self.deformation_graph] # Build models at each scale if verbose: print_dynamic('- Building models\n') feature_images = [] # for each scale (low --> high) for j in range(self.n_scales): if verbose: if len(self.scales) > 1: scale_prefix = ' - Scale {}: '.format(j) else: scale_prefix = ' - ' else: scale_prefix = None # Handle holistic features if j == 0 and self.holistic_features[j] == no_op: # Saves a lot of memory feature_images = image_batch elif (j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]): # Compute features only if this is the first pass through # the loop or the features at this scale are different from # the features at the previous scale feature_images = compute_features(image_batch, self.holistic_features[j], prefix=scale_prefix, verbose=verbose) # handle scales if self.scales[j] != 1: # Scale feature images only if scale is different than 1 scaled_images = scale_images(feature_images, self.scales[j], prefix=scale_prefix, verbose=verbose) else: scaled_images = feature_images # Extract potentially rescaled shapes scale_shapes = [i.landmarks[group].lms for i in scaled_images] # Apply procrustes to align the shapes aligned_shapes = align_shapes(scale_shapes) # Build the shape model using the aligned shapes if verbose: print_dynamic('{}Building shape model'.format(scale_prefix)) if not increment: self.shape_models.append(self._build_shape_model( aligned_shapes, self.shape_graph[j], self.max_shape_components[j], verbose=verbose)) else: self.shape_models[j].increment(aligned_shapes, verbose=verbose) # Build the deformation model if verbose: print_dynamic('{}Building deformation model'.format( scale_prefix)) if self.use_procrustes: deformation_shapes = aligned_shapes else: deformation_shapes = scale_shapes if not increment: self.deformation_models.append(self._build_deformation_model( deformation_shapes, self.deformation_graph[j], verbose=verbose)) else: self.deformation_models[j].increment(deformation_shapes, verbose=verbose) # Obtain warped images warped_images = self._warp_images(scaled_images, scale_shapes, j, scale_prefix, verbose) # Build the appearance model if verbose: print_dynamic('{}Building appearance model'.format( scale_prefix)) if not increment: self.appearance_models.append(self._build_appearance_model( warped_images, self.appearance_graph[j], self.n_appearance_components[j], verbose=verbose)) else: self._increment_appearance_model( warped_images, self.appearance_graph[j], self.appearance_models[j], 
verbose=verbose) if verbose: print_dynamic('{}Done\n'.format(scale_prefix)) def increment(self, images, group=None, batch_size=None, verbose=False): r""" Method that incrementally updates the APS model with a new batch of training images. Parameters ---------- images : `list` of `menpo.image.Image` The `list` of training images. group : `str` or ``None``, optional The landmark group that will be used to train the APS. If ``None`` and the images only have a single landmark group, then that is the one that will be used. Note that all the training images need to have the specified landmark group. batch_size : `int` or ``None``, optional If an `int` is provided, then the training is performed in an incremental fashion on image batches of size equal to the provided value. If ``None``, then the training is performed directly on the all the images. verbose : `bool`, optional If ``True``, then the progress of building the APS will be printed. """ return self._train(images, increment=True, group=group, verbose=verbose, batch_size=batch_size) def _build_shape_model(self, shapes, shape_graph, max_shape_components, verbose=False): # if the provided graph is None, then apply PCA, else use the GMRF if shape_graph is not None: pca_model = GMRFModel( shapes, shape_graph, mode='concatenation', n_components=None, dtype=np.float64, sparse=False, incremental=self.is_incremental, verbose=verbose).principal_components_analysis() return OrthoPDM(pca_model, max_n_components=max_shape_components) else: return OrthoPDM(shapes, max_n_components=max_shape_components) def _build_deformation_model(self, shapes, deformation_graph, verbose=False): return GMRFModel(shapes, deformation_graph, mode='subtraction', n_components=None, dtype=np.float64, sparse=False, incremental=self.is_incremental, verbose=verbose) def _build_appearance_model(self, images, appearance_graph, n_appearance_components, verbose=False): if appearance_graph is not None: return GMRFModel(images, appearance_graph, mode='concatenation', n_components=n_appearance_components, dtype=self.precision_dtype, sparse=True, incremental=self.is_incremental, verbose=verbose) else: raise NotImplementedError('The full appearance model is not ' 'implemented yet.') def _increment_appearance_model(self, images, appearance_graph, appearance_model, verbose=False): if appearance_graph is not None: appearance_model.increment(images, verbose=verbose) else: raise NotImplementedError('The full appearance model is not ' 'implemented yet.') def _warp_images(self, images, shapes, scale_index, prefix, verbose): return extract_patches( images, shapes, self.patch_shape[scale_index], normalise_function=self.patch_normalisation[scale_index], prefix=prefix, verbose=verbose) @property def n_scales(self): """ Returns the number of scales. :type: `int` """ return len(self.scales) @property def _str_title(self): r""" Returns a string containing name of the model. :type: `str` """ return 'Generative Active Pictorial Structures' def instance(self, shape_weights=None, scale_index=-1, as_graph=False): r""" Generates an instance of the shape model. Parameters ---------- shape_weights : ``(n_weights,)`` `ndarray` or `list` or ``None``, optional The weights of the shape model that will be used to create a novel shape instance. If ``None``, the weights are assumed to be zero, thus the mean shape is used. scale_index : `int`, optional The scale to be used. 
as_graph : `bool`, optional If ``True``, then the instance will be returned as a `menpo.shape.PointTree` or a `menpo.shape.PointDirectedGraph`, depending on the type of the deformation graph. """ if shape_weights is None: shape_weights = [0] sm = self.shape_models[scale_index].model shape_instance = sm.instance(shape_weights, normalized_weights=True) if as_graph: if isinstance(self.deformation_graph[scale_index], Tree): shape_instance = PointTree( shape_instance.points, self.deformation_graph[scale_index].adjacency_matrix, self.deformation_graph[scale_index].root_vertex) else: shape_instance = PointDirectedGraph( shape_instance.points, self.deformation_graph[scale_index].adjacency_matrix) return shape_instance def random_instance(self, scale_index=-1, as_graph=False): r""" Generates a random instance of the APS. Parameters ---------- scale_index : `int`, optional The scale to be used. as_graph : `bool`, optional If ``True``, then the instance will be returned as a `menpo.shape.PointTree` or a `menpo.shape.PointDirectedGraph`, depending on the type of the deformation graph. """ shape_weights = np.random.randn( self.shape_models[scale_index].n_active_components) return self.instance(shape_weights, scale_index=scale_index, as_graph=as_graph) def view_shape_models_widget(self, n_parameters=5, parameters_bounds=(-3.0, 3.0), mode='multiple', figure_size=(10, 8)): r""" Visualizes the shape models of the APS object using an interactive widget. Parameters ---------- n_parameters : `int` or `list` of `int` or ``None``, optional The number of shape principal components to be used for the parameters sliders. If `int`, then the number of sliders per scale is the minimum between `n_parameters` and the number of active components per scale. If `list` of `int`, then a number of sliders is defined per scale. If ``None``, all the active components per scale will have a slider. parameters_bounds : ``(float, float)``, optional The minimum and maximum bounds, in std units, for the sliders. mode : {``single``, ``multiple``}, optional If ``'single'``, only a single slider is constructed along with a drop down menu. If ``'multiple'``, a slider is constructed for each parameter. figure_size : (`int`, `int`), optional The size of the rendered figure. """ try: from menpowidgets import visualize_shape_model visualize_shape_model( [sm.model for sm in self.shape_models], n_parameters=n_parameters, parameters_bounds=parameters_bounds, figure_size=figure_size, mode=mode) except ImportError: from menpo.visualize.base import MenpowidgetsMissingError raise MenpowidgetsMissingError() def view_shape_graph_widget(self, scale_index=-1, figure_size=(10, 8)): r""" Visualize the shape graph using an interactive widget. Parameters ---------- scale_index : `int`, optional The scale to be used. figure_size : (`int`, `int`), optional The size of the rendered figure. Raises ------ ValueError Scale level {scale_index} uses a PCA shape model, so there is no graph """ if self.shape_graph[scale_index] is not None: PointUndirectedGraph( self.shape_models[scale_index].model.mean().points, self.shape_graph[scale_index].adjacency_matrix).view_widget( figure_size=figure_size) else: raise ValueError("Scale level {} uses a PCA shape model, so there " "is no graph".format(scale_index)) def view_deformation_graph_widget(self, scale_index=-1, figure_size=(10, 8)): r""" Visualize the deformation graph using an interactive widget. Parameters ---------- scale_index : `int`, optional The scale to be used. 
figure_size : (`int`, `int`), optional The size of the rendered figure. """ if isinstance(self.deformation_graph[scale_index], Tree): dg = PointTree(self.shape_models[scale_index].model.mean().points, self.deformation_graph[scale_index].adjacency_matrix, self.deformation_graph[scale_index].root_vertex) else: dg = PointDirectedGraph( self.shape_models[scale_index].model.mean().points, self.deformation_graph[scale_index].adjacency_matrix) dg.view_widget(figure_size=figure_size) def view_appearance_graph_widget(self, scale_index=-1, figure_size=(10, 8)): r""" Visualize the appearance graph using an interactive widget. Parameters ---------- scale_index : `int`, optional The scale to be used. figure_size : (`int`, `int`), optional The size of the rendered figure. Raises ------ ValueError Scale level {scale_index} uses a PCA appearance model, so there is no graph """ if self.appearance_graph[scale_index] is not None: PointUndirectedGraph( self.shape_models[scale_index].model.mean().points, self.appearance_graph[scale_index].adjacency_matrix).\ view_widget(figure_size=figure_size) else: raise ValueError("Scale level {} uses a PCA appearance model, " "so there is no graph".format(scale_index)) def view_deformation_model(self, scale_index=-1, n_std=2, render_colour_bar=False, colour_map='jet', image_view=True, figure_id=None, new_figure=False, render_graph_lines=True, graph_line_colour='b', graph_line_style='-', graph_line_width=1., ellipse_line_colour='r', ellipse_line_style='-', ellipse_line_width=1., render_markers=True, marker_style='o', marker_size=5, marker_face_colour='k', marker_edge_colour='k', marker_edge_width=1., render_axes=False, axes_font_name='sans-serif', axes_font_size=10, axes_font_style='normal', axes_font_weight='normal', crop_proportion=0.1, figure_size=(10, 8)): r""" Visualize the deformation model by plotting a Gaussian ellipsis per graph edge. Parameters ---------- scale_index : `int`, optional The scale to be used. n_std : `float`, optional This defines the size of the ellipses in terms of number of standard deviations. render_colour_bar : `bool`, optional If ``True``, then the ellipses will be coloured based on their normalized standard deviations and a colour bar will also appear on the side. If ``False``, then all the ellipses will have the same colour. colour_map : `str`, optional A valid Matplotlib colour map. For more info, please refer to `matplotlib.cm`. image_view : `bool`, optional If ``True`` the ellipses will be rendered in the image coordinates system. figure_id : `object`, optional The id of the figure to be used. new_figure : `bool`, optional If ``True``, a new figure is created. render_graph_lines : `bool`, optional Defines whether to plot the graph's edges. graph_line_colour : See Below, optional The colour of the lines of the graph's edges. Example options:: {r, g, b, c, m, k, w} or (3, ) ndarray graph_line_style : ``{-, --, -., :}``, optional The style of the lines of the graph's edges. graph_line_width : `float`, optional The width of the lines of the graph's edges. ellipse_line_colour : See Below, optional The colour of the lines of the ellipses. Example options:: {r, g, b, c, m, k, w} or (3, ) ndarray ellipse_line_style : ``{-, --, -., :}``, optional The style of the lines of the ellipses. ellipse_line_width : `float`, optional The width of the lines of the ellipses. render_markers : `bool`, optional If ``True``, the centers of the ellipses will be rendered. marker_style : See Below, optional The style of the centers of the ellipses. 
Example options :: {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8} marker_size : `int`, optional The size of the centers of the ellipses in points. marker_face_colour : See Below, optional The face (filling) colour of the centers of the ellipses. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_colour : See Below, optional The edge colour of the centers of the ellipses. Example options :: {r, g, b, c, m, k, w} or (3, ) ndarray marker_edge_width : `float`, optional The edge width of the centers of the ellipses. render_axes : `bool`, optional If ``True``, the axes will be rendered. axes_font_name : See Below, optional The font of the axes. Example options :: {serif, sans-serif, cursive, fantasy, monospace} axes_font_size : `int`, optional The font size of the axes. axes_font_style : ``{normal, italic, oblique}``, optional The font style of the axes. axes_font_weight : See Below, optional The font weight of the axes. Example options :: {ultralight, light, normal, regular, book, medium, roman, semibold,demibold, demi, bold, heavy, extra bold, black} crop_proportion : `float`, optional The proportion to be left around the centers' pointcloud. figure_size : (`float`, `float`) `tuple` or ``None`` optional The size of the figure in inches. """ from menpo.visualize import plot_gaussian_ellipses mean_shape = self.shape_models[scale_index].model.mean().points deformation_graph = self.deformation_graph[scale_index] # get covariance matrices covariances = [] means = [] for e in range(deformation_graph.n_edges): # find vertices parent = deformation_graph.edges[e, 0] child = deformation_graph.edges[e, 1] # relative location mean means.append(mean_shape[child, :]) # relative location cov s1 = -self.deformation_models[scale_index].precision[2 * child, 2 * parent] s2 = -self.deformation_models[scale_index].precision[2 * child + 1, 2 * parent + 1] s3 = -self.deformation_models[scale_index].precision[2 * child, 2 * parent + 1] covariances.append(np.linalg.inv(np.array([[s1, s3], [s3, s2]]))) # plot deformation graph if isinstance(deformation_graph, Tree): renderer = PointTree( mean_shape, deformation_graph.adjacency_matrix, deformation_graph.root_vertex).view( figure_id=figure_id, new_figure=new_figure, image_view=image_view, render_lines=render_graph_lines, line_colour=graph_line_colour, line_style=graph_line_style, line_width=graph_line_width, render_markers=render_markers, marker_style=marker_style, marker_size=marker_size, marker_face_colour=marker_face_colour, marker_edge_colour=marker_edge_colour, marker_edge_width=marker_edge_width, render_axes=render_axes, axes_font_name=axes_font_name, axes_font_size=axes_font_size, axes_font_style=axes_font_style, axes_font_weight=axes_font_weight, figure_size=figure_size) else: renderer = PointDirectedGraph( mean_shape, deformation_graph.adjacency_matrix).view( figure_id=figure_id, new_figure=new_figure, image_view=image_view, render_lines=render_graph_lines, line_colour=graph_line_colour, line_style=graph_line_style, line_width=graph_line_width, render_markers=render_markers, marker_style=marker_style, marker_size=marker_size, marker_face_colour=marker_face_colour, marker_edge_colour=marker_edge_colour, marker_edge_width=marker_edge_width, render_axes=render_axes, axes_font_name=axes_font_name, axes_font_size=axes_font_size, axes_font_style=axes_font_style, axes_font_weight=axes_font_weight, figure_size=figure_size) # plot ellipses renderer = plot_gaussian_ellipses( covariances, means, n_std=n_std, 
render_colour_bar=render_colour_bar, colour_bar_label='Normalized Standard Deviation', colour_map=colour_map, figure_id=renderer.figure_id, new_figure=False, image_view=image_view, line_colour=ellipse_line_colour, line_style=ellipse_line_style, line_width=ellipse_line_width, render_markers=render_markers, marker_edge_colour=marker_edge_colour, marker_face_colour=marker_face_colour, marker_edge_width=marker_edge_width, marker_size=marker_size, marker_style=marker_style, render_axes=render_axes, axes_font_name=axes_font_name, axes_font_size=axes_font_size, axes_font_style=axes_font_style, axes_font_weight=axes_font_weight, crop_proportion=crop_proportion, figure_size=figure_size) return renderer def __str__(self): return _aps_str(self) def _compute_minimum_spanning_tree(shapes, root_vertex=0, prefix='', verbose=False): # initialize weights matrix n_vertices = shapes[0].n_points weights = np.zeros((n_vertices, n_vertices)) # print progress if requested range1 = range(n_vertices-1) if verbose: range1 = print_progress( range1, end_with_newline=False, prefix='{}Deformation graph - Computing complete graph`s ' 'weights'.format(prefix)) # compute weights for i in range1: for j in range(i+1, n_vertices, 1): # create data matrix of edge diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes] diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes] coords = np.array([diffs_x, diffs_y]) # compute mean and covariance m = np.mean(coords, axis=1) c = np.cov(coords) # get weight for im in range(len(shapes)): weights[i, j] += -np.log(multivariate_normal.pdf(coords[:, im], mean=m, cov=c)) weights[j, i] = weights[i, j] # create undirected graph complete_graph = UndirectedGraph(weights) if verbose: print_dynamic('{}Deformation graph - Minimum spanning graph ' 'computed.\n'.format(prefix)) # compute minimum spanning graph return complete_graph.minimum_spanning_tree(root_vertex) def _aps_str(aps): if aps.diagonal is not None: diagonal = aps.diagonal else: y, x = aps.reference_shape.range() diagonal = np.sqrt(x ** 2 + y ** 2) # Compute scale info strings scales_info = [] lvl_str_tmplt = r""" - Scale {} - Holistic feature: {} - Patch shape: {} - Appearance model class: {} - {} - {} features per point ({} in total) - {} - Shape model class: {} - {} - {} shape components - {} similarity transform parameters - Deformation model class: {} - {}""" for k, s in enumerate(aps.scales): comp_str = "No SVD used" if aps.appearance_models[k].n_components is not None: comp_str = "{} SVD components".format(aps.appearance_models[k].n_components) shape_model_str = "Trained using PCA" if aps.shape_graph[k] is not None: shape_model_str = "Trained using GMRF: {}".format(aps.shape_graph[k].__str__()) scales_info.append(lvl_str_tmplt.format( s, name_of_callable(aps.holistic_features[k]), aps.patch_shape[k], name_of_callable(aps.appearance_models[k]), aps.appearance_models[k].graph.__str__(), aps.appearance_models[k].n_features_per_vertex, aps.appearance_models[k].n_features, comp_str, name_of_callable(aps.shape_models[k]), shape_model_str, aps.shape_models[k].model.n_components, aps.shape_models[k].n_global_parameters, name_of_callable(aps.deformation_models[k]), aps.deformation_models[k].graph.__str__())) scales_info = '\n'.join(scales_info) cls_str = r"""{class_title} - Images scaled to diagonal: {diagonal:.2f} - Scales: {scales} {scales_info} """.format(class_title=aps._str_title, diagonal=diagonal, scales=aps.scales, scales_info=scales_info) return cls_str
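When no deformation graph is passed, `_compute_minimum_spanning_tree` above weights every candidate edge by the summed negative log-likelihood of the relative landmark positions under a per-edge Gaussian and then extracts a minimum spanning tree. The sketch below illustrates the same idea on toy data using only NumPy/SciPy; the `shapes` array and the use of `scipy.sparse.csgraph.minimum_spanning_tree` are assumptions made for brevity and are not menpofit's API, which instead builds a rooted `menpo.shape.Tree` from the complete `UndirectedGraph`.

# Toy sketch of the MST edge-weighting idea (illustrative, not menpofit code).
import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
n_shapes, n_points = 50, 4
shapes = rng.normal(size=(n_shapes, n_points, 2))    # hypothetical (x, y) landmark sets

weights = np.zeros((n_points, n_points))
for i in range(n_points - 1):
    for j in range(i + 1, n_points):
        # Relative offsets of landmark i w.r.t. landmark j across all training shapes.
        diffs = shapes[:, i, :] - shapes[:, j, :]
        mvn = multivariate_normal(mean=diffs.mean(axis=0), cov=np.cov(diffs.T))
        # Edges whose relative positions are poorly modelled receive a high weight.
        weights[i, j] = weights[j, i] = -np.log(mvn.pdf(diffs)).sum()

mst = minimum_spanning_tree(weights)                  # sparse (n_points, n_points) adjacency
print(np.array(mst.todense()))

The real implementation additionally prints progress and roots the resulting tree at `root_vertex`, so it can serve directly as the directed deformation graph of the APS.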
bsd-3-clause
kelseyoo14/Wander
venv_2_7/lib/python2.7/site-packages/pandas/tests/test_categorical.py
9
162878
# -*- coding: utf-8 -*- # pylint: disable=E1101,E1103,W0232 from datetime import datetime from pandas.compat import range, lrange, u, PY3 import os import pickle import re from distutils.version import LooseVersion import numpy as np import pandas as pd from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp, CategoricalIndex from pandas.core.config import option_context import pandas.core.common as com import pandas.compat as compat import pandas.util.testing as tm class TestCategorical(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'], ordered=True) def test_getitem(self): self.assertEqual(self.factor[0], 'a') self.assertEqual(self.factor[-1], 'c') subf = self.factor[[0, 1, 2]] tm.assert_almost_equal(subf._codes, [0, 1, 1]) subf = self.factor[np.asarray(self.factor) == 'c'] tm.assert_almost_equal(subf._codes, [2, 2, 2]) def test_getitem_listlike(self): # GH 9469 # properly coerce the input indexers np.random.seed(1) c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8)) result = c.codes[np.array([100000]).astype(np.int64)] expected = c[np.array([100000]).astype(np.int64)].codes self.assert_numpy_array_equal(result, expected) def test_setitem(self): # int/positional c = self.factor.copy() c[0] = 'b' self.assertEqual(c[0], 'b') c[-1] = 'a' self.assertEqual(c[-1], 'a') # boolean c = self.factor.copy() indexer = np.zeros(len(c),dtype='bool') indexer[0] = True indexer[-1] = True c[indexer] = 'c' expected = Categorical.from_array(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'], ordered=True) self.assert_categorical_equal(c, expected) def test_setitem_listlike(self): # GH 9469 # properly coerce the input indexers np.random.seed(1) c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8)).add_categories([-1000]) indexer = np.array([100000]).astype(np.int64) c[indexer] = -1000 # we are asserting the code result here # which maps to the -1000 category result = c.codes[np.array([100000]).astype(np.int64)] self.assertEqual(result, np.array([5], dtype='int8')) def test_constructor_unsortable(self): # it works! 
arr = np.array([1, 2, 3, datetime.now()], dtype='O') factor = Categorical.from_array(arr, ordered=False) self.assertFalse(factor.ordered) if compat.PY3: self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True)) else: # this however will raise as cannot be sorted (on PY3 or older numpies) if LooseVersion(np.__version__) < "1.10": self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True)) else: Categorical.from_array(arr, ordered=True) def test_is_equal_dtype(self): # test dtype comparisons between cats c1 = Categorical(list('aabca'),categories=list('abc'),ordered=False) c2 = Categorical(list('aabca'),categories=list('cab'),ordered=False) c3 = Categorical(list('aabca'),categories=list('cab'),ordered=True) self.assertTrue(c1.is_dtype_equal(c1)) self.assertTrue(c2.is_dtype_equal(c2)) self.assertTrue(c3.is_dtype_equal(c3)) self.assertFalse(c1.is_dtype_equal(c2)) self.assertFalse(c1.is_dtype_equal(c3)) self.assertFalse(c1.is_dtype_equal(Index(list('aabca')))) self.assertFalse(c1.is_dtype_equal(c1.astype(object))) self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1))) self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,categories=list('cab')))) self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,ordered=True))) def test_constructor(self): exp_arr = np.array(["a", "b", "c", "a", "b", "c"]) c1 = Categorical(exp_arr) self.assert_numpy_array_equal(c1.__array__(), exp_arr) c2 = Categorical(exp_arr, categories=["a","b","c"]) self.assert_numpy_array_equal(c2.__array__(), exp_arr) c2 = Categorical(exp_arr, categories=["c","b","a"]) self.assert_numpy_array_equal(c2.__array__(), exp_arr) # categories must be unique def f(): Categorical([1,2], [1,2,2]) self.assertRaises(ValueError, f) def f(): Categorical(["a","b"], ["a","b","b"]) self.assertRaises(ValueError, f) def f(): with tm.assert_produces_warning(FutureWarning): Categorical([1,2], [1,2,np.nan, np.nan]) self.assertRaises(ValueError, f) # The default should be unordered c1 = Categorical(["a", "b", "c", "a"]) self.assertFalse(c1.ordered) # Categorical as input c1 = Categorical(["a", "b", "c", "a"]) c2 = Categorical(c1) self.assertTrue(c1.equals(c2)) c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"]) c2 = Categorical(c1) self.assertTrue(c1.equals(c2)) c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"]) c2 = Categorical(c1) self.assertTrue(c1.equals(c2)) c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"]) c2 = Categorical(c1, categories=["a","b","c"]) self.assert_numpy_array_equal(c1.__array__(), c2.__array__()) self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"])) # Series of dtype category c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"]) c2 = Categorical(Series(c1)) self.assertTrue(c1.equals(c2)) c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"]) c2 = Categorical(Series(c1)) self.assertTrue(c1.equals(c2)) # Series c1 = Categorical(["a", "b", "c", "a"]) c2 = Categorical(Series(["a", "b", "c", "a"])) self.assertTrue(c1.equals(c2)) c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"]) c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a","b","c","d"]) self.assertTrue(c1.equals(c2)) # This should result in integer categories, not float! 
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3]) self.assertTrue(com.is_integer_dtype(cat.categories)) # https://github.com/pydata/pandas/issues/3678 cat = pd.Categorical([np.nan,1, 2, 3]) self.assertTrue(com.is_integer_dtype(cat.categories)) # this should result in floats cat = pd.Categorical([np.nan, 1, 2., 3 ]) self.assertTrue(com.is_float_dtype(cat.categories)) cat = pd.Categorical([np.nan, 1., 2., 3. ]) self.assertTrue(com.is_float_dtype(cat.categories)) # Deprecating NaNs in categoires (GH #10748) # preserve int as far as possible by converting to object if NaN is in categories with tm.assert_produces_warning(FutureWarning): cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3]) self.assertTrue(com.is_object_dtype(cat.categories)) # This doesn't work -> this would probably need some kind of "remember the original type" # feature to try to cast the array interface result to... #vals = np.asarray(cat[cat.notnull()]) #self.assertTrue(com.is_integer_dtype(vals)) with tm.assert_produces_warning(FutureWarning): cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"]) self.assertTrue(com.is_object_dtype(cat.categories)) # but don't do it for floats with tm.assert_produces_warning(FutureWarning): cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.]) self.assertTrue(com.is_float_dtype(cat.categories)) # corner cases cat = pd.Categorical([1]) self.assertTrue(len(cat.categories) == 1) self.assertTrue(cat.categories[0] == 1) self.assertTrue(len(cat.codes) == 1) self.assertTrue(cat.codes[0] == 0) cat = pd.Categorical(["a"]) self.assertTrue(len(cat.categories) == 1) self.assertTrue(cat.categories[0] == "a") self.assertTrue(len(cat.codes) == 1) self.assertTrue(cat.codes[0] == 0) # Scalars should be converted to lists cat = pd.Categorical(1) self.assertTrue(len(cat.categories) == 1) self.assertTrue(cat.categories[0] == 1) self.assertTrue(len(cat.codes) == 1) self.assertTrue(cat.codes[0] == 0) cat = pd.Categorical([1], categories=1) self.assertTrue(len(cat.categories) == 1) self.assertTrue(cat.categories[0] == 1) self.assertTrue(len(cat.codes) == 1) self.assertTrue(cat.codes[0] == 0) # Catch old style constructor useage: two arrays, codes + categories # We can only catch two cases: # - when the first is an integer dtype and the second is not # - when the resulting codes are all -1/NaN with tm.assert_produces_warning(RuntimeWarning): c_old = Categorical([0,1,2,0,1,2], categories=["a","b","c"]) with tm.assert_produces_warning(RuntimeWarning): c_old = Categorical([0,1,2,0,1,2], categories=[3,4,5]) # the next one are from the old docs, but unfortunately these don't trigger :-( with tm.assert_produces_warning(None): c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) cat = Categorical([1,2], categories=[1,2,3]) # this is a legitimate constructor with tm.assert_produces_warning(None): c = Categorical(np.array([],dtype='int64'),categories=[3,2,1],ordered=True) def test_constructor_with_index(self): ci = CategoricalIndex(list('aabbca'),categories=list('cab')) self.assertTrue(ci.values.equals(Categorical(ci))) ci = CategoricalIndex(list('aabbca'),categories=list('cab')) self.assertTrue(ci.values.equals(Categorical(ci.astype(object),categories=ci.categories))) def test_constructor_with_generator(self): # This was raising an Error in isnull(single_val).any() because isnull returned a scalar # for a generator from pandas.compat import range as xrange exp = Categorical([0,1,2]) cat = Categorical((x for x in [0,1,2])) 
self.assertTrue(cat.equals(exp)) cat = Categorical(xrange(3)) self.assertTrue(cat.equals(exp)) # This uses xrange internally from pandas.core.index import MultiIndex MultiIndex.from_product([range(5), ['a', 'b', 'c']]) # check that categories accept generators and sequences cat = pd.Categorical([0,1,2], categories=(x for x in [0,1,2])) self.assertTrue(cat.equals(exp)) cat = pd.Categorical([0,1,2], categories=xrange(3)) self.assertTrue(cat.equals(exp)) def test_from_codes(self): # too few categories def f(): Categorical.from_codes([1,2], [1,2]) self.assertRaises(ValueError, f) # no int codes def f(): Categorical.from_codes(["a"], [1,2]) self.assertRaises(ValueError, f) # no unique categories def f(): Categorical.from_codes([0,1,2], ["a","a","b"]) self.assertRaises(ValueError, f) # too negative def f(): Categorical.from_codes([-2,1,2], ["a","b","c"]) self.assertRaises(ValueError, f) exp = Categorical(["a","b","c"], ordered=False) res = Categorical.from_codes([0,1,2], ["a","b","c"]) self.assertTrue(exp.equals(res)) # Not available in earlier numpy versions if hasattr(np.random, "choice"): codes = np.random.choice([0,1], 5, p=[0.9,0.1]) pd.Categorical.from_codes(codes, categories=["train", "test"]) def test_comparisons(self): result = self.factor[self.factor == 'a'] expected = self.factor[np.asarray(self.factor) == 'a'] self.assertTrue(result.equals(expected)) result = self.factor[self.factor != 'a'] expected = self.factor[np.asarray(self.factor) != 'a'] self.assertTrue(result.equals(expected)) result = self.factor[self.factor < 'c'] expected = self.factor[np.asarray(self.factor) < 'c'] self.assertTrue(result.equals(expected)) result = self.factor[self.factor > 'a'] expected = self.factor[np.asarray(self.factor) > 'a'] self.assertTrue(result.equals(expected)) result = self.factor[self.factor >= 'b'] expected = self.factor[np.asarray(self.factor) >= 'b'] self.assertTrue(result.equals(expected)) result = self.factor[self.factor <= 'b'] expected = self.factor[np.asarray(self.factor) <= 'b'] self.assertTrue(result.equals(expected)) n = len(self.factor) other = self.factor[np.random.permutation(n)] result = self.factor == other expected = np.asarray(self.factor) == np.asarray(other) self.assert_numpy_array_equal(result, expected) result = self.factor == 'd' expected = np.repeat(False, len(self.factor)) self.assert_numpy_array_equal(result, expected) # comparisons with categoricals cat_rev = pd.Categorical(["a","b","c"], categories=["c","b","a"], ordered=True) cat_rev_base = pd.Categorical(["b","b","b"], categories=["c","b","a"], ordered=True) cat = pd.Categorical(["a","b","c"], ordered=True) cat_base = pd.Categorical(["b","b","b"], categories=cat.categories, ordered=True) # comparisons need to take categories ordering into account res_rev = cat_rev > cat_rev_base exp_rev = np.array([True, False, False]) self.assert_numpy_array_equal(res_rev, exp_rev) res_rev = cat_rev < cat_rev_base exp_rev = np.array([False, False, True]) self.assert_numpy_array_equal(res_rev, exp_rev) res = cat > cat_base exp = np.array([False, False, True]) self.assert_numpy_array_equal(res, exp) # Only categories with same categories can be compared def f(): cat > cat_rev self.assertRaises(TypeError, f) cat_rev_base2 = pd.Categorical(["b","b","b"], categories=["c","b","a","d"]) def f(): cat_rev > cat_rev_base2 self.assertRaises(TypeError, f) # Only categories with same ordering information can be compared cat_unorderd = cat.set_ordered(False) self.assertFalse((cat > cat).any()) def f(): cat > cat_unorderd 
self.assertRaises(TypeError, f) # comparison (in both directions) with Series will raise s = Series(["b","b","b"]) self.assertRaises(TypeError, lambda: cat > s) self.assertRaises(TypeError, lambda: cat_rev > s) self.assertRaises(TypeError, lambda: s < cat) self.assertRaises(TypeError, lambda: s < cat_rev) # comparison with numpy.array will raise in both direction, but only on newer # numpy versions a = np.array(["b","b","b"]) self.assertRaises(TypeError, lambda: cat > a) self.assertRaises(TypeError, lambda: cat_rev > a) # The following work via '__array_priority__ = 1000' # works only on numpy >= 1.7.1 if LooseVersion(np.__version__) > "1.7.1": self.assertRaises(TypeError, lambda: a < cat) self.assertRaises(TypeError, lambda: a < cat_rev) # Make sure that unequal comparison take the categories order in account cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True) exp = np.array([True, False, False]) res = cat_rev > "b" self.assert_numpy_array_equal(res, exp) def test_na_flags_int_categories(self): # #1457 categories = lrange(10) labels = np.random.randint(0, 10, 20) labels[::5] = -1 cat = Categorical(labels, categories, fastpath=True) repr(cat) self.assert_numpy_array_equal(com.isnull(cat), labels == -1) def test_categories_none(self): factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'], ordered=True) self.assertTrue(factor.equals(self.factor)) def test_describe(self): # string type desc = self.factor.describe() expected = DataFrame({'counts': [3, 2, 3], 'freqs': [3/8., 2/8., 3/8.]}, index=pd.CategoricalIndex(['a', 'b', 'c'], name='categories')) tm.assert_frame_equal(desc, expected) # check unused categories cat = self.factor.copy() cat.set_categories(["a","b","c","d"], inplace=True) desc = cat.describe() expected = DataFrame({'counts': [3, 2, 3, 0], 'freqs': [3/8., 2/8., 3/8., 0]}, index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='categories')) tm.assert_frame_equal(desc, expected) # check an integer one desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe() expected = DataFrame({'counts': [5, 3, 3], 'freqs': [5/11., 3/11., 3/11.]}, index=pd.CategoricalIndex([1, 2, 3], name='categories')) tm.assert_frame_equal(desc, expected) # https://github.com/pydata/pandas/issues/3678 # describe should work with NaN cat = pd.Categorical([np.nan,1, 2, 2]) desc = cat.describe() expected = DataFrame({'counts': [1, 2, 1], 'freqs': [1/4., 2/4., 1/4.]}, index=pd.CategoricalIndex([1, 2, np.nan], categories=[1, 2], name='categories')) tm.assert_frame_equal(desc, expected) # NA as a category with tm.assert_produces_warning(FutureWarning): cat = pd.Categorical(["a", "c", "c", np.nan], categories=["b", "a", "c", np.nan]) result = cat.describe() expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]], columns=['counts','freqs'], index=pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories')) tm.assert_frame_equal(result,expected) # NA as an unused category with tm.assert_produces_warning(FutureWarning): cat = pd.Categorical(["a", "c", "c"], categories=["b", "a", "c", np.nan]) result = cat.describe() exp_idx = pd.CategoricalIndex(['b', 'a', 'c', np.nan], name='categories') expected = DataFrame([[0, 0], [1, 1/3.], [2, 2/3.], [0, 0]], columns=['counts', 'freqs'], index=exp_idx) tm.assert_frame_equal(result,expected) def test_print(self): expected = ["[a, b, b, a, a, c, c, c]", "Categories (3, object): [a < b < c]"] expected = "\n".join(expected) actual = repr(self.factor) self.assertEqual(actual, expected) def test_big_print(self): factor = Categorical([0,1,2,0,1,2]*100, 
['a', 'b', 'c'], name='cat', fastpath=True) expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600", "Categories (3, object): [a, b, c]"] expected = "\n".join(expected) actual = repr(factor) self.assertEqual(actual, expected) def test_empty_print(self): factor = Categorical([], ["a","b","c"]) expected = ("[], Categories (3, object): [a, b, c]") # hack because array_repr changed in numpy > 1.6.x actual = repr(factor) self.assertEqual(actual, expected) self.assertEqual(expected, actual) factor = Categorical([], ["a","b","c"], ordered=True) expected = ("[], Categories (3, object): [a < b < c]") actual = repr(factor) self.assertEqual(expected, actual) factor = Categorical([], []) expected = ("[], Categories (0, object): []") self.assertEqual(expected, repr(factor)) def test_print_none_width(self): # GH10087 a = pd.Series(pd.Categorical([1,2,3,4])) exp = u("0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]") with option_context("display.width", None): self.assertEqual(exp, repr(a)) def test_unicode_print(self): if PY3: _rep = repr else: _rep = unicode c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20) expected = u"""[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc] Length: 60 Categories (3, object): [aaaaa, bb, cccc]""" self.assertEqual(_rep(c), expected) c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20) expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう] Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" self.assertEqual(_rep(c), expected) # unicode option should not affect to Categorical, as it doesn't care the repr width with option_context('display.unicode.east_asian_width', True): c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20) expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう] Length: 60 Categories (3, object): [ああああ, いいいいい, ううううううう]""" self.assertEqual(_rep(c), expected) def test_periodindex(self): idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02', '2014-03', '2014-03'], freq='M') cat1 = Categorical.from_array(idx1) str(cat1) exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64') exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') self.assert_numpy_array_equal(cat1._codes, exp_arr) self.assertTrue(cat1.categories.equals(exp_idx)) idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01', '2014-03', '2014-01'], freq='M') cat2 = Categorical.from_array(idx2, ordered=True) str(cat2) exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64') exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M') self.assert_numpy_array_equal(cat2._codes, exp_arr) self.assertTrue(cat2.categories.equals(exp_idx2)) idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09', '2013-08', '2013-07', '2013-05'], freq='M') cat3 = Categorical.from_array(idx3, ordered=True) exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64') exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09', '2013-10', '2013-11', '2013-12'], freq='M') self.assert_numpy_array_equal(cat3._codes, exp_arr) self.assertTrue(cat3.categories.equals(exp_idx)) def test_categories_assigments(self): s = pd.Categorical(["a","b","c","a"]) exp = np.array([1,2,3,1]) s.categories = [1,2,3] self.assert_numpy_array_equal(s.__array__(), exp) self.assert_numpy_array_equal(s.categories, np.array([1,2,3])) # lengthen def f(): s.categories = [1,2,3,4] self.assertRaises(ValueError, f) # shorten def f(): s.categories = [1,2] self.assertRaises(ValueError, f) def 
test_construction_with_ordered(self): # GH 9347, 9190 cat = Categorical([0,1,2]) self.assertFalse(cat.ordered) cat = Categorical([0,1,2],ordered=False) self.assertFalse(cat.ordered) cat = Categorical([0,1,2],ordered=True) self.assertTrue(cat.ordered) def test_ordered_api(self): # GH 9347 cat1 = pd.Categorical(["a","c","b"], ordered=False) self.assertTrue(cat1.categories.equals(Index(['a','b','c']))) self.assertFalse(cat1.ordered) cat2 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=False) self.assertTrue(cat2.categories.equals(Index(['b','c','a']))) self.assertFalse(cat2.ordered) cat3 = pd.Categorical(["a","c","b"], ordered=True) self.assertTrue(cat3.categories.equals(Index(['a','b','c']))) self.assertTrue(cat3.ordered) cat4 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=True) self.assertTrue(cat4.categories.equals(Index(['b','c','a']))) self.assertTrue(cat4.ordered) def test_set_ordered(self): cat = Categorical(["a","b","c","a"], ordered=True) cat2 = cat.as_unordered() self.assertFalse(cat2.ordered) cat2 = cat.as_ordered() self.assertTrue(cat2.ordered) cat2.as_unordered(inplace=True) self.assertFalse(cat2.ordered) cat2.as_ordered(inplace=True) self.assertTrue(cat2.ordered) self.assertTrue(cat2.set_ordered(True).ordered) self.assertFalse(cat2.set_ordered(False).ordered) cat2.set_ordered(True, inplace=True) self.assertTrue(cat2.ordered) cat2.set_ordered(False, inplace=True) self.assertFalse(cat2.ordered) # deperecated in v0.16.0 with tm.assert_produces_warning(FutureWarning): cat.ordered = False self.assertFalse(cat.ordered) with tm.assert_produces_warning(FutureWarning): cat.ordered = True self.assertTrue(cat.ordered) def test_set_categories(self): cat = Categorical(["a","b","c","a"], ordered=True) exp_categories = np.array(["c","b","a"]) exp_values = np.array(["a","b","c","a"]) res = cat.set_categories(["c","b","a"], inplace=True) self.assert_numpy_array_equal(cat.categories, exp_categories) self.assert_numpy_array_equal(cat.__array__(), exp_values) self.assertIsNone(res) res = cat.set_categories(["a","b","c"]) # cat must be the same as before self.assert_numpy_array_equal(cat.categories, exp_categories) self.assert_numpy_array_equal(cat.__array__(), exp_values) # only res is changed exp_categories_back = np.array(["a","b","c"]) self.assert_numpy_array_equal(res.categories, exp_categories_back) self.assert_numpy_array_equal(res.__array__(), exp_values) # not all "old" included in "new" -> all not included ones are now np.nan cat = Categorical(["a","b","c","a"], ordered=True) res = cat.set_categories(["a"]) self.assert_numpy_array_equal(res.codes, np.array([0,-1,-1,0])) # still not all "old" in "new" res = cat.set_categories(["a","b","d"]) self.assert_numpy_array_equal(res.codes, np.array([0,1,-1,0])) self.assert_numpy_array_equal(res.categories, np.array(["a","b","d"])) # all "old" included in "new" cat = cat.set_categories(["a","b","c","d"]) exp_categories = np.array(["a","b","c","d"]) self.assert_numpy_array_equal(cat.categories, exp_categories) # internals... c = Categorical([1,2,3,4,1], categories=[1,2,3,4], ordered=True) self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0])) self.assert_numpy_array_equal(c.categories , np.array([1,2,3,4] )) self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1] )) c = c.set_categories([4,3,2,1]) # all "pointers" to '4' must be changed from 3 to 0,... 
self.assert_numpy_array_equal(c._codes, np.array([3,2,1,0,3])) # positions are changed self.assert_numpy_array_equal(c.categories, np.array([4,3,2,1])) # categories are now in new order self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1])) # output is the same self.assertTrue(c.min(), 4) self.assertTrue(c.max(), 1) # set_categories should set the ordering if specified c2 = c.set_categories([4,3,2,1],ordered=False) self.assertFalse(c2.ordered) self.assert_numpy_array_equal(c.get_values(), c2.get_values()) # set_categories should pass thru the ordering c2 = c.set_ordered(False).set_categories([4,3,2,1]) self.assertFalse(c2.ordered) self.assert_numpy_array_equal(c.get_values(), c2.get_values()) def test_rename_categories(self): cat = pd.Categorical(["a","b","c","a"]) # inplace=False: the old one must not be changed res = cat.rename_categories([1,2,3]) self.assert_numpy_array_equal(res.__array__(), np.array([1,2,3,1])) self.assert_numpy_array_equal(res.categories, np.array([1,2,3])) self.assert_numpy_array_equal(cat.__array__(), np.array(["a","b","c","a"])) self.assert_numpy_array_equal(cat.categories, np.array(["a","b","c"])) res = cat.rename_categories([1,2,3], inplace=True) # and now inplace self.assertIsNone(res) self.assert_numpy_array_equal(cat.__array__(), np.array([1,2,3,1])) self.assert_numpy_array_equal(cat.categories, np.array([1,2,3])) # lengthen def f(): cat.rename_categories([1,2,3,4]) self.assertRaises(ValueError, f) # shorten def f(): cat.rename_categories([1,2]) self.assertRaises(ValueError, f) def test_reorder_categories(self): cat = Categorical(["a","b","c","a"], ordered=True) old = cat.copy() new = Categorical(["a","b","c","a"], categories=["c","b","a"], ordered=True) # first inplace == False res = cat.reorder_categories(["c","b","a"]) # cat must be the same as before self.assert_categorical_equal(cat, old) # only res is changed self.assert_categorical_equal(res, new) # inplace == True res = cat.reorder_categories(["c","b","a"], inplace=True) self.assertIsNone(res) self.assert_categorical_equal(cat, new) # not all "old" included in "new" cat = Categorical(["a","b","c","a"], ordered=True) def f(): cat.reorder_categories(["a"]) self.assertRaises(ValueError, f) # still not all "old" in "new" def f(): cat.reorder_categories(["a","b","d"]) self.assertRaises(ValueError, f) # all "old" included in "new", but too long def f(): cat.reorder_categories(["a","b","c","d"]) self.assertRaises(ValueError, f) def test_add_categories(self): cat = Categorical(["a","b","c","a"], ordered=True) old = cat.copy() new = Categorical(["a","b","c","a"], categories=["a","b","c","d"], ordered=True) # first inplace == False res = cat.add_categories("d") self.assert_categorical_equal(cat, old) self.assert_categorical_equal(res, new) res = cat.add_categories(["d"]) self.assert_categorical_equal(cat, old) self.assert_categorical_equal(res, new) # inplace == True res = cat.add_categories("d", inplace=True) self.assert_categorical_equal(cat, new) self.assertIsNone(res) # new is in old categories def f(): cat.add_categories(["d"]) self.assertRaises(ValueError, f) # GH 9927 cat = Categorical(list("abc"), ordered=True) expected = Categorical(list("abc"), categories=list("abcde"), ordered=True) # test with Series, np.array, index, list res = cat.add_categories(Series(["d", "e"])) self.assert_categorical_equal(res, expected) res = cat.add_categories(np.array(["d", "e"])) self.assert_categorical_equal(res, expected) res = cat.add_categories(Index(["d", "e"])) self.assert_categorical_equal(res, 
expected) res = cat.add_categories(["d", "e"]) self.assert_categorical_equal(res, expected) def test_remove_categories(self): cat = Categorical(["a","b","c","a"], ordered=True) old = cat.copy() new = Categorical(["a","b",np.nan,"a"], categories=["a","b"], ordered=True) # first inplace == False res = cat.remove_categories("c") self.assert_categorical_equal(cat, old) self.assert_categorical_equal(res, new) res = cat.remove_categories(["c"]) self.assert_categorical_equal(cat, old) self.assert_categorical_equal(res, new) # inplace == True res = cat.remove_categories("c", inplace=True) self.assert_categorical_equal(cat, new) self.assertIsNone(res) # removal is not in categories def f(): cat.remove_categories(["c"]) self.assertRaises(ValueError, f) def test_remove_unused_categories(self): c = Categorical(["a","b","c","d","a"], categories=["a","b","c","d","e"]) exp_categories_all = np.array(["a","b","c","d","e"]) exp_categories_dropped = np.array(["a","b","c","d"]) self.assert_numpy_array_equal(c.categories, exp_categories_all) res = c.remove_unused_categories() self.assert_numpy_array_equal(res.categories, exp_categories_dropped) self.assert_numpy_array_equal(c.categories, exp_categories_all) res = c.remove_unused_categories(inplace=True) self.assert_numpy_array_equal(c.categories, exp_categories_dropped) self.assertIsNone(res) # with NaN values (GH11599) c = Categorical(["a","b","c",np.nan], categories=["a","b","c","d","e"]) res = c.remove_unused_categories() self.assert_numpy_array_equal(res.categories, np.array(["a","b","c"])) self.assert_numpy_array_equal(c.categories, exp_categories_all) val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan] cat = pd.Categorical(values=val, categories=list('ABCDEFG')) out = cat.remove_unused_categories() self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F']) self.assert_numpy_array_equal(out.codes, [ 2, -1, 1, 0, 1, 2, -1]) self.assertEqual(out.get_values().tolist(), val) alpha = list('abcdefghijklmnopqrstuvwxyz') val = np.random.choice(alpha[::2], 10000).astype('object') val[np.random.choice(len(val), 100)] = np.nan cat = pd.Categorical(values=val, categories=alpha) out = cat.remove_unused_categories() self.assertEqual(out.get_values().tolist(), val.tolist()) def test_nan_handling(self): # Nans are represented as -1 in codes c = Categorical(["a","b",np.nan,"a"]) self.assert_numpy_array_equal(c.categories , np.array(["a","b"])) self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0])) c[1] = np.nan self.assert_numpy_array_equal(c.categories , np.array(["a","b"])) self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0])) # If categories have nan included, the code should point to that instead with tm.assert_produces_warning(FutureWarning): c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]) self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0])) c[1] = np.nan self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(c._codes, np.array([0,2,2,0])) # Changing categories should also make the replaced category np.nan c = Categorical(["a","b","c","a"]) with tm.assert_produces_warning(FutureWarning): c.categories = ["a","b",np.nan] self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(c._codes, np.array([0,1,2,0])) # Adding nan to categories should make assigned nan point to the category! 
c = Categorical(["a","b",np.nan,"a"]) self.assert_numpy_array_equal(c.categories , np.array(["a","b"])) self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0])) with tm.assert_produces_warning(FutureWarning): c.set_categories(["a","b",np.nan], rename=True, inplace=True) self.assert_numpy_array_equal(c.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(c._codes, np.array([0,1,-1,0])) c[1] = np.nan self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(c._codes, np.array([0,2,-1,0])) # Remove null categories (GH 10156) cases = [ ([1.0, 2.0, np.nan], [1.0, 2.0]), (['a', 'b', None], ['a', 'b']), ([pd.Timestamp('2012-05-01'), pd.NaT], [pd.Timestamp('2012-05-01')]) ] null_values = [np.nan, None, pd.NaT] for with_null, without in cases: with tm.assert_produces_warning(FutureWarning): base = Categorical([], with_null) expected = Categorical([], without) for nullval in null_values: result = base.remove_categories(nullval) self.assert_categorical_equal(result, expected) # Different null values are indistinguishable for i, j in [(0, 1), (0, 2), (1, 2)]: nulls = [null_values[i], null_values[j]] def f(): with tm.assert_produces_warning(FutureWarning): Categorical([], categories=nulls) self.assertRaises(ValueError, f) def test_isnull(self): exp = np.array([False, False, True]) c = Categorical(["a","b",np.nan]) res = c.isnull() self.assert_numpy_array_equal(res, exp) with tm.assert_produces_warning(FutureWarning): c = Categorical(["a","b",np.nan], categories=["a","b",np.nan]) res = c.isnull() self.assert_numpy_array_equal(res, exp) # test both nan in categories and as -1 exp = np.array([True, False, True]) c = Categorical(["a","b",np.nan]) with tm.assert_produces_warning(FutureWarning): c.set_categories(["a","b",np.nan], rename=True, inplace=True) c[0] = np.nan res = c.isnull() self.assert_numpy_array_equal(res, exp) def test_codes_immutable(self): # Codes should be read only c = Categorical(["a","b","c","a", np.nan]) exp = np.array([0,1,2,0,-1],dtype='int8') self.assert_numpy_array_equal(c.codes, exp) # Assignments to codes should raise def f(): c.codes = np.array([0,1,2,0,1],dtype='int8') self.assertRaises(ValueError, f) # changes in the codes array should raise # np 1.6.1 raises RuntimeError rather than ValueError codes= c.codes def f(): codes[4] = 1 self.assertRaises(ValueError, f) # But even after getting the codes, the original array should still be writeable! 
c[4] = "a" exp = np.array([0,1,2,0,0],dtype='int8') self.assert_numpy_array_equal(c.codes, exp) c._codes[4] = 2 exp = np.array([0,1,2,0, 2],dtype='int8') self.assert_numpy_array_equal(c.codes, exp) def test_min_max(self): # unordered cats have no min/max cat = Categorical(["a","b","c","d"], ordered=False) self.assertRaises(TypeError, lambda : cat.min()) self.assertRaises(TypeError, lambda : cat.max()) cat = Categorical(["a","b","c","d"], ordered=True) _min = cat.min() _max = cat.max() self.assertEqual(_min, "a") self.assertEqual(_max, "d") cat = Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True) _min = cat.min() _max = cat.max() self.assertEqual(_min, "d") self.assertEqual(_max, "a") cat = Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True) _min = cat.min() _max = cat.max() self.assertTrue(np.isnan(_min)) self.assertEqual(_max, "b") _min = cat.min(numeric_only=True) self.assertEqual(_min, "c") _max = cat.max(numeric_only=True) self.assertEqual(_max, "b") cat = Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True) _min = cat.min() _max = cat.max() self.assertTrue(np.isnan(_min)) self.assertEqual(_max, 1) _min = cat.min(numeric_only=True) self.assertEqual(_min, 2) _max = cat.max(numeric_only=True) self.assertEqual(_max, 1) def test_unique(self): # categories are reordered based on value when ordered=False cat = Categorical(["a", "b"]) exp = np.asarray(["a", "b"]) res = cat.unique() self.assert_numpy_array_equal(res, exp) cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"]) res = cat.unique() self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, Categorical(exp)) cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"]) exp = np.asarray(["c", "a", "b"]) res = cat.unique() self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, Categorical(exp, categories=['c', 'a', 'b'])) # nan must be removed cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"]) res = cat.unique() exp = np.asarray(["b", np.nan, "a"], dtype=object) self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, Categorical(["b", np.nan, "a"], categories=["b", "a"])) def test_unique_ordered(self): # keep categories order when ordered=True cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True) res = cat.unique() exp = np.asarray(['b', 'a']) exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True) self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, exp_cat) cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True) res = cat.unique() exp = np.asarray(['c', 'b', 'a']) exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True) self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, exp_cat) cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'], ordered=True) res = cat.unique() exp = np.asarray(['b', 'a']) exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True) self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, exp_cat) cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'], ordered=True) res = cat.unique() exp = np.asarray(['b', np.nan, 'a'], dtype=object) exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True) self.assert_numpy_array_equal(res, exp) tm.assert_categorical_equal(res, exp_cat) def test_mode(self): s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = 
Categorical([5], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) s = Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = Categorical([5,1], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) s = Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = Categorical([], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) # NaN should not become the mode! s = Categorical([np.nan,np.nan,np.nan,4,5], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = Categorical([], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) s = Categorical([np.nan,np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = Categorical([4], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) s = Categorical([np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True) res = s.mode() exp = Categorical([4], categories=[5,4,3,2,1], ordered=True) self.assertTrue(res.equals(exp)) def test_sort(self): # unordered cats are sortable cat = Categorical(["a","b","b","a"], ordered=False) cat.sort_values() cat.sort() cat = Categorical(["a","c","b","d"], ordered=True) # sort_values res = cat.sort_values() exp = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp) cat = Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True) res = cat.sort_values() exp = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp) res = cat.sort_values(ascending=False) exp = np.array(["d","c","b","a"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp) # sort (inplace order) cat1 = cat.copy() cat1.sort() exp = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(cat1.__array__(), exp) def test_slicing_directly(self): cat = Categorical(["a","b","c","d","a","b","c"]) sliced = cat[3] tm.assert_equal(sliced, "d") sliced = cat[3:5] expected = Categorical(["d","a"], categories=['a', 'b', 'c', 'd']) self.assert_numpy_array_equal(sliced._codes, expected._codes) tm.assert_index_equal(sliced.categories, expected.categories) def test_set_item_nan(self): cat = pd.Categorical([1,2,3]) exp = pd.Categorical([1,np.nan,3], categories=[1,2,3]) cat[1] = np.nan self.assertTrue(cat.equals(exp)) # if nan in categories, the proper code should be set! 
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) with tm.assert_produces_warning(FutureWarning): cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1] = np.nan exp = np.array([0,3,2,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) with tm.assert_produces_warning(FutureWarning): cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = np.nan exp = np.array([0,3,3,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) with tm.assert_produces_warning(FutureWarning): cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = [np.nan, 1] exp = np.array([0,3,0,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3]) with tm.assert_produces_warning(FutureWarning): cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[1:3] = [np.nan, np.nan] exp = np.array([0,3,3,-1]) self.assert_numpy_array_equal(cat.codes, exp) cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3]) with tm.assert_produces_warning(FutureWarning): cat.set_categories([1,2,3, np.nan], rename=True, inplace=True) cat[pd.isnull(cat)] = np.nan exp = np.array([0,1,3,2]) self.assert_numpy_array_equal(cat.codes, exp) def test_shift(self): # GH 9416 cat = pd.Categorical(['a', 'b', 'c', 'd', 'a']) # shift forward sp1 = cat.shift(1) xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd']) self.assert_categorical_equal(sp1, xp1) self.assert_categorical_equal(cat[:-1], sp1[1:]) # shift back sn2 = cat.shift(-2) xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan], categories=['a', 'b', 'c', 'd']) self.assert_categorical_equal(sn2, xp2) self.assert_categorical_equal(cat[2:], sn2[:-2]) # shift by zero self.assert_categorical_equal(cat, cat.shift(0)) def test_nbytes(self): cat = pd.Categorical([1,2,3]) exp = cat._codes.nbytes + cat._categories.values.nbytes self.assertEqual(cat.nbytes, exp) def test_memory_usage(self): cat = pd.Categorical([1,2,3]) self.assertEqual(cat.nbytes, cat.memory_usage()) self.assertEqual(cat.nbytes, cat.memory_usage(deep=True)) cat = pd.Categorical(['foo','foo','bar']) self.assertEqual(cat.nbytes, cat.memory_usage()) self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes) def test_searchsorted(self): # https://github.com/pydata/pandas/issues/8420 s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk' ]) s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ]) c1 = pd.Categorical(s1, ordered=True) c2 = pd.Categorical(s2, ordered=True) # Single item array res = c1.searchsorted(['bread']) chk = s1.searchsorted(['bread']) exp = np.array([1]) self.assert_numpy_array_equal(res, exp) self.assert_numpy_array_equal(res, chk) # Scalar version of single item array # Categorical return np.array like pd.Series, but different from np.array.searchsorted() res = c1.searchsorted('bread') chk = s1.searchsorted('bread') exp = np.array([1]) self.assert_numpy_array_equal(res, exp) self.assert_numpy_array_equal(res, chk) # Searching for a value that is not present in the Categorical res = c1.searchsorted(['bread', 'eggs']) chk = s1.searchsorted(['bread', 'eggs']) exp = np.array([1, 4]) self.assert_numpy_array_equal(res, exp) self.assert_numpy_array_equal(res, chk) # Searching for a value that is not present, to the right res = c1.searchsorted(['bread', 'eggs'], side='right') chk = s1.searchsorted(['bread', 'eggs'], side='right') exp = np.array([3, 4]) # eggs before milk 
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)

        # As above, but with a sorter array to reorder an unsorted array
        res = c2.searchsorted(['bread', 'eggs'], side='right',
                              sorter=[0, 1, 2, 3, 5, 4])
        chk = s2.searchsorted(['bread', 'eggs'], side='right',
                              sorter=[0, 1, 2, 3, 5, 4])
        exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
        self.assert_numpy_array_equal(res, exp)
        self.assert_numpy_array_equal(res, chk)

    def test_deprecated_labels(self):
        # TODO: labels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        exp = cat.codes
        with tm.assert_produces_warning(FutureWarning):
            res = cat.labels
        self.assert_numpy_array_equal(res, exp)
        self.assertFalse(LooseVersion(pd.__version__) >= '0.18')

    def test_deprecated_levels(self):
        # TODO: levels is deprecated and should be removed in 0.18 or 2017,
        # whichever is earlier
        cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
        exp = cat.categories
        with tm.assert_produces_warning(FutureWarning):
            res = cat.levels
        self.assert_numpy_array_equal(res, exp)

        with tm.assert_produces_warning(FutureWarning):
            res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
        self.assert_numpy_array_equal(res.categories, exp)
        self.assertFalse(LooseVersion(pd.__version__) >= '0.18')

    def test_removed_names_produces_warning(self):
        # GH 10482
        with tm.assert_produces_warning(UserWarning):
            Categorical([0, 1], name="a")
        with tm.assert_produces_warning(UserWarning):
            Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")

    def test_datetime_categorical_comparison(self):
        dt_cat = pd.Categorical(pd.date_range('2014-01-01', periods=3),
                                ordered=True)
        self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
        self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])

    def test_reflected_comparison_with_scalars(self):
        # GH8658
        cat = pd.Categorical([1, 2, 3], ordered=True)
        self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
        self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])

    def test_comparison_with_unknown_scalars(self):
        # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
        # comparisons with scalars not in categories should raise for unequal
        # comps, but not for equal/not equal
        cat = pd.Categorical([1, 2, 3], ordered=True)
        self.assertRaises(TypeError, lambda: cat < 4)
        self.assertRaises(TypeError, lambda: cat > 4)
        self.assertRaises(TypeError, lambda: 4 < cat)
        self.assertRaises(TypeError, lambda: 4 > cat)
        self.assert_numpy_array_equal(cat == 4, [False, False, False])
        self.assert_numpy_array_equal(cat != 4, [True, True, True])


class TestCategoricalAsBlock(tm.TestCase):
    _multiprocess_can_split_ = True

    def setUp(self):
        self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
                                              'a', 'c', 'c', 'c'])

        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]

        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                                   right=False, labels=labels)
        self.cat = df

    def test_dtypes(self):
        # GH8143
        index = ['cat', 'obj', 'num']
        cat = pd.Categorical(['a', 'b', 'c'])
        obj = pd.Series(['a', 'b', 'c'])
        num = pd.Series([1, 2, 3])
        df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)

        result = df.dtypes == 'object'
        expected = Series([False, True, False], index=index)
        tm.assert_series_equal(result, expected)

        result = df.dtypes == 'int64'
        expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected) result = df.dtypes == 'category' expected = Series([True,False,False],index=index) tm.assert_series_equal(result, expected) def test_codes_dtypes(self): # GH 8453 result = Categorical(['foo','bar','baz']) self.assertTrue(result.codes.dtype == 'int8') result = Categorical(['foo%05d' % i for i in range(400) ]) self.assertTrue(result.codes.dtype == 'int16') result = Categorical(['foo%05d' % i for i in range(40000) ]) self.assertTrue(result.codes.dtype == 'int32') # adding cats result = Categorical(['foo','bar','baz']) self.assertTrue(result.codes.dtype == 'int8') result = result.add_categories(['foo%05d' % i for i in range(400) ]) self.assertTrue(result.codes.dtype == 'int16') # removing cats result = result.remove_categories(['foo%05d' % i for i in range(300) ]) self.assertTrue(result.codes.dtype == 'int8') def test_basic(self): # test basic creation / coercion of categoricals s = Series(self.factor, name='A') self.assertEqual(s.dtype,'category') self.assertEqual(len(s),len(self.factor)) str(s.values) str(s) # in a frame df = DataFrame({'A' : self.factor }) result = df['A'] tm.assert_series_equal(result,s) result = df.iloc[:,0] tm.assert_series_equal(result,s) self.assertEqual(len(df),len(self.factor)) str(df.values) str(df) df = DataFrame({'A' : s }) result = df['A'] tm.assert_series_equal(result,s) self.assertEqual(len(df),len(self.factor)) str(df.values) str(df) # multiples df = DataFrame({'A' : s, 'B' : s, 'C' : 1}) result1 = df['A'] result2 = df['B'] tm.assert_series_equal(result1, s) tm.assert_series_equal(result2, s, check_names=False) self.assertEqual(result2.name, 'B') self.assertEqual(len(df),len(self.factor)) str(df.values) str(df) # GH8623 x = pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. 
Doe']], columns=['person_id','person_name']) x['person_name'] = pd.Categorical(x.person_name) # doing this breaks transform expected = x.iloc[0].person_name result = x.person_name.iloc[0] self.assertEqual(result,expected) result = x.person_name[0] self.assertEqual(result,expected) result = x.person_name.loc[0] self.assertEqual(result,expected) def test_creation_astype(self): l = ["a","b","c","a"] s = pd.Series(l) exp = pd.Series(Categorical(l)) res = s.astype('category') tm.assert_series_equal(res, exp) l = [1,2,3,1] s = pd.Series(l) exp = pd.Series(Categorical(l)) res = s.astype('category') tm.assert_series_equal(res, exp) df = pd.DataFrame({"cats":[1,2,3,4,5,6], "vals":[1,2,3,4,5,6]}) cats = Categorical([1,2,3,4,5,6]) exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]}) df["cats"] = df["cats"].astype("category") tm.assert_frame_equal(exp_df, df) df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]}) cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd']) exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]}) df["cats"] = df["cats"].astype("category") tm.assert_frame_equal(exp_df, df) # with keywords l = ["a","b","c","a"] s = pd.Series(l) exp = pd.Series(Categorical(l, ordered=True)) res = s.astype('category', ordered=True) tm.assert_series_equal(res, exp) exp = pd.Series(Categorical(l, categories=list('abcdef'), ordered=True)) res = s.astype('category', categories=list('abcdef'), ordered=True) tm.assert_series_equal(res, exp) def test_construction_series(self): l = [1,2,3,1] exp = Series(l).astype('category') res = Series(l,dtype='category') tm.assert_series_equal(res, exp) l = ["a","b","c","a"] exp = Series(l).astype('category') res = Series(l,dtype='category') tm.assert_series_equal(res, exp) # insert into frame with different index # GH 8076 index = pd.date_range('20000101', periods=3) expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c'])) expected.index = index expected = DataFrame({'x': expected}) df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index) tm.assert_frame_equal(df, expected) def test_construction_frame(self): # GH8626 # dict creation df = DataFrame({ 'A' : list('abc') }, dtype='category') expected = Series(list('abc'), dtype='category', name='A') tm.assert_series_equal(df['A'], expected) # to_frame s = Series(list('abc'), dtype='category') result = s.to_frame() expected = Series(list('abc'), dtype='category', name=0) tm.assert_series_equal(result[0], expected) result = s.to_frame(name='foo') expected = Series(list('abc'), dtype='category', name='foo') tm.assert_series_equal(result['foo'], expected) # list-like creation df = DataFrame(list('abc'), dtype='category') expected = Series(list('abc'), dtype='category', name=0) tm.assert_series_equal(df[0], expected) # ndim != 1 df = DataFrame([pd.Categorical(list('abc'))]) expected = DataFrame({ 0 : Series(list('abc'),dtype='category')}) tm.assert_frame_equal(df,expected) df = DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abd'))]) expected = DataFrame({ 0 : Series(list('abc'),dtype='category'), 1 : Series(list('abd'),dtype='category')},columns=[0,1]) tm.assert_frame_equal(df,expected) # mixed df = DataFrame([pd.Categorical(list('abc')),list('def')]) expected = DataFrame({ 0 : Series(list('abc'),dtype='category'), 1 : list('def')},columns=[0,1]) tm.assert_frame_equal(df,expected) # invalid (shape) self.assertRaises(ValueError, lambda : DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abdefg'))])) # ndim > 1 
self.assertRaises(NotImplementedError, lambda : pd.Categorical(np.array([list('abcd')]))) def test_reshaping(self): p = tm.makePanel() p['str'] = 'foo' df = p.to_frame() df['category'] = df['str'].astype('category') result = df['category'].unstack() c = Categorical(['foo']*len(p.major_axis)) expected = DataFrame({'A' : c.copy(), 'B' : c.copy(), 'C' : c.copy(), 'D' : c.copy()}, columns=Index(list('ABCD'),name='minor'), index=p.major_axis.set_names('major')) tm.assert_frame_equal(result, expected) def test_reindex(self): index = pd.date_range('20000101', periods=3) # reindexing to an invalid Categorical s = Series(['a', 'b', 'c'],dtype='category') result = s.reindex(index) expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c'])) expected.index = index tm.assert_series_equal(result, expected) # partial reindexing expected = Series(Categorical(values=['b','c'],categories=['a', 'b', 'c'])) expected.index = [1,2] result = s.reindex([1,2]) tm.assert_series_equal(result, expected) expected = Series(Categorical(values=['c',np.nan],categories=['a', 'b', 'c'])) expected.index = [2,3] result = s.reindex([2,3]) tm.assert_series_equal(result, expected) def test_sideeffects_free(self): # Passing a categorical to a Series and then changing values in either the series or the # categorical should not change the values in the other one, IF you specify copy! cat = Categorical(["a","b","c","a"]) s = pd.Series(cat, copy=True) self.assertFalse(s.cat is cat) s.cat.categories = [1,2,3] exp_s = np.array([1,2,3,1]) exp_cat = np.array(["a","b","c","a"]) self.assert_numpy_array_equal(s.__array__(), exp_s) self.assert_numpy_array_equal(cat.__array__(), exp_cat) # setting s[0] = 2 exp_s2 = np.array([2,2,3,1]) self.assert_numpy_array_equal(s.__array__(), exp_s2) self.assert_numpy_array_equal(cat.__array__(), exp_cat) # however, copy is False by default # so this WILL change values cat = Categorical(["a","b","c","a"]) s = pd.Series(cat) self.assertTrue(s.values is cat) s.cat.categories = [1,2,3] exp_s = np.array([1,2,3,1]) self.assert_numpy_array_equal(s.__array__(), exp_s) self.assert_numpy_array_equal(cat.__array__(), exp_s) s[0] = 2 exp_s2 = np.array([2,2,3,1]) self.assert_numpy_array_equal(s.__array__(), exp_s2) self.assert_numpy_array_equal(cat.__array__(), exp_s2) def test_nan_handling(self): # Nans are represented as -1 in labels s = Series(Categorical(["a","b",np.nan,"a"])) self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"])) self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0])) # If categories have nan included, the label should point to that instead with tm.assert_produces_warning(FutureWarning): s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])) self.assert_numpy_array_equal(s2.cat.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0])) # Changing categories should also make the replaced category np.nan s3 = Series(Categorical(["a","b","c","a"])) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s3.cat.categories = ["a","b",np.nan] self.assert_numpy_array_equal(s3.cat.categories, np.array(["a","b",np.nan], dtype=np.object_)) self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0])) def test_cat_accessor(self): s = Series(Categorical(["a","b",np.nan,"a"])) self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"])) self.assertEqual(s.cat.ordered, False) exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"]) 
s.cat.set_categories(["b", "a"], inplace=True) self.assertTrue(s.values.equals(exp)) res = s.cat.set_categories(["b", "a"]) self.assertTrue(res.values.equals(exp)) exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"]) s[:] = "a" s = s.cat.remove_unused_categories() self.assert_numpy_array_equal(s.cat.categories, np.array(["a"])) def test_sequence_like(self): # GH 7839 # make sure can iterate df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) df['grade'] = Categorical(df['raw_grade']) # basic sequencing testing result = list(df.grade.values) expected = np.array(df.grade.values).tolist() tm.assert_almost_equal(result,expected) # iteration for t in df.itertuples(index=False): str(t) for row, s in df.iterrows(): str(s) for c, col in df.iteritems(): str(s) def test_series_delegations(self): # invalid accessor self.assertRaises(AttributeError, lambda : Series([1,2,3]).cat) tm.assertRaisesRegexp(AttributeError, r"Can only use .cat accessor with a 'category' dtype", lambda : Series([1,2,3]).cat) self.assertRaises(AttributeError, lambda : Series(['a','b','c']).cat) self.assertRaises(AttributeError, lambda : Series(np.arange(5.)).cat) self.assertRaises(AttributeError, lambda : Series([Timestamp('20130101')]).cat) # Series should delegate calls to '.categories', '.codes', '.ordered' and the # methods '.set_categories()' 'drop_unused_categories()' to the categorical s = Series(Categorical(["a","b","c","a"], ordered=True)) exp_categories = np.array(["a","b","c"]) self.assert_numpy_array_equal(s.cat.categories, exp_categories) s.cat.categories = [1,2,3] exp_categories = np.array([1,2,3]) self.assert_numpy_array_equal(s.cat.categories, exp_categories) exp_codes = Series([0,1,2,0],dtype='int8') tm.assert_series_equal(s.cat.codes, exp_codes) self.assertEqual(s.cat.ordered, True) s = s.cat.as_unordered() self.assertEqual(s.cat.ordered, False) s.cat.as_ordered(inplace=True) self.assertEqual(s.cat.ordered, True) # reorder s = Series(Categorical(["a","b","c","a"], ordered=True)) exp_categories = np.array(["c","b","a"]) exp_values = np.array(["a","b","c","a"]) s = s.cat.set_categories(["c","b","a"]) self.assert_numpy_array_equal(s.cat.categories, exp_categories) self.assert_numpy_array_equal(s.values.__array__(), exp_values) self.assert_numpy_array_equal(s.__array__(), exp_values) # remove unused categories s = Series(Categorical(["a","b","b","a"], categories=["a","b","c"])) exp_categories = np.array(["a","b"]) exp_values = np.array(["a","b","b","a"]) s = s.cat.remove_unused_categories() self.assert_numpy_array_equal(s.cat.categories, exp_categories) self.assert_numpy_array_equal(s.values.__array__(), exp_values) self.assert_numpy_array_equal(s.__array__(), exp_values) # This method is likely to be confused, so test that it raises an error on wrong inputs: def f(): s.set_categories([4,3,2,1]) self.assertRaises(Exception, f) # right: s.cat.set_categories([4,3,2,1]) def test_series_functions_no_warnings(self): df = pd.DataFrame({'value': np.random.randint(0, 100, 20)}) labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)] with tm.assert_produces_warning(False): df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) def test_assignment_to_dataframe(self): # assignment df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')}) labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ] df = df.sort_values(by=['value'], ascending=True) s = pd.cut(df.value, range(0, 10500, 500), right=False, 
labels=labels) d = s.values df['D'] = d str(df) result = df.dtypes expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D']) tm.assert_series_equal(result,expected) df['E'] = s str(df) result = df.dtypes expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()], index=['value','D','E']) tm.assert_series_equal(result,expected) result1 = df['D'] result2 = df['E'] self.assertTrue(result1._data._block.values.equals(d)) # sorting s.name = 'E' self.assertTrue(result2.sort_index().equals(s.sort_index())) cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10]) df = pd.DataFrame(pd.Series(cat)) def test_describe(self): # Categoricals should not show up together with numerical columns result = self.cat.describe() self.assertEqual(len(result.columns),1) # In a frame, describe() for the cat should be the same as for string arrays (count, unique, # top, freq) cat = Categorical(["a","b","b","b"], categories=['a','b','c'], ordered=True) s = Series(cat) result = s.describe() expected = Series([4,2,"b",3],index=['count','unique','top', 'freq']) tm.assert_series_equal(result,expected) cat = pd.Series(pd.Categorical(["a","b","c","c"])) df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]}) res = df3.describe() self.assert_numpy_array_equal(res["cat"].values, res["s"].values) def test_repr(self): a = pd.Series(pd.Categorical([1,2,3,4])) exp = u("0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]") self.assertEqual(exp, a.__unicode__()) a = pd.Series(pd.Categorical(["a","b"] *25)) exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" + "dtype: category\nCategories (2, object): [a, b]") with option_context("display.max_rows", 5): self.assertEqual(exp, repr(a)) levs = list("abcdefghijklmnopqrstuvwxyz") a = pd.Series(pd.Categorical(["a","b"], categories=levs, ordered=True)) exp = u("0 a\n1 b\n" + "dtype: category\n" "Categories (26, object): [a < b < c < d ... w < x < y < z]") self.assertEqual(exp,a.__unicode__()) def test_categorical_repr(self): c = pd.Categorical([1, 2 ,3]) exp = """[1, 2, 3] Categories (3, int64): [1, 2, 3]""" self.assertEqual(repr(c), exp) c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3]) exp = """[1, 2, 3, 1, 2, 3] Categories (3, int64): [1, 2, 3]""" self.assertEqual(repr(c), exp) c = pd.Categorical([1, 2, 3, 4, 5] * 10) exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] Length: 50 Categories (5, int64): [1, 2, 3, 4, 5]""" self.assertEqual(repr(c), exp) c = pd.Categorical(np.arange(20)) exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] Length: 20 Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]""" self.assertEqual(repr(c), exp) def test_categorical_repr_ordered(self): c = pd.Categorical([1, 2 ,3], ordered=True) exp = """[1, 2, 3] Categories (3, int64): [1 < 2 < 3]""" self.assertEqual(repr(c), exp) c = pd.Categorical([1, 2 ,3, 1, 2 ,3], categories=[1, 2, 3], ordered=True) exp = """[1, 2, 3, 1, 2, 3] Categories (3, int64): [1 < 2 < 3]""" self.assertEqual(repr(c), exp) c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True) exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] Length: 50 Categories (5, int64): [1 < 2 < 3 < 4 < 5]""" self.assertEqual(repr(c), exp) c = pd.Categorical(np.arange(20), ordered=True) exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] Length: 20 Categories (20, int64): [0 < 1 < 2 < 3 ... 
16 < 17 < 18 < 19]""" self.assertEqual(repr(c), exp) def test_categorical_repr_datetime(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) c = pd.Categorical(idx) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" self.assertEqual(repr(c), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') c = pd.Categorical(idx) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(c), exp) def test_categorical_repr_datetime_ordered(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) c = pd.Categorical(idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" self.assertEqual(repr(c), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') c = pd.Categorical(idx, ordered=True) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 
10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(c), exp) def test_categorical_repr_period(self): idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) c = pd.Categorical(idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]""" self.assertEqual(repr(c), exp) idx = pd.period_range('2011-01', freq='M', periods=5) c = pd.Categorical(idx) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" self.assertEqual(repr(c), exp) def test_categorical_repr_period_ordered(self): idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) c = pd.Categorical(idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < 2011-01-01 13:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < 2011-01-01 13:00]""" self.assertEqual(repr(c), exp) idx = pd.period_range('2011-01', freq='M', periods=5) c = pd.Categorical(idx, ordered=True) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" self.assertEqual(repr(c), exp) def test_categorical_repr_timedelta(self): idx = pd.timedelta_range('1 days', periods=5) c = pd.Categorical(idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" self.assertEqual(repr(c), exp) idx = pd.timedelta_range('1 hours', periods=20) c = pd.Categorical(idx) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 
days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]""" self.assertEqual(repr(c), exp) def test_categorical_repr_timedelta_ordered(self): idx = pd.timedelta_range('1 days', periods=5) c = pd.Categorical(idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" self.assertEqual(repr(c), exp) idx = pd.timedelta_range('1 hours', periods=20) c = pd.Categorical(idx, ordered=True) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 20 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < 18 days 01:00:00 < 19 days 01:00:00]""" self.assertEqual(repr(c), exp) c = pd.Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] Length: 40 Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < 18 days 01:00:00 < 19 days 01:00:00]""" self.assertEqual(repr(c), exp) def test_categorical_series_repr(self): s = pd.Series(pd.Categorical([1, 2 ,3])) exp = """0 1 1 2 2 3 dtype: category Categories (3, int64): [1, 2, 3]""" self.assertEqual(repr(s), exp) s = pd.Series(pd.Categorical(np.arange(10))) exp = """0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 dtype: category Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_ordered(self): s = pd.Series(pd.Categorical([1, 2 ,3], ordered=True)) exp = """0 1 1 2 2 3 dtype: category Categories (3, int64): [1 < 2 < 3]""" self.assertEqual(repr(s), exp) s = pd.Series(pd.Categorical(np.arange(10), ordered=True)) exp = """0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 dtype: category Categories (10, int64): [0 < 1 < 2 < 3 ... 
6 < 7 < 8 < 9]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_datetime(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) s = pd.Series(pd.Categorical(idx)) exp = """0 2011-01-01 09:00:00 1 2011-01-01 10:00:00 2 2011-01-01 11:00:00 3 2011-01-01 12:00:00 4 2011-01-01 13:00:00 dtype: category Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" self.assertEqual(repr(s), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') s = pd.Series(pd.Categorical(idx)) exp = """0 2011-01-01 09:00:00-05:00 1 2011-01-01 10:00:00-05:00 2 2011-01-01 11:00:00-05:00 3 2011-01-01 12:00:00-05:00 4 2011-01-01 13:00:00-05:00 dtype: category Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_datetime_ordered(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 2011-01-01 09:00:00 1 2011-01-01 10:00:00 2 2011-01-01 11:00:00 3 2011-01-01 12:00:00 4 2011-01-01 13:00:00 dtype: category Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" self.assertEqual(repr(s), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 2011-01-01 09:00:00-05:00 1 2011-01-01 10:00:00-05:00 2 2011-01-01 11:00:00-05:00 3 2011-01-01 12:00:00-05:00 4 2011-01-01 13:00:00-05:00 dtype: category Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < 2011-01-01 13:00:00-05:00]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_period(self): idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) s = pd.Series(pd.Categorical(idx)) exp = """0 2011-01-01 09:00 1 2011-01-01 10:00 2 2011-01-01 11:00 3 2011-01-01 12:00 4 2011-01-01 13:00 dtype: category Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]""" self.assertEqual(repr(s), exp) idx = pd.period_range('2011-01', freq='M', periods=5) s = pd.Series(pd.Categorical(idx)) exp = """0 2011-01 1 2011-02 2 2011-03 3 2011-04 4 2011-05 dtype: category Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_period_ordered(self): idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 2011-01-01 09:00 1 2011-01-01 10:00 2 2011-01-01 11:00 3 2011-01-01 12:00 4 2011-01-01 13:00 dtype: category Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < 2011-01-01 13:00]""" self.assertEqual(repr(s), exp) idx = pd.period_range('2011-01', freq='M', periods=5) s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 2011-01 1 2011-02 2 2011-03 3 2011-04 4 2011-05 dtype: category Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_timedelta(self): idx = pd.timedelta_range('1 days', periods=5) s = pd.Series(pd.Categorical(idx)) exp = """0 1 days 1 2 days 2 3 
days 3 4 days 4 5 days dtype: category Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" self.assertEqual(repr(s), exp) idx = pd.timedelta_range('1 hours', periods=10) s = pd.Series(pd.Categorical(idx)) exp = """0 0 days 01:00:00 1 1 days 01:00:00 2 2 days 01:00:00 3 3 days 01:00:00 4 4 days 01:00:00 5 5 days 01:00:00 6 6 days 01:00:00 7 7 days 01:00:00 8 8 days 01:00:00 9 9 days 01:00:00 dtype: category Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00]""" self.assertEqual(repr(s), exp) def test_categorical_series_repr_timedelta_ordered(self): idx = pd.timedelta_range('1 days', periods=5) s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 1 days 1 2 days 2 3 days 3 4 days 4 5 days dtype: category Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" self.assertEqual(repr(s), exp) idx = pd.timedelta_range('1 hours', periods=10) s = pd.Series(pd.Categorical(idx, ordered=True)) exp = """0 0 days 01:00:00 1 1 days 01:00:00 2 2 days 01:00:00 3 3 days 01:00:00 4 4 days 01:00:00 5 5 days 01:00:00 6 6 days 01:00:00 7 7 days 01:00:00 8 8 days 01:00:00 9 9 days 01:00:00 dtype: category Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 < 8 days 01:00:00 < 9 days 01:00:00]""" self.assertEqual(repr(s), exp) def test_categorical_index_repr(self): idx = pd.CategoricalIndex(pd.Categorical([1, 2 ,3])) exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" self.assertEqual(repr(idx), exp) i = pd.CategoricalIndex(pd.Categorical(np.arange(10))) exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_ordered(self): i = pd.CategoricalIndex(pd.Categorical([1, 2 ,3], ordered=True)) exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True)) exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_datetime(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_datetime_ordered(self): idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5) i = 
pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', '2011-01-01 11:00:00', '2011-01-01 12:00:00', '2011-01-01 13:00:00'], categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_period(self): # test all length idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) i = pd.CategoricalIndex(pd.Categorical(idx.append(idx))) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.period_range('2011-01', freq='M', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], 
ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_period_ordered(self): idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', '2011-01-01 13:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.period_range('2011-01', freq='M', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_timedelta(self): idx = pd.timedelta_range('1 days', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.timedelta_range('1 hours', periods=10) i = pd.CategoricalIndex(pd.Categorical(idx)) exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_index_repr_timedelta_ordered(self): idx = pd.timedelta_range('1 days', periods=5) i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) idx = pd.timedelta_range('1 hours', periods=10) i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True)) exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', '9 days 01:00:00'], categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" self.assertEqual(repr(i), exp) def test_categorical_frame(self): # normal DataFrame dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') p = pd.period_range('2011-01', freq='M', periods=5) df = pd.DataFrame({'dt': dt, 'p': p}) exp = """ dt p 0 2011-01-01 09:00:00-05:00 2011-01 1 2011-01-01 10:00:00-05:00 2011-02 2 2011-01-01 11:00:00-05:00 2011-03 3 2011-01-01 12:00:00-05:00 2011-04 4 2011-01-01 13:00:00-05:00 2011-05""" df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)}) self.assertEqual(repr(df), exp) def test_info(self): # make sure it works n = 2500 df = DataFrame({ 'int64' : np.random.randint(100,size=n) }) df['category'] = Series(np.array(list('abcdefghij')).take(np.random.randint(0,10,size=n))).astype('category') df.isnull() df.info() df2 = df[df['category']=='d'] df2.info() def 
test_groupby_sort(self): # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # This should result in a properly sorted Series so that the plot # has a sorted x axis #self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') res = self.cat.groupby(['value_group'])['value_group'].count() exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name) tm.assert_series_equal(res, exp) def test_min_max(self): # unordered cats have no min/max cat = Series(Categorical(["a","b","c","d"], ordered=False)) self.assertRaises(TypeError, lambda : cat.min()) self.assertRaises(TypeError, lambda : cat.max()) cat = Series(Categorical(["a","b","c","d"], ordered=True)) _min = cat.min() _max = cat.max() self.assertEqual(_min, "a") self.assertEqual(_max, "d") cat = Series(Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True)) _min = cat.min() _max = cat.max() self.assertEqual(_min, "d") self.assertEqual(_max, "a") cat = Series(Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True)) _min = cat.min() _max = cat.max() self.assertTrue(np.isnan(_min)) self.assertEqual(_max, "b") cat = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)) _min = cat.min() _max = cat.max() self.assertTrue(np.isnan(_min)) self.assertEqual(_max, 1) def test_mode(self): s = Series(Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)) res = s.mode() exp = Series(Categorical([5], categories=[5,4,3,2,1], ordered=True)) tm.assert_series_equal(res, exp) s = Series(Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True)) res = s.mode() exp = Series(Categorical([5,1], categories=[5,4,3,2,1], ordered=True)) tm.assert_series_equal(res, exp) s = Series(Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True)) res = s.mode() exp = Series(Categorical([], categories=[5,4,3,2,1], ordered=True)) tm.assert_series_equal(res, exp) def test_value_counts(self): s = pd.Series(pd.Categorical(["a","b","c","c","c","b"], categories=["c","a","b","d"])) res = s.value_counts(sort=False) exp = Series([3,1,2,0], index=pd.CategoricalIndex(["c","a","b","d"])) tm.assert_series_equal(res, exp) res = s.value_counts(sort=True) exp = Series([3,2,1,0], index=pd.CategoricalIndex(["c","b","a","d"])) tm.assert_series_equal(res, exp) def test_value_counts_with_nan(self): # https://github.com/pydata/pandas/issues/9443 s = pd.Series(["a", "b", "a"], dtype="category") tm.assert_series_equal( s.value_counts(dropna=True), pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))) tm.assert_series_equal( s.value_counts(dropna=False), pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))) s = pd.Series(["a", "b", None, "a", None, None], dtype="category") tm.assert_series_equal( s.value_counts(dropna=True), pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))) tm.assert_series_equal( s.value_counts(dropna=False), pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"]))) # When we aren't sorting by counts, and np.nan isn't a # category, it should be last. 
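        # Illustrative sketch (not part of the original test): for a
        # category-dtype Series, dropna only controls whether the NaN bucket is
        # reported; counts for the real categories are unaffected:
        #
        #     s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
        #     s.value_counts(dropna=True)    # a: 2, b: 1
        #     s.value_counts(dropna=False)   # NaN: 3, a: 2, b: 1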
tm.assert_series_equal( s.value_counts(dropna=False, sort=False), pd.Series([2, 1, 3], index=pd.CategoricalIndex(["a", "b", np.nan]))) with tm.assert_produces_warning(FutureWarning): s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan])) tm.assert_series_equal( s.value_counts(dropna=True), pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))) tm.assert_series_equal( s.value_counts(dropna=False), pd.Series([2, 1, 0], index=pd.CategoricalIndex(["a", "b", np.nan]))) with tm.assert_produces_warning(FutureWarning): s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None], categories=["a", "b", np.nan])) tm.assert_series_equal( s.value_counts(dropna=True), pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"]))) tm.assert_series_equal( s.value_counts(dropna=False), pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"]))) def test_groupby(self): cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True) data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats}) expected = DataFrame({'a': Series([1, 2, 4, np.nan], index=pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b'))}) result = data.groupby("b").mean() tm.assert_frame_equal(result, expected) raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True) raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True) df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]}) # single grouper gb = df.groupby("A") exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A') expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) result = gb.sum() tm.assert_frame_equal(result, expected) # multiple groupers gb = df.groupby(['A','B']) expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan], index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) }) result = gb.sum() tm.assert_frame_equal(result, expected) # multiple groupers with a non-cat df = df.copy() df['C'] = ['foo','bar']*2 gb = df.groupby(['A','B','C']) expected = DataFrame({ 'values' : Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'], ['c','d','y'], ['foo','bar']], names=['A','B','C'])) }).sortlevel() expected.iloc[[1,2,7,8],0] = [1,2,3,4] result = gb.sum() tm.assert_frame_equal(result, expected) # GH 8623 x=pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. 
Doe']], columns=['person_id','person_name']) x['person_name'] = pd.Categorical(x.person_name) g = x.groupby(['person_id']) result = g.transform(lambda x:x) tm.assert_frame_equal(result, x[['person_name']]) result = x.drop_duplicates('person_name') expected = x.iloc[[0,1]] tm.assert_frame_equal(result, expected) def f(x): return x.drop_duplicates('person_name').iloc[0] result = g.apply(f) expected = x.iloc[[0,1]].copy() expected.index = Index([1,2],name='person_id') expected['person_name'] = expected['person_name'].astype('object') tm.assert_frame_equal(result, expected) # GH 9921 # Monotonic df = DataFrame({"a": [5, 15, 25]}) c = pd.cut(df.a, bins=[0,10,20,30,40]) result = df.a.groupby(c).transform(sum) tm.assert_series_equal(result, df['a'], check_names=False) self.assertTrue(result.name is None) tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) # Filter tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) tm.assert_frame_equal(df.groupby(c).filter(np.all), df) # Non-monotonic df = DataFrame({"a": [5, 15, 25, -5]}) c = pd.cut(df.a, bins=[-10, 0,10,20,30,40]) result = df.a.groupby(c).transform(sum) tm.assert_series_equal(result, df['a'], check_names=False) self.assertTrue(result.name is None) tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) # GH 9603 df = pd.DataFrame({'a': [1, 0, 0, 0]}) c = pd.cut(df.a, [0, 1, 2, 3, 4]) result = df.groupby(c).apply(len) expected = pd.Series([1, 0, 0, 0], index=pd.CategoricalIndex(c.values.categories)) expected.index.name = 'a' tm.assert_series_equal(result, expected) def test_pivot_table(self): raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True) raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True) df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]}) result = pd.pivot_table(df, values='values', index=['A', 'B']) expected = Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan], index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B']), name='values') tm.assert_series_equal(result, expected) def test_count(self): s = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)) result = s.count() self.assertEqual(result, 2) def test_sort(self): c = Categorical(["a","b","b","a"], ordered=False) cat = Series(c) # 9816 deprecated with tm.assert_produces_warning(FutureWarning): c.order() # sort in the categories order expected = Series(Categorical(["a","a","b","b"], ordered=False),index=[0,3,1,2]) result = cat.sort_values() tm.assert_series_equal(result, expected) cat = Series(Categorical(["a","c","b","d"], ordered=True)) res = cat.sort_values() exp = np.array(["a","b","c","d"]) self.assert_numpy_array_equal(res.__array__(), exp) cat = Series(Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True)) res = cat.sort_values() exp = np.array(["a","b","c","d"]) self.assert_numpy_array_equal(res.__array__(), exp) res = cat.sort_values(ascending=False) exp = np.array(["d","c","b","a"]) self.assert_numpy_array_equal(res.__array__(), exp) raw_cat1 = Categorical(["a","b","c","d"], categories=["a","b","c","d"], ordered=False) raw_cat2 = Categorical(["a","b","c","d"], 
categories=["d","c","b","a"], ordered=True) s = ["a","b","c","d"] df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]}) # Cats must be sorted in a dataframe res = df.sort_values(by=["string"], ascending=False) exp = np.array(["d", "c", "b", "a"]) self.assert_numpy_array_equal(res["sort"].values.__array__(), exp) self.assertEqual(res["sort"].dtype, "category") res = df.sort_values(by=["sort"], ascending=False) exp = df.sort_values(by=["string"], ascending=True) self.assert_numpy_array_equal(res["values"], exp["values"]) self.assertEqual(res["sort"].dtype, "category") self.assertEqual(res["unsort"].dtype, "category") # unordered cat, but we allow this df.sort_values(by=["unsort"], ascending=False) # multi-columns sort # GH 7848 df = DataFrame({"id":[6,5,4,3,2,1], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) df["grade"] = pd.Categorical(df["raw_grade"], ordered=True) df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a']) # sorts 'grade' according to the order of the categories result = df.sort_values(by=['grade']) expected = df.iloc[[1,2,5,0,3,4]] tm.assert_frame_equal(result,expected) # multi result = df.sort_values(by=['grade', 'id']) expected = df.iloc[[2,1,5,4,3,0]] tm.assert_frame_equal(result,expected) # reverse cat = Categorical(["a","c","c","b","d"], ordered=True) res = cat.sort_values(ascending=False) exp_val = np.array(["d","c", "c", "b","a"],dtype=object) exp_categories = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp_val) self.assert_numpy_array_equal(res.categories, exp_categories) # some NaN positions cat = Categorical(["a","c","b","d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position='last') exp_val = np.array(["d","c","b","a", np.nan],dtype=object) exp_categories = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp_val) self.assert_numpy_array_equal(res.categories, exp_categories) cat = Categorical(["a","c","b","d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position='first') exp_val = np.array([np.nan, "d","c","b","a"],dtype=object) exp_categories = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp_val) self.assert_numpy_array_equal(res.categories, exp_categories) cat = Categorical(["a","c","b","d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position='first') exp_val = np.array([np.nan, "d","c","b","a"],dtype=object) exp_categories = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp_val) self.assert_numpy_array_equal(res.categories, exp_categories) cat = Categorical(["a","c","b","d", np.nan], ordered=True) res = cat.sort_values(ascending=False, na_position='last') exp_val = np.array(["d","c","b","a",np.nan],dtype=object) exp_categories = np.array(["a","b","c","d"],dtype=object) self.assert_numpy_array_equal(res.__array__(), exp_val) self.assert_numpy_array_equal(res.categories, exp_categories) def test_slicing(self): cat = Series(Categorical([1,2,3,4])) reversed = cat[::-1] exp = np.array([4,3,2,1]) self.assert_numpy_array_equal(reversed.__array__(), exp) df = DataFrame({'value': (np.arange(100)+1).astype('int64')}) df['D'] = pd.cut(df.value, bins=[0,25,50,75,100]) expected = Series([11,'(0, 25]'], index=['value','D'], name=10) result = df.iloc[10] tm.assert_series_equal(result, expected) expected = DataFrame({'value': np.arange(11,21).astype('int64')}, index=np.arange(10,20).astype('int64')) 
expected['D'] = pd.cut(expected.value, bins=[0,25,50,75,100]) result = df.iloc[10:20] tm.assert_frame_equal(result, expected) expected = Series([9,'(0, 25]'],index=['value', 'D'], name=8) result = df.loc[8] tm.assert_series_equal(result, expected) def test_slicing_and_getting_ops(self): # systematically test the slicing operations: # for all slicing ops: # - returning a dataframe # - returning a column # - returning a row # - returning a single value cats = pd.Categorical(["a","c","b","c","c","c","c"], categories=["a","b","c"]) idx = pd.Index(["h","i","j","k","l","m","n"]) values= [1,2,3,4,5,6,7] df = pd.DataFrame({"cats":cats,"values":values}, index=idx) # the expected values cats2 = pd.Categorical(["b","c"], categories=["a","b","c"]) idx2 = pd.Index(["j","k"]) values2= [3,4] # 2:4,: | "j":"k",: exp_df = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2) # :,"cats" | :,0 exp_col = pd.Series(cats,index=idx,name='cats') # "j",: | 2,: exp_row = pd.Series(["b",3], index=["cats","values"], dtype="object", name="j") # "j","cats | 2,0 exp_val = "b" # iloc # frame res_df = df.iloc[2:4,:] tm.assert_frame_equal(res_df, exp_df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) # row res_row = df.iloc[2,:] tm.assert_series_equal(res_row, exp_row) tm.assertIsInstance(res_row["cats"], compat.string_types) # col res_col = df.iloc[:,0] tm.assert_series_equal(res_col, exp_col) self.assertTrue(com.is_categorical_dtype(res_col)) # single value res_val = df.iloc[2,0] self.assertEqual(res_val, exp_val) # loc # frame res_df = df.loc["j":"k",:] tm.assert_frame_equal(res_df, exp_df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) # row res_row = df.loc["j",:] tm.assert_series_equal(res_row, exp_row) tm.assertIsInstance(res_row["cats"], compat.string_types) # col res_col = df.loc[:,"cats"] tm.assert_series_equal(res_col, exp_col) self.assertTrue(com.is_categorical_dtype(res_col)) # single value res_val = df.loc["j","cats"] self.assertEqual(res_val, exp_val) # ix # frame #res_df = df.ix["j":"k",[0,1]] # doesn't work? 
res_df = df.ix["j":"k",:] tm.assert_frame_equal(res_df, exp_df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) # row res_row = df.ix["j",:] tm.assert_series_equal(res_row, exp_row) tm.assertIsInstance(res_row["cats"], compat.string_types) # col res_col = df.ix[:,"cats"] tm.assert_series_equal(res_col, exp_col) self.assertTrue(com.is_categorical_dtype(res_col)) # single value res_val = df.ix["j",0] self.assertEqual(res_val, exp_val) # iat res_val = df.iat[2,0] self.assertEqual(res_val, exp_val) # at res_val = df.at["j","cats"] self.assertEqual(res_val, exp_val) # fancy indexing exp_fancy = df.iloc[[2]] res_fancy = df[df["cats"] == "b"] tm.assert_frame_equal(res_fancy,exp_fancy) res_fancy = df[df["values"] == 3] tm.assert_frame_equal(res_fancy,exp_fancy) # get_value res_val = df.get_value("j","cats") self.assertEqual(res_val, exp_val) # i : int, slice, or sequence of integers res_row = df.iloc[2] tm.assert_series_equal(res_row, exp_row) tm.assertIsInstance(res_row["cats"], compat.string_types) res_df = df.iloc[slice(2,4)] tm.assert_frame_equal(res_df, exp_df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) res_df = df.iloc[[2,3]] tm.assert_frame_equal(res_df, exp_df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) res_col = df.iloc[:,0] tm.assert_series_equal(res_col, exp_col) self.assertTrue(com.is_categorical_dtype(res_col)) res_df = df.iloc[:,slice(0,2)] tm.assert_frame_equal(res_df, df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) res_df = df.iloc[:,[0,1]] tm.assert_frame_equal(res_df, df) self.assertTrue(com.is_categorical_dtype(res_df["cats"])) def test_slicing_doc_examples(self): #GH 7918 cats = Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c"]) idx = Index(["h","i","j","k","l","m","n",]) values= [1,2,2,2,3,4,5] df = DataFrame({"cats":cats,"values":values}, index=idx) result = df.iloc[2:4,:] expected = DataFrame({"cats":Categorical(['b','b'],categories=['a','b','c']),"values":[2,2]}, index=['j','k']) tm.assert_frame_equal(result, expected) result = df.iloc[2:4,:].dtypes expected = Series(['category','int64'],['cats','values']) tm.assert_series_equal(result, expected) result = df.loc["h":"j","cats"] expected = Series(Categorical(['a','b','b'], categories=['a','b','c']), index=['h','i','j'], name='cats') tm.assert_series_equal(result, expected) result = df.ix["h":"j",0:1] expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j']) }) tm.assert_frame_equal(result, expected) def test_assigning_ops(self): # systematically test the assigning operations: # for all slicing ops: # for value in categories and value not in categories: # - assign a single value -> exp_single_cats_value # - assign a complete row (mixed values) -> exp_single_row # - assign multiple rows (mixed values) (-> array) -> exp_multi_row # - assign a part of a column with dtype == categorical -> exp_parts_cats_col # - assign a part of a column with dtype != categorical -> exp_parts_cats_col cats = pd.Categorical(["a","a","a","a","a","a","a"], categories=["a","b"]) idx = pd.Index(["h","i","j","k","l","m","n"]) values = [1,1,1,1,1,1,1] orig = pd.DataFrame({"cats":cats,"values":values}, index=idx) ### the expected values # changed single row cats1 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"]) idx1 = pd.Index(["h","i","j","k","l","m","n"]) values1 = [1,1,2,1,1,1,1] exp_single_row = pd.DataFrame({"cats":cats1,"values":values1}, index=idx1) #changed multiple rows cats2 = 
pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"]) idx2 = pd.Index(["h","i","j","k","l","m","n"]) values2 = [1,1,2,2,1,1,1] exp_multi_row = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2) # changed part of the cats column cats3 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"]) idx3 = pd.Index(["h","i","j","k","l","m","n"]) values3 = [1,1,1,1,1,1,1] exp_parts_cats_col = pd.DataFrame({"cats":cats3,"values":values3}, index=idx3) # changed single value in cats col cats4 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"]) idx4 = pd.Index(["h","i","j","k","l","m","n"]) values4 = [1,1,1,1,1,1,1] exp_single_cats_value = pd.DataFrame({"cats":cats4,"values":values4}, index=idx4) #### iloc ##### ################ # - assign a single value -> exp_single_cats_value df = orig.copy() df.iloc[2,0] = "b" tm.assert_frame_equal(df, exp_single_cats_value) df = orig.copy() df.iloc[df.index == "j",0] = "b" tm.assert_frame_equal(df, exp_single_cats_value) # - assign a single value not in the current categories set def f(): df = orig.copy() df.iloc[2,0] = "c" self.assertRaises(ValueError, f) # - assign a complete row (mixed values) -> exp_single_row df = orig.copy() df.iloc[2,:] = ["b",2] tm.assert_frame_equal(df, exp_single_row) # - assign a complete row (mixed values) not in categories set def f(): df = orig.copy() df.iloc[2,:] = ["c",2] self.assertRaises(ValueError, f) # - assign multiple rows (mixed values) -> exp_multi_row df = orig.copy() df.iloc[2:4,:] = [["b",2],["b",2]] tm.assert_frame_equal(df, exp_multi_row) def f(): df = orig.copy() df.iloc[2:4,:] = [["c",2],["c",2]] self.assertRaises(ValueError, f) # - assign a part of a column with dtype == categorical -> exp_parts_cats_col df = orig.copy() df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b"]) tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): # different categories -> not sure if this should fail or pass df = orig.copy() df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b","c"]) with tm.assertRaises(ValueError): # different values df = orig.copy() df.iloc[2:4,0] = pd.Categorical(["c","c"], categories=["a","b","c"]) # - assign a part of a column with dtype != categorical -> exp_parts_cats_col df = orig.copy() df.iloc[2:4,0] = ["b","b"] tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): df.iloc[2:4,0] = ["c","c"] #### loc ##### ################ # - assign a single value -> exp_single_cats_value df = orig.copy() df.loc["j","cats"] = "b" tm.assert_frame_equal(df, exp_single_cats_value) df = orig.copy() df.loc[df.index == "j","cats"] = "b" tm.assert_frame_equal(df, exp_single_cats_value) # - assign a single value not in the current categories set def f(): df = orig.copy() df.loc["j","cats"] = "c" self.assertRaises(ValueError, f) # - assign a complete row (mixed values) -> exp_single_row df = orig.copy() df.loc["j",:] = ["b",2] tm.assert_frame_equal(df, exp_single_row) # - assign a complete row (mixed values) not in categories set def f(): df = orig.copy() df.loc["j",:] = ["c",2] self.assertRaises(ValueError, f) # - assign multiple rows (mixed values) -> exp_multi_row df = orig.copy() df.loc["j":"k",:] = [["b",2],["b",2]] tm.assert_frame_equal(df, exp_multi_row) def f(): df = orig.copy() df.loc["j":"k",:] = [["c",2],["c",2]] self.assertRaises(ValueError, f) # - assign a part of a column with dtype == categorical -> exp_parts_cats_col df = orig.copy() df.loc["j":"k","cats"] = pd.Categorical(["b","b"], 
categories=["a","b"]) tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): # different categories -> not sure if this should fail or pass df = orig.copy() df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b","c"]) with tm.assertRaises(ValueError): # different values df = orig.copy() df.loc["j":"k","cats"] = pd.Categorical(["c","c"], categories=["a","b","c"]) # - assign a part of a column with dtype != categorical -> exp_parts_cats_col df = orig.copy() df.loc["j":"k","cats"] = ["b","b"] tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): df.loc["j":"k","cats"] = ["c","c"] #### ix ##### ################ # - assign a single value -> exp_single_cats_value df = orig.copy() df.ix["j",0] = "b" tm.assert_frame_equal(df, exp_single_cats_value) df = orig.copy() df.ix[df.index == "j",0] = "b" tm.assert_frame_equal(df, exp_single_cats_value) # - assign a single value not in the current categories set def f(): df = orig.copy() df.ix["j",0] = "c" self.assertRaises(ValueError, f) # - assign a complete row (mixed values) -> exp_single_row df = orig.copy() df.ix["j",:] = ["b",2] tm.assert_frame_equal(df, exp_single_row) # - assign a complete row (mixed values) not in categories set def f(): df = orig.copy() df.ix["j",:] = ["c",2] self.assertRaises(ValueError, f) # - assign multiple rows (mixed values) -> exp_multi_row df = orig.copy() df.ix["j":"k",:] = [["b",2],["b",2]] tm.assert_frame_equal(df, exp_multi_row) def f(): df = orig.copy() df.ix["j":"k",:] = [["c",2],["c",2]] self.assertRaises(ValueError, f) # - assign a part of a column with dtype == categorical -> exp_parts_cats_col df = orig.copy() df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b"]) tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): # different categories -> not sure if this should fail or pass df = orig.copy() df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b","c"]) with tm.assertRaises(ValueError): # different values df = orig.copy() df.ix["j":"k",0] = pd.Categorical(["c","c"], categories=["a","b","c"]) # - assign a part of a column with dtype != categorical -> exp_parts_cats_col df = orig.copy() df.ix["j":"k",0] = ["b","b"] tm.assert_frame_equal(df, exp_parts_cats_col) with tm.assertRaises(ValueError): df.ix["j":"k",0] = ["c","c"] # iat df = orig.copy() df.iat[2,0] = "b" tm.assert_frame_equal(df, exp_single_cats_value) # - assign a single value not in the current categories set def f(): df = orig.copy() df.iat[2,0] = "c" self.assertRaises(ValueError, f) # at # - assign a single value -> exp_single_cats_value df = orig.copy() df.at["j","cats"] = "b" tm.assert_frame_equal(df, exp_single_cats_value) # - assign a single value not in the current categories set def f(): df = orig.copy() df.at["j","cats"] = "c" self.assertRaises(ValueError, f) # fancy indexing catsf = pd.Categorical(["a","a","c","c","a","a","a"], categories=["a","b","c"]) idxf = pd.Index(["h","i","j","k","l","m","n"]) valuesf = [1,1,3,3,1,1,1] df = pd.DataFrame({"cats":catsf,"values":valuesf}, index=idxf) exp_fancy = exp_multi_row.copy() exp_fancy["cats"].cat.set_categories(["a","b","c"], inplace=True) df[df["cats"] == "c"] = ["b",2] tm.assert_frame_equal(df, exp_multi_row) # set_value df = orig.copy() df.set_value("j","cats", "b") tm.assert_frame_equal(df, exp_single_cats_value) def f(): df = orig.copy() df.set_value("j","cats", "c") self.assertRaises(ValueError, f) # Assigning a Category to parts of a int/... 
column uses the values of the Catgorical df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]}) exp = pd.DataFrame({"a":[1,"b","b",1,1], "b":["a","a","b","b","a"]}) df.loc[1:2,"a"] = pd.Categorical(["b","b"], categories=["a","b"]) df.loc[2:3,"b"] = pd.Categorical(["b","b"], categories=["a","b"]) tm.assert_frame_equal(df, exp) ######### Series ########## orig = Series(pd.Categorical(["b","b"], categories=["a","b"])) s = orig.copy() s[:] = "a" exp = Series(pd.Categorical(["a","a"], categories=["a","b"])) tm.assert_series_equal(s, exp) s = orig.copy() s[1] = "a" exp = Series(pd.Categorical(["b","a"], categories=["a","b"])) tm.assert_series_equal(s, exp) s = orig.copy() s[s.index > 0] = "a" exp = Series(pd.Categorical(["b","a"], categories=["a","b"])) tm.assert_series_equal(s, exp) s = orig.copy() s[[False, True]] = "a" exp = Series(pd.Categorical(["b","a"], categories=["a","b"])) tm.assert_series_equal(s, exp) s = orig.copy() s.index = ["x", "y"] s["y"] = "a" exp = Series(pd.Categorical(["b","a"], categories=["a","b"]), index=["x", "y"]) tm.assert_series_equal(s, exp) # ensure that one can set something to np.nan s = Series(Categorical([1,2,3])) exp = Series(Categorical([1,np.nan,3])) s[1] = np.nan tm.assert_series_equal(s, exp) def test_comparisons(self): tests_data = [(list("abc"), list("cba"), list("bbb")), ([1,2,3], [3,2,1], [2,2,2])] for data , reverse, base in tests_data: cat_rev = pd.Series(pd.Categorical(data, categories=reverse, ordered=True)) cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse, ordered=True)) cat = pd.Series(pd.Categorical(data, ordered=True)) cat_base = pd.Series(pd.Categorical(base, categories=cat.cat.categories, ordered=True)) s = Series(base) a = np.array(base) # comparisons need to take categories ordering into account res_rev = cat_rev > cat_rev_base exp_rev = Series([True, False, False]) tm.assert_series_equal(res_rev, exp_rev) res_rev = cat_rev < cat_rev_base exp_rev = Series([False, False, True]) tm.assert_series_equal(res_rev, exp_rev) res = cat > cat_base exp = Series([False, False, True]) tm.assert_series_equal(res, exp) scalar = base[1] res = cat > scalar exp = Series([False, False, True]) exp2 = cat.values > scalar tm.assert_series_equal(res, exp) tm.assert_numpy_array_equal(res.values, exp2) res_rev = cat_rev > scalar exp_rev = Series([True, False, False]) exp_rev2 = cat_rev.values > scalar tm.assert_series_equal(res_rev, exp_rev) tm.assert_numpy_array_equal(res_rev.values, exp_rev2) # Only categories with same categories can be compared def f(): cat > cat_rev self.assertRaises(TypeError, f) # categorical cannot be compared to Series or numpy array, and also not the other way # around self.assertRaises(TypeError, lambda: cat > s) self.assertRaises(TypeError, lambda: cat_rev > s) self.assertRaises(TypeError, lambda: cat > a) self.assertRaises(TypeError, lambda: cat_rev > a) self.assertRaises(TypeError, lambda: s < cat) self.assertRaises(TypeError, lambda: s < cat_rev) self.assertRaises(TypeError, lambda: a < cat) self.assertRaises(TypeError, lambda: a < cat_rev) # unequal comparison should raise for unordered cats cat = Series(Categorical(list("abc"))) def f(): cat > "b" self.assertRaises(TypeError, f) cat = Series(Categorical(list("abc"), ordered=False)) def f(): cat > "b" self.assertRaises(TypeError, f) # https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following # comparisons with scalars not in categories should raise for unequal comps, but not for # equal/not equal cat = Series(Categorical(list("abc"), 
ordered=True)) self.assertRaises(TypeError, lambda: cat < "d") self.assertRaises(TypeError, lambda: cat > "d") self.assertRaises(TypeError, lambda: "d" < cat) self.assertRaises(TypeError, lambda: "d" > cat) self.assert_series_equal(cat == "d" , Series([False, False, False])) self.assert_series_equal(cat != "d" , Series([True, True, True])) # And test NaN handling... cat = Series(Categorical(["a","b","c", np.nan])) exp = Series([True, True, True, False]) res = (cat == cat) tm.assert_series_equal(res, exp) def test_cat_equality(self): # GH 8938 # allow equality comparisons a = Series(list('abc'),dtype="category") b = Series(list('abc'),dtype="object") c = Series(['a','b','cc'],dtype="object") d = Series(list('acb'),dtype="object") e = Categorical(list('abc')) f = Categorical(list('acb')) # vs scalar self.assertFalse((a=='a').all()) self.assertTrue(((a!='a') == ~(a=='a')).all()) self.assertFalse(('a'==a).all()) self.assertTrue((a=='a')[0]) self.assertTrue(('a'==a)[0]) self.assertFalse(('a'!=a)[0]) # vs list-like self.assertTrue((a==a).all()) self.assertFalse((a!=a).all()) self.assertTrue((a==list(a)).all()) self.assertTrue((a==b).all()) self.assertTrue((b==a).all()) self.assertTrue(((~(a==b))==(a!=b)).all()) self.assertTrue(((~(b==a))==(b!=a)).all()) self.assertFalse((a==c).all()) self.assertFalse((c==a).all()) self.assertFalse((a==d).all()) self.assertFalse((d==a).all()) # vs a cat-like self.assertTrue((a==e).all()) self.assertTrue((e==a).all()) self.assertFalse((a==f).all()) self.assertFalse((f==a).all()) self.assertTrue(((~(a==e)==(a!=e)).all())) self.assertTrue(((~(e==a)==(e!=a)).all())) self.assertTrue(((~(a==f)==(a!=f)).all())) self.assertTrue(((~(f==a)==(f!=a)).all())) # non-equality is not comparable self.assertRaises(TypeError, lambda: a < b) self.assertRaises(TypeError, lambda: b < a) self.assertRaises(TypeError, lambda: a > b) self.assertRaises(TypeError, lambda: b > a) def test_concat(self): cat = pd.Categorical(["a","b"], categories=["a","b"]) vals = [1,2] df = pd.DataFrame({"cats":cat, "vals":vals}) cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"]) vals2 = [1,2,1,2] exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1])) res = pd.concat([df,df]) tm.assert_frame_equal(exp, res) # Concat should raise if the two categoricals do not have the same categories cat3 = pd.Categorical(["a","b"], categories=["a","b","c"]) vals3 = [1,2] df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3}) def f(): pd.concat([df,df_wrong_categories]) self.assertRaises(ValueError, f) # GH 7864 # make sure ordering is preserverd df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']}) df["grade"] = pd.Categorical(df["raw_grade"]) df['grade'].cat.set_categories(['e', 'a', 'b']) df1 = df[0:3] df2 = df[3:] self.assert_numpy_array_equal(df['grade'].cat.categories, df1['grade'].cat.categories) self.assert_numpy_array_equal(df['grade'].cat.categories, df2['grade'].cat.categories) dfx = pd.concat([df1, df2]) dfx['grade'].cat.categories self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories) def test_concat_preserve(self): # GH 8641 # series concat not preserving category dtype s = Series(list('abc'),dtype='category') s2 = Series(list('abd'),dtype='category') def f(): pd.concat([s,s2]) self.assertRaises(ValueError, f) result = pd.concat([s,s],ignore_index=True) expected = Series(list('abcabc')).astype('category') tm.assert_series_equal(result, expected) result = pd.concat([s,s]) expected = 
Series(list('abcabc'),index=[0,1,2,0,1,2]).astype('category') tm.assert_series_equal(result, expected) a = Series(np.arange(6,dtype='int64')) b = Series(list('aabbca')) df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }) result = pd.concat([df2,df2]) expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }) tm.assert_frame_equal(result, expected) def test_categorical_index_preserver(self): a = Series(np.arange(6,dtype='int64')) b = Series(list('aabbca')) df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }).set_index('B') result = pd.concat([df2,df2]) expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }).set_index('B') tm.assert_frame_equal(result, expected) # wrong catgories df3 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('abc')) }).set_index('B') self.assertRaises(TypeError, lambda : pd.concat([df2,df3])) def test_append(self): cat = pd.Categorical(["a","b"], categories=["a","b"]) vals = [1,2] df = pd.DataFrame({"cats":cat, "vals":vals}) cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"]) vals2 = [1,2,1,2] exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1])) res = df.append(df) tm.assert_frame_equal(exp, res) # Concat should raise if the two categoricals do not have the same categories cat3 = pd.Categorical(["a","b"], categories=["a","b","c"]) vals3 = [1,2] df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3}) def f(): df.append(df_wrong_categories) self.assertRaises(ValueError, f) def test_merge(self): # GH 9426 right = DataFrame({'c': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'}, 'd': {0: 'null', 1: 'null', 2: 'null', 3: 'null', 4: 'null'}}) left = DataFrame({'a': {0: 'f', 1: 'f', 2: 'f', 3: 'f', 4: 'f'}, 'b': {0: 'g', 1: 'g', 2: 'g', 3: 'g', 4: 'g'}}) df = pd.merge(left, right, how='left', left_on='b', right_on='c') # object-object expected = df.copy() # object-cat cright = right.copy() cright['d'] = cright['d'].astype('category') result = pd.merge(left, cright, how='left', left_on='b', right_on='c') tm.assert_frame_equal(result, expected) # cat-object cleft = left.copy() cleft['b'] = cleft['b'].astype('category') result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c') tm.assert_frame_equal(result, expected) # cat-cat cright = right.copy() cright['d'] = cright['d'].astype('category') cleft = left.copy() cleft['b'] = cleft['b'].astype('category') result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c') tm.assert_frame_equal(result, expected) def test_repeat(self): #GH10183 cat = pd.Categorical(["a","b"], categories=["a","b"]) exp = pd.Categorical(["a", "a", "b", "b"], categories=["a","b"]) res = cat.repeat(2) self.assert_categorical_equal(res, exp) def test_na_actions(self): cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3]) vals = ["a","b",np.nan,"d"] df = pd.DataFrame({"cats":cat, "vals":vals}) cat2 = pd.Categorical([1,2,3,3], categories=[1,2,3]) vals2 = ["a","b","b","d"] df_exp_fill = pd.DataFrame({"cats":cat2, "vals":vals2}) cat3 = pd.Categorical([1,2,3], categories=[1,2,3]) vals3 = ["a","b",np.nan] df_exp_drop_cats = pd.DataFrame({"cats":cat3, "vals":vals3}) cat4 = pd.Categorical([1,2], categories=[1,2,3]) vals4 = ["a","b"] df_exp_drop_all = pd.DataFrame({"cats":cat4, "vals":vals4}) # fillna res = df.fillna(value={"cats":3, "vals":"b"}) tm.assert_frame_equal(res, df_exp_fill) def f(): df.fillna(value={"cats":4, 
"vals":"c"}) self.assertRaises(ValueError, f) res = df.fillna(method='pad') tm.assert_frame_equal(res, df_exp_fill) res = df.dropna(subset=["cats"]) tm.assert_frame_equal(res, df_exp_drop_cats) res = df.dropna() tm.assert_frame_equal(res, df_exp_drop_all) # make sure that fillna takes both missing values and NA categories into account c = Categorical(["a","b",np.nan]) with tm.assert_produces_warning(FutureWarning): c.set_categories(["a","b",np.nan], rename=True, inplace=True) c[0] = np.nan df = pd.DataFrame({"cats":c, "vals":[1,2,3]}) df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]}) res = df.fillna("a") tm.assert_frame_equal(res, df_exp) def test_astype_to_other(self): s = self.cat['value_group'] expected = s tm.assert_series_equal(s.astype('category'),expected) tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected) self.assertRaises(ValueError, lambda : s.astype('float64')) cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) tm.assert_series_equal(cat.astype('str'), exp) s2 = Series(Categorical.from_array(['1', '2', '3', '4'])) exp2 = Series([1,2,3,4]).astype(int) tm.assert_series_equal(s2.astype('int') , exp2) # object don't sort correctly, so just compare that we have the same values def cmp(a,b): tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b))) expected = Series(np.array(s.values),name='value_group') cmp(s.astype('object'),expected) cmp(s.astype(np.object_),expected) # array conversion tm.assert_almost_equal(np.array(s),np.array(s.values)) # valid conversion for valid in [lambda x: x.astype('category'), lambda x: x.astype(com.CategoricalDtype()), lambda x: x.astype('object').astype('category'), lambda x: x.astype('object').astype(com.CategoricalDtype())]: result = valid(s) tm.assert_series_equal(result,s) # invalid conversion (these are NOT a dtype) for invalid in [lambda x: x.astype(pd.Categorical), lambda x: x.astype('object').astype(pd.Categorical)]: self.assertRaises(TypeError, lambda : invalid(s)) def test_astype_categorical(self): cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) tm.assert_categorical_equal(cat,cat.astype('category')) tm.assert_almost_equal(np.array(cat),cat.astype('object')) self.assertRaises(ValueError, lambda : cat.astype(float)) def test_to_records(self): # GH8626 # dict creation df = DataFrame({ 'A' : list('abc') }, dtype='category') expected = Series(list('abc'), dtype='category', name='A') tm.assert_series_equal(df['A'], expected) # list-like creation df = DataFrame(list('abc'), dtype='category') expected = Series(list('abc'), dtype='category', name=0) tm.assert_series_equal(df[0], expected) # to record array # this coerces result = df.to_records() expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')], dtype=[('index', '=i8'), ('0', 'O')]) tm.assert_almost_equal(result, expected) def test_numeric_like_ops(self): # numeric ops should not succeed for op in ['__add__','__sub__','__mul__','__truediv__']: self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat)) # reduction ops should not succeed (unless specifically defined, e.g. 
min/max) s = self.cat['value_group'] for op in ['kurt','skew','var','std','mean','sum','median']: self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False)) # mad technically works because it takes always the numeric data # numpy ops s = pd.Series(pd.Categorical([1,2,3,4])) self.assertRaises(TypeError, lambda : np.sum(s)) # numeric ops on a Series for op in ['__add__','__sub__','__mul__','__truediv__']: self.assertRaises(TypeError, lambda : getattr(s,op)(2)) # invalid ufunc self.assertRaises(TypeError, lambda : np.log(s)) def test_cat_tab_completition(self): # test the tab completion display ok_for_cat = ['categories','codes','ordered','set_categories', 'add_categories', 'remove_categories', 'rename_categories', 'reorder_categories', 'remove_unused_categories', 'as_ordered', 'as_unordered'] def get_dir(s): results = [ r for r in s.cat.__dir__() if not r.startswith('_') ] return list(sorted(set(results))) s = Series(list('aabbcde')).astype('category') results = get_dir(s) tm.assert_almost_equal(results,list(sorted(set(ok_for_cat)))) def test_cat_accessor_api(self): # GH 9322 from pandas.core.categorical import CategoricalAccessor self.assertIs(Series.cat, CategoricalAccessor) s = Series(list('aabbcde')).astype('category') self.assertIsInstance(s.cat, CategoricalAccessor) invalid = Series([1]) with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"): invalid.cat self.assertFalse(hasattr(invalid, 'cat')) def test_cat_accessor_no_new_attributes(self): # https://github.com/pydata/pandas/issues/10673 c = Series(list('aabbcde')).astype('category') with tm.assertRaisesRegexp(AttributeError, "You cannot add any new attribute"): c.cat.xlabel = "a" def test_str_accessor_api_for_categorical(self): # https://github.com/pydata/pandas/issues/10661 from pandas.core.strings import StringMethods s = Series(list('aabb')) s = s + " " + s c = s.astype('category') self.assertIsInstance(c.str, StringMethods) # str functions, which need special arguments special_func_defs = [ ('cat', (list("zyxw"),), {"sep": ","}), ('center', (10,), {}), ('contains', ("a",), {}), ('count', ("a",), {}), ('decode', ("UTF-8",), {}), ('encode', ("UTF-8",), {}), ('endswith', ("a",), {}), ('extract', ("([a-z]*) ",), {}), ('find', ("a",), {}), ('findall', ("a",), {}), ('index', (" ",), {}), ('ljust', (10,), {}), ('match', ("a"), {}), # deprecated... ('normalize', ("NFC",), {}), ('pad', (10,), {}), ('partition', (" ",), {"expand": False}), # not default ('partition', (" ",), {"expand": True}), # default ('repeat', (3,), {}), ('replace', ("a", "z"), {}), ('rfind', ("a",), {}), ('rindex', (" ",), {}), ('rjust', (10,), {}), ('rpartition', (" ",), {"expand": False}), # not default ('rpartition', (" ",), {"expand": True}), # default ('slice', (0,1), {}), ('slice_replace', (0,1,"z"), {}), ('split', (" ",), {"expand":False}), #default ('split', (" ",), {"expand":True}), # not default ('startswith', ("a",), {}), ('wrap', (2,), {}), ('zfill', (10,), {}) ] _special_func_names = [f[0] for f in special_func_defs] # * get, join: they need a individual elements of type lists, but # we can't make a categorical with lists as individual categories. # -> `s.str.split(" ").astype("category")` will error! # * `translate` has different interfaces for py2 vs. 
py3 _ignore_names = ["get", "join", "translate"] str_func_names = [f for f in dir(s.str) if not (f.startswith("_") or f in _special_func_names or f in _ignore_names)] func_defs = [(f, (), {}) for f in str_func_names] func_defs.extend(special_func_defs) for func, args, kwargs in func_defs: res = getattr(c.str, func)(*args, **kwargs) exp = getattr(s.str, func)(*args, **kwargs) if isinstance(res, pd.DataFrame): tm.assert_frame_equal(res, exp) else: tm.assert_series_equal(res, exp) invalid = Series([1,2,3]).astype('category') with tm.assertRaisesRegexp(AttributeError, "Can only use .str accessor with string"): invalid.str self.assertFalse(hasattr(invalid, 'str')) def test_dt_accessor_api_for_categorical(self): # https://github.com/pydata/pandas/issues/10661 from pandas.tseries.common import Properties from pandas.tseries.index import date_range, DatetimeIndex from pandas.tseries.period import period_range, PeriodIndex from pandas.tseries.tdi import timedelta_range, TimedeltaIndex s_dr = Series(date_range('1/1/2015', periods=5, tz="MET")) c_dr = s_dr.astype("category") s_pr = Series(period_range('1/1/2015', freq='D', periods=5)) c_pr = s_pr.astype("category") s_tdr = Series(timedelta_range('1 days','10 days')) c_tdr = s_tdr.astype("category") test_data = [ ("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr), ("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr), ("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)] self.assertIsInstance(c_dr.dt, Properties) special_func_defs = [ ('strftime', ("%Y-%m-%d",), {}), ('tz_convert', ("EST",), {}), #('tz_localize', ("UTC",), {}), ] _special_func_names = [f[0] for f in special_func_defs] # the series is already localized _ignore_names = ['tz_localize'] for name, attr_names, s, c in test_data: func_names = [f for f in dir(s.dt) if not (f.startswith("_") or f in attr_names or f in _special_func_names or f in _ignore_names)] func_defs = [(f, (), {}) for f in func_names] for f_def in special_func_defs: if f_def[0] in dir(s.dt): func_defs.append(f_def) for func, args, kwargs in func_defs: res = getattr(c.dt, func)(*args, **kwargs) exp = getattr(s.dt, func)(*args, **kwargs) if isinstance(res, pd.DataFrame): tm.assert_frame_equal(res, exp) elif isinstance(res, pd.Series): tm.assert_series_equal(res, exp) else: tm.assert_numpy_array_equal(res, exp) for attr in attr_names: try: res = getattr(c.dt, attr) exp = getattr(s.dt, attr) except Exception as e: print(name, attr) raise e if isinstance(res, pd.DataFrame): tm.assert_frame_equal(res, exp) elif isinstance(res, pd.Series): tm.assert_series_equal(res, exp) else: tm.assert_numpy_array_equal(res, exp) invalid = Series([1,2,3]).astype('category') with tm.assertRaisesRegexp(AttributeError, "Can only use .dt accessor with datetimelike"): invalid.dt self.assertFalse(hasattr(invalid, 'str')) def test_pickle_v0_14_1(self): # we have the name warning # 10482 with tm.assert_produces_warning(UserWarning): cat = pd.Categorical(values=['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'], name='foobar', ordered=False) pickle_path = os.path.join(tm.get_data_path(), 'categorical_0_14_1.pickle') # This code was executed once on v0.14.1 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], # name='foobar') # with open(pickle_path, 'wb') as f: pickle.dump(cat, f) # self.assert_categorical_equal(cat, pd.read_pickle(pickle_path)) def test_pickle_v0_15_2(self): # ordered -> _ordered # GH 9347 # we have the name warning # 10482 with tm.assert_produces_warning(UserWarning): cat = 
pd.Categorical(values=['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'], name='foobar', ordered=False) pickle_path = os.path.join(tm.get_data_path(), 'categorical_0_15_2.pickle') # This code was executed once on v0.15.2 to generate the pickle: # # cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'], # name='foobar') # with open(pickle_path, 'wb') as f: pickle.dump(cat, f) # self.assert_categorical_equal(cat, pd.read_pickle(pickle_path)) def test_concat_categorical(self): # See GH 10177 df1 = pd.DataFrame(np.arange(18, dtype='int64').reshape(6, 3), columns=["a", "b", "c"]) df2 = pd.DataFrame(np.arange(14, dtype='int64').reshape(7, 2), columns=["a", "c"]) df2['h'] = pd.Series(pd.Categorical(["one", "one", "two", "one", "two", "two", "one"])) df_concat = pd.concat((df1, df2), axis=0).reset_index(drop=True) df_expected = pd.DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12], 'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], 'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13]}) df_expected['h'] = pd.Series(pd.Categorical([None, None, None, None, None, None, "one", "one", "two", "one", "two", "two", "one"])) tm.assert_frame_equal(df_expected, df_concat) if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], # '--with-coverage', '--cover-package=pandas.core'] exit=False)
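# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original test module): a minimal, hedged
# illustration of the behaviour the tests above exercise -- grouping or
# counting by a categorical keeps unused categories in the result. Kept
# commented out so it cannot affect the module; names are illustrative only.
#
#   import numpy as np
#   import pandas as pd
#
#   s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", "c"]))
#   s.value_counts(sort=False)      # index includes "c" with a count of 0
#
#   df = pd.DataFrame({"grp": s, "val": [1, 2, 3]})
#   df.groupby("grp").sum()         # row for "c" is present, filled with NaN
# ---------------------------------------------------------------------------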
artistic-2.0
aselle/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
39
32726
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementations of different data feeders to provide data for TF trainer (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ # TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues. from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.deprecation import deprecated # pylint: disable=g-multiple-import,g-bad-import-order from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels # pylint: enable=g-multiple-import,g-bad-import-order def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None): """Returns shape for input and output of the data feeder.""" x_is_dict, y_is_dict = isinstance( x_shape, dict), y_shape is not None and isinstance(y_shape, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) if batch_size is None: batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0] elif batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) if x_is_dict: input_shape = {} for k, v in list(x_shape.items()): input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1]) else: x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1] input_shape = [batch_size] + x_shape if y_shape is None: return input_shape, None, batch_size def out_el_shape(out_shape, num_classes): out_shape = list(out_shape[1:]) if len(out_shape) > 1 else [] # Skip first dimension if it is 1. 
if out_shape and out_shape[0] == 1: out_shape = out_shape[1:] if num_classes is not None and num_classes > 1: return [batch_size] + out_shape + [num_classes] else: return [batch_size] + out_shape if not y_is_dict: output_shape = out_el_shape(y_shape, n_classes) else: output_shape = dict([(k, out_el_shape(v, n_classes[k] if n_classes is not None and k in n_classes else None)) for k, v in list(y_shape.items())]) return input_shape, output_shape, batch_size def _data_type_filter(x, y): """Filter data types into acceptable format.""" if HAS_DASK: x = extract_dask_data(x) if y is not None: y = extract_dask_labels(y) if HAS_PANDAS: x = extract_pandas_data(x) if y is not None: y = extract_pandas_labels(y) return x, y def _is_iterable(x): return hasattr(x, 'next') or hasattr(x, '__next__') @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_train_data_feeder(x, y, n_classes, batch_size=None, shuffle=True, epochs=None): """Create data feeder, to sample inputs from dataset. If `x` and `y` are iterators, use `StreamingDataFeeder`. Args: x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also supports iterables. y: numpy, pandas or Dask array or dictionary of aforementioned. Also supports iterables. n_classes: number of classes. Must be None or same type as y. In case, `y` is `dict` (or iterable which returns dict) such that `n_classes[key] = n_classes for y[key]` batch_size: size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: DataFeeder object that returns training data. Raises: ValueError: if one of `x` and `y` is iterable and the other is not. """ x, y = _data_type_filter(x, y) if HAS_DASK: # pylint: disable=g-import-not-at-top import dask.dataframe as dd if (isinstance(x, (dd.Series, dd.DataFrame)) and (y is None or isinstance(y, (dd.Series, dd.DataFrame)))): data_feeder_cls = DaskDataFeeder else: data_feeder_cls = DataFeeder else: data_feeder_cls = DataFeeder if _is_iterable(x): if y is not None and not _is_iterable(y): raise ValueError('Both x and y should be iterators for ' 'streaming learning to work.') return StreamingDataFeeder(x, y, n_classes, batch_size) return data_feeder_cls( x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs) def _batch_data(x, batch_size=None): if (batch_size is not None) and (batch_size <= 0): raise ValueError('Invalid batch_size %d.' % batch_size) x_first_el = six.next(x) x = itertools.chain([x_first_el], x) chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False for data in x: if isinstance(data, dict): for k, v in list(data.items()): chunk[k].append(v) if (batch_size is not None) and (len(chunk[k]) >= batch_size): chunk[k] = np.matrix(chunk[k]) chunk_filled = True if chunk_filled: yield chunk chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False else: chunk.append(data) if (batch_size is not None) and (len(chunk) >= batch_size): yield np.matrix(chunk) chunk = [] if isinstance(x_first_el, dict): for k, v in list(data.items()): chunk[k] = np.matrix(chunk[k]) yield chunk else: yield np.matrix(chunk) @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_predict_data_feeder(x, batch_size=None): """Returns an iterable for feeding into predict step. Args: x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports iterable. batch_size: Size of batches to split data into. 
If `None`, returns one batch of full size. Returns: List or iterator (or dictionary thereof) of parts of data to predict on. Raises: ValueError: if `batch_size` <= 0. """ if HAS_DASK: x = extract_dask_data(x) if HAS_PANDAS: x = extract_pandas_data(x) if _is_iterable(x): return _batch_data(x, batch_size) if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) if batch_size is not None: if batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) n_batches = int(math.ceil(float(len(x)) / batch_size)) return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)] return [x] @deprecated(None, 'Please use tensorflow/transform or tf.data.') def setup_processor_data_feeder(x): """Sets up processor iterable. Args: x: numpy, pandas or iterable. Returns: Iterable of data to process. """ if HAS_PANDAS: x = extract_pandas_matrix(x) return x @deprecated(None, 'Please convert numpy dtypes explicitly.') def check_array(array, dtype): """Checks array on dtype and converts it if different. Args: array: Input array. dtype: Expected dtype. Returns: Original array or converted. """ # skip check if array is instance of other classes, e.g. h5py.Dataset # to avoid copying array and loading whole data into memory if isinstance(array, (np.ndarray, list)): array = np.array(array, dtype=dtype, order=None, copy=False) return array def _access(data, iloc): """Accesses an element from collection, using integer location based indexing. Args: data: array-like. The collection to access iloc: `int` or `list` of `int`s. Location(s) to access in `collection` Returns: The element of `a` found at location(s) `iloc`. """ if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame): return data.iloc[iloc] return data[iloc] def _check_dtype(dtype): if dtypes.as_dtype(dtype) == dtypes.float64: logging.warn( 'float64 is not supported by many models, consider casting to float32.') return dtype class DataFeeder(object): """Data feeder is an example class to sample data for TF trainer. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. """ @deprecated(None, 'Please use tensorflow/transform or tf.data.') def __init__(self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None, epochs=None): """Initializes a DataFeeder instance. Args: x: One feature sample which can either Nd numpy matrix of shape `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix. y: label vector, either floats for regression or class id for classification. If matrix, will consider as a sequence of labels. Can be `None` for unsupervised setting. Also supports dictionary of labels. n_classes: Number of classes, 0 and 1 are considered regression, `None` will pass through the input labels without one-hot conversion. Also, if `y` is `dict`, then `n_classes` must be `dict` such that `n_classes[key] = n_classes for label y[key]`, `None` otherwise. batch_size: Mini-batch size to accumulate samples in one mini batch. shuffle: Whether to shuffle `x`. random_state: Numpy `RandomState` object to reproduce sampling. epochs: Number of times to iterate over input data before raising `StopIteration` exception. Attributes: x: Input features (ndarray or dictionary of ndarrays). y: Input label (ndarray or dictionary of ndarrays). n_classes: Number of classes (if `None`, pass through indices without one-hot conversion). 
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input (or dictionary of shapes).
      output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
    """
    x_is_dict, y_is_dict = isinstance(
        x, dict), y is not None and isinstance(y, dict)
    if isinstance(y, list):
      y = np.array(y)

    self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
                   ]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (dict(
        [(k, check_array(v, v.dtype)) for k, v in list(y.items())])
                                      if y_is_dict else check_array(y, y.dtype))

    # self.n_classes is not None means we're converting raw target indices
    # to one-hot.
    if n_classes is not None:
      if not y_is_dict:
        y_dtype = (
            np.int64 if n_classes is not None and n_classes > 1 else np.float32)
        self._y = (None if y is None else check_array(y, dtype=y_dtype))

    self.n_classes = n_classes
    self.max_epochs = epochs

    x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
                   ]) if x_is_dict else self._x.shape
    y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
                   ]) if y_is_dict else None if y is None else self._y.shape

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)

    # Input dtype matches dtype of x.
    self._input_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
        if x_is_dict else _check_dtype(self._x.dtype))

    # self._output_dtype == np.float32 when y is None
    self._output_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
        if y_is_dict else (_check_dtype(self._y.dtype)
                           if y is not None else np.float32))

    # self.n_classes is None means we're passing in raw target indices
    if n_classes is not None and y_is_dict:
      for key in list(n_classes.keys()):
        if key in self._output_dtype:
          self._output_dtype[key] = np.float32

    self._shuffle = shuffle
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state

    if x_is_dict:
      num_samples = list(self._x.values())[0].shape[0]
    elif tensor_util.is_tensor(self._x):
      num_samples = self._x.shape[
          0].value  # shape will be a Dimension, extract an int
    else:
      num_samples = self._x.shape[0]

    if self._shuffle:
      self.indices = self.random_state.permutation(num_samples)
    else:
      self.indices = np.array(range(num_samples))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None

  @property
  def x(self):
    return self._x

  @property
  def y(self):
    return self._y

  @property
  def shuffle(self):
    return self._shuffle

  @property
  def input_dtype(self):
    return self._input_dtype

  @property
  def output_dtype(self):
    return self._output_dtype

  @property
  def batch_size(self):
    return self._batch_size

  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(
        dtypes.int32, [1], name='epoch')
    return self._epoch_placeholder

  def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
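
    Example (editorial sketch, not part of the original docstring; assumes a
    feeder built from in-memory numpy arrays, plus an existing session `sess`
    and training op `train_op`):

      feeder = DataFeeder(x, y, n_classes=None, batch_size=32)
      input_ph, output_ph = feeder.input_builder()
      feeder.set_placeholders(input_ph, output_ph)
      feed_fn = feeder.get_feed_dict_fn()
      sess.run(train_op, feed_dict=feed_fn())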
""" def get_placeholder(shape, dtype, name_prepend): if shape is None: return None if isinstance(shape, dict): placeholder = {} for key in list(shape.keys()): placeholder[key] = array_ops.placeholder( dtypes.as_dtype(dtype[key]), [None] + shape[key][1:], name=name_prepend + '_' + key) else: placeholder = array_ops.placeholder( dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend) return placeholder self._input_placeholder = get_placeholder(self.input_shape, self._input_dtype, 'input') self._output_placeholder = get_placeholder(self.output_shape, self._output_dtype, 'output') return self._input_placeholder, self._output_placeholder def set_placeholders(self, input_placeholder, output_placeholder): """Sets placeholders for this data feeder. Args: input_placeholder: Placeholder for `x` variable. Should match shape of the examples in the x dataset. output_placeholder: Placeholder for `y` variable. Should match shape of the examples in the y dataset. Can be `None`. """ self._input_placeholder = input_placeholder self._output_placeholder = output_placeholder def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return { 'epoch': self.epoch, 'offset': self.offset, 'batch_size': self._batch_size } def get_feed_dict_fn(self): """Returns a function that samples data into given placeholders. Returns: A function that when called samples a random subset of batch size from `x` and `y`. """ x_is_dict, y_is_dict = isinstance( self._x, dict), self._y is not None and isinstance(self._y, dict) # Assign input features from random indices. def extract(data, indices): return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if len(data.shape) == 1 else _access(data, indices)) # assign labels from random indices def assign_label(data, shape, dtype, n_classes, indices): shape[0] = indices.shape[0] out = np.zeros(shape, dtype=dtype) for i in xrange(out.shape[0]): sample = indices[i] # self.n_classes is None means we're passing in raw target indices if n_classes is None: out[i] = _access(data, sample) else: if n_classes > 1: if len(shape) == 2: out.itemset((i, int(_access(data, sample))), 1.0) else: for idx, value in enumerate(_access(data, sample)): out.itemset(tuple([i, idx, value]), 1.0) else: out[i] = _access(data, sample) return out def _feed_dict_fn(): """Function that samples data into given placeholders.""" if self.max_epochs is not None and self.epoch + 1 > self.max_epochs: raise StopIteration assert self._input_placeholder is not None feed_dict = {} if self._epoch_placeholder is not None: feed_dict[self._epoch_placeholder.name] = [self.epoch] # Take next batch of indices. 
x_len = list( self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0] end = min(x_len, self.offset + self._batch_size) batch_indices = self.indices[self.offset:end] # adding input placeholder feed_dict.update( dict([(self._input_placeholder[k].name, extract(v, batch_indices)) for k, v in list(self._x.items())]) if x_is_dict else { self._input_placeholder.name: extract(self._x, batch_indices) }) # move offset and reset it if necessary self.offset += self._batch_size if self.offset >= x_len: self.indices = self.random_state.permutation( x_len) if self._shuffle else np.array(range(x_len)) self.offset = 0 self.epoch += 1 # return early if there are no labels if self._output_placeholder is None: return feed_dict # adding output placeholders if y_is_dict: for k, v in list(self._y.items()): n_classes = (self.n_classes[k] if k in self.n_classes else None) if self.n_classes is not None else None shape, dtype = self.output_shape[k], self._output_dtype[k] feed_dict.update({ self._output_placeholder[k].name: assign_label(v, shape, dtype, n_classes, batch_indices) }) else: shape, dtype, n_classes = (self.output_shape, self._output_dtype, self.n_classes) feed_dict.update({ self._output_placeholder.name: assign_label(self._y, shape, dtype, n_classes, batch_indices) }) return feed_dict return _feed_dict_fn class StreamingDataFeeder(DataFeeder): """Data feeder for TF trainer that reads data from iterator. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Streaming data feeder allows to read data as it comes it from disk or somewhere else. It's custom to have this iterators rotate infinetly over the dataset, to allow control of how much to learn on the trainer side. """ def __init__(self, x, y, n_classes, batch_size): """Initializes a StreamingDataFeeder instance. Args: x: iterator each element of which returns one feature sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices. y: iterator each element of which returns one label sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many classes regression values. n_classes: indicator of how many classes the corresponding label sample has for the purposes of one-hot conversion of label. In case where `y` is a dictionary, `n_classes` must be dictionary (with same keys as `y`) of how many classes there are in each label in `y`. If key is present in `y` and missing in `n_classes`, the value is assumed `None` and no one-hot conversion will be applied to the label with that key. batch_size: Mini batch size to accumulate samples in one batch. If set `None`, then assumes that iterator to return already batched element. Attributes: x: input features (or dictionary of input features). y: input label (or dictionary of output features). n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input (can be dictionary depending on `x`). output_shape: shape of the output (can be dictionary depending on `y`). input_dtype: dtype of input (can be dictionary depending on `x`). output_dtype: dtype of output (can be dictionary depending on `y`). 
""" # pylint: disable=invalid-name,super-init-not-called x_first_el = six.next(x) self._x = itertools.chain([x_first_el], x) if y is not None: y_first_el = six.next(y) self._y = itertools.chain([y_first_el], y) else: y_first_el = None self._y = None self.n_classes = n_classes x_is_dict = isinstance(x_first_el, dict) y_is_dict = y is not None and isinstance(y_first_el, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) # extract shapes for first_elements if x_is_dict: x_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())]) else: x_first_el_shape = [1] + list(x_first_el.shape) if y_is_dict: y_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())]) elif y is None: y_first_el_shape = None else: y_first_el_shape = ( [1] + list(y_first_el[0].shape if isinstance(y_first_el, list) else y_first_el.shape)) self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_first_el_shape, y_first_el_shape, n_classes, batch_size) # Input dtype of x_first_el. if x_is_dict: self._input_dtype = dict( [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())]) else: self._input_dtype = _check_dtype(x_first_el.dtype) # Output dtype of y_first_el. def check_y_dtype(el): if isinstance(el, np.ndarray): return el.dtype elif isinstance(el, list): return check_y_dtype(el[0]) else: return _check_dtype(np.dtype(type(el))) # Output types are floats, due to both softmaxes and regression req. if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0: self._output_dtype = np.float32 elif y_is_dict: self._output_dtype = dict( [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())]) elif y is None: self._output_dtype = None else: self._output_dtype = check_y_dtype(y_first_el) def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self): """Returns a function, that will sample data and provide it to placeholders. Returns: A function that when called samples a random subset of batch size from x and y. """ self.stopped = False def _feed_dict_fn(): """Samples data and provides it to placeholders. Returns: `dict` of input and output tensors. 
""" def init_array(shape, dtype): """Initialize array of given shape or dict of shapes and dtype.""" if shape is None: return None elif isinstance(shape, dict): return dict( [(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())]) else: return np.zeros(shape, dtype=dtype) def put_data_array(dest, index, source=None, n_classes=None): """Puts data array into container.""" if source is None: dest = dest[:index] elif n_classes is not None and n_classes > 1: if len(self.output_shape) == 2: dest.itemset((index, source), 1.0) else: for idx, value in enumerate(source): dest.itemset(tuple([index, idx, value]), 1.0) else: if len(dest.shape) > 1: dest[index, :] = source else: dest[index] = source[0] if isinstance(source, list) else source return dest def put_data_array_or_dict(holder, index, data=None, n_classes=None): """Puts data array or data dictionary into container.""" if holder is None: return None if isinstance(holder, dict): if data is None: data = {k: None for k in holder.keys()} assert isinstance(data, dict) for k in holder.keys(): num_classes = n_classes[k] if (n_classes is not None and k in n_classes) else None holder[k] = put_data_array(holder[k], index, data[k], num_classes) else: holder = put_data_array(holder, index, data, n_classes) return holder if self.stopped: raise StopIteration inp = init_array(self.input_shape, self._input_dtype) out = init_array(self.output_shape, self._output_dtype) for i in xrange(self._batch_size): # Add handling when queue ends. try: next_inp = six.next(self._x) inp = put_data_array_or_dict(inp, i, next_inp, None) except StopIteration: self.stopped = True if i == 0: raise inp = put_data_array_or_dict(inp, i, None, None) out = put_data_array_or_dict(out, i, None, None) break if self._y is not None: next_out = six.next(self._y) out = put_data_array_or_dict(out, i, next_out, self.n_classes) # creating feed_dict if isinstance(inp, dict): feed_dict = dict([(self._input_placeholder[k].name, inp[k]) for k in list(self._input_placeholder.keys())]) else: feed_dict = {self._input_placeholder.name: inp} if self._y is not None: if isinstance(out, dict): feed_dict.update( dict([(self._output_placeholder[k].name, out[k]) for k in list(self._output_placeholder.keys())])) else: feed_dict.update({self._output_placeholder.name: out}) return feed_dict return _feed_dict_fn class DaskDataFeeder(object): """Data feeder for that reads data from dask.Series and dask.DataFrame. THIS CLASS IS DEPRECATED. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for general migration instructions. Numpy arrays can be serialized to disk and it's possible to do random seeks into them. DaskDataFeeder will remove requirement to have full dataset in the memory and still do random seeks for sampling of batches. """ @deprecated(None, 'Please feed input to tf.data to support dask.') def __init__(self, x, y, n_classes, batch_size, shuffle=True, random_state=None, epochs=None): """Initializes a DaskDataFeeder instance. Args: x: iterator that returns for each element, returns features. y: iterator that returns for each element, returns 1 or many classes / regression values. n_classes: indicator of how many classes the label has. batch_size: Mini batch size to accumulate. shuffle: Whether to shuffle the inputs. random_state: random state for RNG. Note that it will mutate so use a int value for this if you want consistent sized batches. epochs: Number of epochs to run. Attributes: x: input features. y: input label. n_classes: number of classes. 
batch_size: mini batch size to accumulate. input_shape: shape of the input. output_shape: shape of the output. input_dtype: dtype of input. output_dtype: dtype of output. Raises: ValueError: if `x` or `y` are `dict`, as they are not supported currently. """ if isinstance(x, dict) or isinstance(y, dict): raise ValueError( 'DaskDataFeeder does not support dictionaries at the moment.') # pylint: disable=invalid-name,super-init-not-called import dask.dataframe as dd # pylint: disable=g-import-not-at-top # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas self._x = x self._y = y # save column names self._x_columns = list(x.columns) if isinstance(y.columns[0], str): self._y_columns = list(y.columns) else: # deal with cases where two DFs have overlapped default numeric colnames self._y_columns = len(self._x_columns) + 1 self._y = self._y.rename(columns={y.columns[0]: self._y_columns}) # TODO(terrytangyuan): deal with unsupervised cases # combine into a data frame self.df = dd.multi.concat([self._x, self._y], axis=1) self.n_classes = n_classes x_count = x.count().compute()[0] x_shape = (x_count, len(self._x.columns)) y_shape = (x_count, len(self._y.columns)) # TODO(terrytangyuan): Add support for shuffle and epochs. self._shuffle = shuffle self.epochs = epochs self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) self.sample_fraction = self._batch_size / float(x_count) self._input_dtype = _check_dtype(self._x.dtypes[0]) self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns]) if random_state is None: self.random_state = 66 else: self.random_state = random_state def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self, input_placeholder, output_placeholder): """Returns a function, that will sample data and provide it to placeholders. Args: input_placeholder: tf.placeholder for input features mini batch. output_placeholder: tf.placeholder for output labels. Returns: A function that when called samples a random subset of batch size from x and y. """ def _feed_dict_fn(): """Samples data and provides it to placeholders.""" # TODO(ipolosukhin): option for with/without replacement (dev version of # dask) sample = self.df.random_split( [self.sample_fraction, 1 - self.sample_fraction], random_state=self.random_state) inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist() out = extract_pandas_matrix(sample[0][self._y_columns].compute()) # convert to correct dtype inp = np.array(inp, dtype=self._input_dtype) # one-hot encode out for each class for cross entropy loss if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if not isinstance(out, pd.Series): out = out.flatten() out_max = self._y.max().compute().values[0] encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype) encoded_out[np.arange(out.size), out] = 1 return {input_placeholder.name: inp, output_placeholder.name: encoded_out} return _feed_dict_fn
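The DataFeeder machinery above is easiest to follow with a small end-to-end usage sketch. The import path and the constructor signature DataFeeder(x, y, n_classes, batch_size=..., shuffle=..., epochs=...) are assumptions inferred from the docstrings and __init__ body above, and the module is deprecated TF 1.x graph-mode code, so treat this as a minimal sketch under those assumptions rather than a supported recipe:

import numpy as np
import tensorflow as tf
# Assumed import path for the deprecated contrib module shown above.
from tensorflow.contrib.learn.python.learn.learn_io.data_feeder import DataFeeder

x = np.random.rand(100, 3).astype(np.float32)   # 100 samples, 3 features
y = np.random.randint(0, 2, size=100)           # integer class labels
# n_classes=2 asks the feeder to one-hot encode labels batch by batch.
feeder = DataFeeder(x, y, n_classes=2, batch_size=32, shuffle=True, epochs=1)

with tf.Graph().as_default():
    inp, out = feeder.input_builder()     # builds input/output placeholders
    feed_fn = feeder.get_feed_dict_fn()   # closure producing one mini-batch per call
    with tf.Session() as sess:
        try:
            while True:
                # Keys in the returned feed_dict are placeholder names,
                # which tf.Session.run accepts directly in TF 1.x.
                batch_x, batch_y = sess.run([inp, out], feed_dict=feed_fn())
        except StopIteration:             # raised once max_epochs is exhausted
            pass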
apache-2.0
siutanwong/scikit-learn
sklearn/ensemble/tests/test_forest.py
48
35412
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import product import numpy as np from scipy.sparse import csr_matrix, csc_matrix, coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.grid_search import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.validation import check_random_state from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): # Check consistency on dataset iris. 
ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): # Check consistency on dataset boston house prices. ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", )): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): # Regression models should not have a classes_ attribute. r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): # Predict probabilities. ForestClassifier = FOREST_CLASSIFIERS[name] with np.errstate(divide="ignore"): clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_probability(): for name in FOREST_CLASSIFIERS: yield check_probability, name def check_importances(name, X, y): # Check variable importances. 
ForestClassifier = FOREST_CLASSIFIERS[name] for n_jobs in [1, 2]: clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) X_new = clf.transform(X, threshold="mean") assert_less(0 < X_new.shape[1], X.shape[1]) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=sample_weight) importances = clf.feature_importances_ assert_true(np.all(importances >= 0.0)) clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0) clf.fit(X, y, sample_weight=3 * sample_weight) importances_bis = clf.feature_importances_ assert_almost_equal(importances, importances_bis) def test_importances(): X, y = datasets.make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name in FOREST_CLASSIFIERS: yield check_importances, name, X, y def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): # Check that oob prediction is a good estimation of the generalization # error. # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # csc matrix yield check_oob_score, name, csc_matrix(iris.data), iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 # csc matrix yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): # Check that 
base trees can be grid-searched. for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): # Check pickability. ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): # Check estimators on multi-output problems. X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): # Test that n_classes_ and classes_ have proper shape. ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning a dense array. # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning the same array for both argument values. 
# Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. 
assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name, X, y): # Test precedence of max_leaf_nodes over max_depth. ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name, X, y def check_min_samples_leaf(name, X, y): # Test if leaves contain more than leaf_count training examples ForestEstimator = FOREST_ESTIMATORS[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): est = ForestEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def test_min_samples_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name, X, y def check_min_weight_fraction_leaf(name, X, y): # Test if leaves contain at least min_weight_fraction_leaf of the # training set ForestEstimator = FOREST_ESTIMATORS[name] rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) if isinstance(est, (RandomForestClassifier, RandomForestRegressor)): est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) X = X.astype(np.float32) for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name, X, y def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if 
name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(return_indicator=True, random_state=0, n_samples=40) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): # Check that it works no matter the memory layout est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y) est = ForestEstimator(random_state=0) est.fit(X_2d, y) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_raises(ValueError, est.predict, X) def test_1d_input(): X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target for name in FOREST_ESTIMATORS: yield check_1d_input, name, X, X_2d, y def check_class_weights(name): # Check class_weights resemble sample_weights behavior. 
ForestClassifier = FOREST_CLASSIFIERS[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = ForestClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "balanced" which should also have no effect clf4 = ForestClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in FOREST_CLASSIFIERS: yield check_class_weights, name def check_class_weight_balanced_and_bootstrap_multi_output(name): # Test class_weight works for multi-output""" ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(class_weight='balanced', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}], random_state=0) clf.fit(X, _y) # smoke test for subsample and balanced subsample clf = ForestClassifier(class_weight='balanced_subsample', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight='subsample', random_state=0) ignore_warnings(clf.fit)(X, _y) def test_class_weight_balanced_and_bootstrap_multi_output(): for name in FOREST_CLASSIFIERS: yield check_class_weight_balanced_and_bootstrap_multi_output, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. 
ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = ForestClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Warning warm_start with preset clf = ForestClassifier(class_weight='auto', warm_start=True, random_state=0) assert_warns(UserWarning, clf.fit, X, y) assert_warns(UserWarning, clf.fit, X, _y) # Not a list or preset for multi-output clf = ForestClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in FOREST_CLASSIFIERS: yield check_class_weight_errors, name def check_warm_start(name, random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): # Test if fit clears state and grows a new forest when warm_start==False. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): # Test if warm start second fit with smaller n_estimators raises error. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): # Test if warm start with equal n_estimators does nothing and returns the # same forest and raises a warning. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. 
clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): # Test that the warm start computes oob score when asked. X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1) ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name def test_dtype_convert(): classifier = RandomForestClassifier() CLASSES = 15 X = np.eye(CLASSES) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]] result = classifier.fit(X, y).predict(X) assert_array_equal(result, y)
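Several of the checks above (notably check_oob_score) reduce to a pattern that is easy to reproduce with the public scikit-learn API: fit a bootstrapped forest with oob_score=True on half the data and confirm that the out-of-bag estimate tracks held-out accuracy. A minimal standalone sketch of that pattern, using the same 0.1 tolerance the classifier test asserts:

import numpy as np
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier

iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)        # shuffle, as the tests do
X, y = iris.data[perm], iris.target[perm]

n = X.shape[0] // 2
clf = RandomForestClassifier(n_estimators=20, oob_score=True,
                             bootstrap=True, random_state=0)
clf.fit(X[:n], y[:n])                           # OOB score is computed during fit
held_out = clf.score(X[n:], y[n:])              # accuracy on the unseen half
assert abs(held_out - clf.oob_score_) < 0.1     # same tolerance as check_oob_score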
bsd-3-clause
ryfeus/lambda-packs
Tensorflow_LightGBM_Scipy_nightly/source/numpy/linalg/linalg.py
3
82838
"""Lite version of scipy.linalg. Notes ----- This module is a lite version of the linalg.py module in SciPy which contains high-level Python interface to the LAPACK library. The lite version only accesses the following LAPACK functions: dgesv, zgesv, dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf, zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. """ from __future__ import division, absolute_import, print_function __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError', 'multi_dot'] import operator import warnings from numpy.core import ( array, asarray, zeros, empty, empty_like, intc, single, double, csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot, add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite, finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs, atleast_2d, intp, asanyarray, object_, matmul, swapaxes, divide, count_nonzero, isnan ) from numpy.core.multiarray import normalize_axis_index from numpy.lib.twodim_base import triu, eye from numpy.linalg import lapack_lite, _umath_linalg # For Python2/3 compatibility _N = b'N' _V = b'V' _A = b'A' _S = b'S' _L = b'L' fortran_int = intc # Error object class LinAlgError(Exception): """ Generic Python-exception-derived object raised by linalg functions. General purpose exception class, derived from Python's exception.Exception class, programmatically raised in linalg functions when a Linear Algebra-related condition would prevent further correct execution of the function. Parameters ---------- None Examples -------- >>> from numpy import linalg as LA >>> LA.inv(np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "...linalg.py", line 350, in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) File "...linalg.py", line 249, in solve raise LinAlgError('Singular matrix') numpy.linalg.LinAlgError: Singular matrix """ pass def _determine_error_states(): errobj = geterrobj() bufsize = errobj[0] with errstate(invalid='call', over='ignore', divide='ignore', under='ignore'): invalid_call_errmask = geterrobj()[1] return [bufsize, invalid_call_errmask, None] # Dealing with errors in _umath_linalg _linalg_error_extobj = _determine_error_states() del _determine_error_states def _raise_linalgerror_singular(err, flag): raise LinAlgError("Singular matrix") def _raise_linalgerror_nonposdef(err, flag): raise LinAlgError("Matrix is not positive definite") def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): raise LinAlgError("Eigenvalues did not converge") def _raise_linalgerror_svd_nonconvergence(err, flag): raise LinAlgError("SVD did not converge") def _raise_linalgerror_lstsq(err, flag): raise LinAlgError("SVD did not converge in Linear Least Squares") def get_linalg_error_extobj(callback): extobj = list(_linalg_error_extobj) # make a copy extobj[2] = callback return extobj def _makearray(a): new = asarray(a) wrap = getattr(a, "__array_prepare__", new.__array_wrap__) return new, wrap def isComplexType(t): return issubclass(t, complexfloating) _real_types_map = {single : single, double : double, csingle : single, cdouble : double} _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _realType(t, default=double): return _real_types_map.get(t, default) def _complexType(t, default=cdouble): return _complex_types_map.get(t, default) def 
_linalgRealType(t): """Cast the type t to either double or cdouble.""" return double _complex_types_map = {single : csingle, double : cdouble, csingle : csingle, cdouble : cdouble} def _commonType(*arrays): # in lite version, use higher precision (always double or cdouble) result_type = single is_complex = False for a in arrays: if issubclass(a.dtype.type, inexact): if isComplexType(a.dtype.type): is_complex = True rt = _realType(a.dtype.type, default=None) if rt is None: # unsupported inexact scalar raise TypeError("array type %s is unsupported in linalg" % (a.dtype.name,)) else: rt = double if rt is double: result_type = double if is_complex: t = cdouble result_type = _complex_types_map[result_type] else: t = double return t, result_type # _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are). _fastCT = fastCopyAndTranspose def _to_native_byte_order(*arrays): ret = [] for arr in arrays: if arr.dtype.byteorder not in ('=', '|'): ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) else: ret.append(arr) if len(ret) == 1: return ret[0] else: return ret def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: if a.dtype.type is type: cast_arrays = cast_arrays + (_fastCT(a),) else: cast_arrays = cast_arrays + (_fastCT(a.astype(type)),) if len(cast_arrays) == 1: return cast_arrays[0] else: return cast_arrays def _assertRank2(*arrays): for a in arrays: if a.ndim != 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'two-dimensional' % a.ndim) def _assertRankAtLeast2(*arrays): for a in arrays: if a.ndim < 2: raise LinAlgError('%d-dimensional array given. Array must be ' 'at least two-dimensional' % a.ndim) def _assertSquareness(*arrays): for a in arrays: if max(a.shape) != min(a.shape): raise LinAlgError('Array must be square') def _assertNdSquareness(*arrays): for a in arrays: m, n = a.shape[-2:] if m != n: raise LinAlgError('Last 2 dimensions of the array must be square') def _assertFinite(*arrays): for a in arrays: if not (isfinite(a).all()): raise LinAlgError("Array must not contain infs or NaNs") def _isEmpty2d(arr): # check size first for efficiency return arr.size == 0 and product(arr.shape[-2:]) == 0 def _assertNoEmpty2d(*arrays): for a in arrays: if _isEmpty2d(a): raise LinAlgError("Arrays cannot be empty") def transpose(a): """ Transpose each matrix in a stack of matrices. Unlike np.transpose, this only swaps the last two axes, rather than all of them Parameters ---------- a : (...,M,N) array_like Returns ------- aT : (...,N,M) ndarray """ return swapaxes(a, -1, -2) # Linear equations def tensorsolve(a, b, axes=None): """ Solve the tensor equation ``a x = b`` for x. It is assumed that all indices of `x` are summed over in the product, together with the rightmost indices of `a`, as is done in, for example, ``tensordot(a, x, axes=b.ndim)``. Parameters ---------- a : array_like Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals the shape of that sub-tensor of `a` consisting of the appropriate number of its rightmost indices, and must be such that ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be 'square'). b : array_like Right-hand tensor, which can be of any shape. axes : tuple of ints, optional Axes in `a` to reorder to the right, before inversion. If None (default), no reordering is done. Returns ------- x : ndarray, shape Q Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). 
See Also -------- numpy.tensordot, tensorinv, numpy.einsum Examples -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> b = np.random.randn(2*3, 4) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True """ a, wrap = _makearray(a) b = asarray(b) an = a.ndim if axes is not None: allaxes = list(range(0, an)) for k in axes: allaxes.remove(k) allaxes.insert(an, k) a = a.transpose(allaxes) oldshape = a.shape[-(an-b.ndim):] prod = 1 for k in oldshape: prod *= k a = a.reshape(-1, prod) b = b.ravel() res = wrap(solve(a, b)) res.shape = oldshape return res def solve(a, b): """ Solve a linear matrix equation, or system of linear scalar equations. Computes the "exact" solution, `x`, of the well-determined, i.e., full rank, linear matrix equation `ax = b`. Parameters ---------- a : (..., M, M) array_like Coefficient matrix. b : {(..., M,), (..., M, K)}, array_like Ordinate or "dependent variable" values. Returns ------- x : {(..., M,), (..., M, K)} ndarray Solution to the system a x = b. Returned shape is identical to `b`. Raises ------ LinAlgError If `a` is singular or not square. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The solutions are computed using LAPACK routine _gesv `a` must be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent; if either is not true, use `lstsq` for the least-squares best "solution" of the system/equation. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 22. Examples -------- Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``: >>> a = np.array([[3,1], [1,2]]) >>> b = np.array([9,8]) >>> x = np.linalg.solve(a, b) >>> x array([ 2., 3.]) Check that the solution is correct: >>> np.allclose(np.dot(a, x), b) True """ a, _ = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) # We use the b = (..., M,) logic, only if the number of extra dimensions # match exactly if b.ndim == a.ndim - 1: gufunc = _umath_linalg.solve1 else: gufunc = _umath_linalg.solve signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) r = gufunc(a, b, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) def tensorinv(a, ind=2): """ Compute the 'inverse' of an N-dimensional array. The result is an inverse for `a` relative to the tensordot operation ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the tensordot operation. Parameters ---------- a : array_like Tensor to 'invert'. Its shape must be 'square', i. e., ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. ind : int, optional Number of first indices that are involved in the inverse sum. Must be a positive integer, default is 2. Returns ------- b : ndarray `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). 
See Also -------- numpy.tensordot, tensorsolve Examples -------- >>> a = np.eye(4*6) >>> a.shape = (4, 6, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) >>> b = np.random.randn(4, 6) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True >>> a = np.eye(4*6) >>> a.shape = (24, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) >>> b = np.random.randn(24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True """ a = asarray(a) oldshape = a.shape prod = 1 if ind > 0: invshape = oldshape[ind:] + oldshape[:ind] for k in oldshape[ind:]: prod *= k else: raise ValueError("Invalid ind argument.") a = a.reshape(prod, -1) ia = inv(a) return ia.reshape(*invshape) # Matrix inversion def inv(a): """ Compute the (multiplicative) inverse of a matrix. Given a square matrix `a`, return the matrix `ainv` satisfying ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``. Parameters ---------- a : (..., M, M) array_like Matrix to be inverted. Returns ------- ainv : (..., M, M) ndarray or matrix (Multiplicative) inverse of the matrix `a`. Raises ------ LinAlgError If `a` is not square or inversion fails. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. Examples -------- >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) >>> np.allclose(np.dot(a, ainv), np.eye(2)) True >>> np.allclose(np.dot(ainv, a), np.eye(2)) True If a is a matrix object, then the return value is a matrix as well: >>> ainv = inv(np.matrix(a)) >>> ainv matrix([[-2. , 1. ], [ 1.5, -0.5]]) Inverses of several matrices can be computed at once: >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) >>> inv(a) array([[[-2. , 1. ], [ 1.5, -0.5]], [[-5. , 2. ], [ 3. , -1. ]]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) return wrap(ainv.astype(result_t, copy=False)) def matrix_power(a, n): """ Raise a square matrix to the (integer) power `n`. For positive integers `n`, the power is computed by repeated matrix squarings and matrix multiplications. If ``n == 0``, the identity matrix of the same shape as M is returned. If ``n < 0``, the inverse is computed and then raised to the ``abs(n)``. .. note:: Stacks of object matrices are not currently supported. Parameters ---------- a : (..., M, M) array_like Matrix to be "powered." n : int The exponent can be any integer or long integer, positive, negative, or zero. Returns ------- a**n : (..., M, M) ndarray or matrix object The return value is the same shape and type as `M`; if the exponent is positive or zero then the type of the elements is the same as those of `M`. If the exponent is negative the elements are floating-point. Raises ------ LinAlgError For matrices that are not square or that (for negative powers) cannot be inverted numerically. Examples -------- >>> from numpy.linalg import matrix_power >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit >>> matrix_power(i, 3) # should = -i array([[ 0, -1], [ 1, 0]]) >>> matrix_power(i, 0) array([[1, 0], [0, 1]]) >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. 
elements array([[ 0., 1.], [-1., 0.]]) Somewhat more sophisticated example >>> q = np.zeros((4, 4)) >>> q[0:2, 0:2] = -i >>> q[2:4, 2:4] = i >>> q # one of the three quaternion units not equal to 1 array([[ 0., -1., 0., 0.], [ 1., 0., 0., 0.], [ 0., 0., 0., 1.], [ 0., 0., -1., 0.]]) >>> matrix_power(q, 2) # = -np.eye(4) array([[-1., 0., 0., 0.], [ 0., -1., 0., 0.], [ 0., 0., -1., 0.], [ 0., 0., 0., -1.]]) """ a = asanyarray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) try: n = operator.index(n) except TypeError: raise TypeError("exponent must be an integer") # Fall back on dot for object arrays. Object arrays are not supported by # the current implementation of matmul using einsum if a.dtype != object: fmatmul = matmul elif a.ndim == 2: fmatmul = dot else: raise NotImplementedError( "matrix_power not supported for stacks of object arrays") if n == 0: a = empty_like(a) a[...] = eye(a.shape[-2], dtype=a.dtype) return a elif n < 0: a = inv(a) n = abs(n) # short-cuts. if n == 1: return a elif n == 2: return fmatmul(a, a) elif n == 3: return fmatmul(fmatmul(a, a), a) # Use binary decomposition to reduce the number of matrix multiplications. # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to # increasing powers of 2, and multiply into the result as needed. z = result = None while n > 0: z = a if z is None else fmatmul(z, z) n, bit = divmod(n, 2) if bit: result = z if result is None else fmatmul(result, z) return result # Cholesky decomposition def cholesky(a): """ Cholesky decomposition. Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`, where `L` is lower-triangular and .H is the conjugate transpose operator (which is the ordinary transpose if `a` is real-valued). `a` must be Hermitian (symmetric if real-valued) and positive-definite. Only `L` is actually returned. Parameters ---------- a : (..., M, M) array_like Hermitian (symmetric if all elements are real), positive-definite input matrix. Returns ------- L : (..., M, M) array_like Upper or lower-triangular Cholesky factor of `a`. Returns a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the decomposition fails, for example, if `a` is not positive-definite. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The Cholesky decomposition is often used as a fast way of solving .. math:: A \\mathbf{x} = \\mathbf{b} (when `A` is both Hermitian/symmetric and positive-definite). First, we solve for :math:`\\mathbf{y}` in .. math:: L \\mathbf{y} = \\mathbf{b}, and then for :math:`\\mathbf{x}` in .. math:: L.H \\mathbf{x} = \\mathbf{y}. Examples -------- >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> L = np.linalg.cholesky(A) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> np.dot(L, L.T.conj()) # verify that L * L.H = A array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? 
>>> np.linalg.cholesky(A) # an ndarray object is returned array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) >>> # But a matrix object is returned if A is a matrix object >>> LA.cholesky(np.matrix(A)) matrix([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) """ extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef) gufunc = _umath_linalg.cholesky_lo a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = gufunc(a, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) # QR decompostion def qr(a, mode='reduced'): """ Compute the qr factorization of a matrix. Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is upper-triangular. Parameters ---------- a : array_like, shape (M, N) Matrix to be factored. mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional If K = min(M, N), then * 'reduced' : returns q, r with dimensions (M, K), (K, N) (default) * 'complete' : returns q, r with dimensions (M, M), (M, N) * 'r' : returns r only with dimensions (K, N) * 'raw' : returns h, tau with dimensions (N, M), (K,) * 'full' : alias of 'reduced', deprecated * 'economic' : returns h from 'raw', deprecated. The options 'reduced', 'complete, and 'raw' are new in numpy 1.8, see the notes for more information. The default is 'reduced', and to maintain backward compatibility with earlier versions of numpy both it and the old default 'full' can be omitted. Note that array h returned in 'raw' mode is transposed for calling Fortran. The 'economic' mode is deprecated. The modes 'full' and 'economic' may be passed using only the first letter for backwards compatibility, but all others must be spelled out. See the Notes for more explanation. Returns ------- q : ndarray of float or complex, optional A matrix with orthonormal columns. When mode = 'complete' the result is an orthogonal/unitary matrix depending on whether or not a is real/complex. The determinant may be either +/- 1 in that case. r : ndarray of float or complex, optional The upper-triangular matrix. (h, tau) : ndarrays of np.double or np.cdouble, optional The array h contains the Householder reflectors that generate q along with r. The tau array contains scaling factors for the reflectors. In the deprecated 'economic' mode only h is returned. Raises ------ LinAlgError If factoring fails. Notes ----- This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr, and zungqr. For more information on the qr factorization, see for example: http://en.wikipedia.org/wiki/QR_factorization Subclasses of `ndarray` are preserved except for the 'raw' mode. So if `a` is of type `matrix`, all the return values will be matrices too. New 'reduced', 'complete', and 'raw' options for mode were added in NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In addition the options 'full' and 'economic' were deprecated. Because 'full' was the previous default and 'reduced' is the new default, backward compatibility can be maintained by letting `mode` default. The 'raw' option was added so that LAPACK routines that can multiply arrays by q using the Householder reflectors can be used. Note that in this case the returned arrays are of type np.double or np.cdouble and the h array is transposed to be FORTRAN compatible. No routines using the 'raw' return are currently exposed by numpy, but some are available in lapack_lite and just await the necessary work. 
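As a small illustrative sketch of the shapes produced by the modes described
above (the 9-by-6 input below is arbitrary and only shapes are shown):

>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a, mode='reduced')
>>> q.shape, r.shape
((9, 6), (6, 6))
>>> q, r = np.linalg.qr(a, mode='complete')
>>> q.shape, r.shape
((9, 9), (9, 6))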
Examples -------- >>> a = np.random.randn(9, 6) >>> q, r = np.linalg.qr(a) >>> np.allclose(a, np.dot(q, r)) # a does equal qr True >>> r2 = np.linalg.qr(a, mode='r') >>> r3 = np.linalg.qr(a, mode='economic') >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full' True >>> # But only triu parts are guaranteed equal when mode='economic' >>> np.allclose(r, np.triu(r3[:6,:6], k=0)) True Example illustrating a common use of `qr`: solving of least squares problems What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points and you'll see that it should be y0 = 0, m = 1.) The answer is provided by solving the over-determined matrix equation ``Ax = b``, where:: A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) x = array([[y0], [m]]) b = array([[1], [0], [2], [1]]) If A = qr such that q is orthonormal (which is always possible via Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice, however, we simply use `lstsq`.) >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> A array([[0, 1], [1, 1], [1, 1], [2, 1]]) >>> b = np.array([1, 0, 2, 1]) >>> q, r = LA.qr(A) >>> p = np.dot(q.T, b) >>> np.dot(LA.inv(r), p) array([ 1.1e-16, 1.0e+00]) """ if mode not in ('reduced', 'complete', 'r', 'raw'): if mode in ('f', 'full'): # 2013-04-01, 1.8 msg = "".join(( "The 'full' option is deprecated in favor of 'reduced'.\n", "For backward compatibility let mode default.")) warnings.warn(msg, DeprecationWarning, stacklevel=2) mode = 'reduced' elif mode in ('e', 'economic'): # 2013-04-01, 1.8 msg = "The 'economic' option is deprecated." warnings.warn(msg, DeprecationWarning, stacklevel=2) mode = 'economic' else: raise ValueError("Unrecognized mode '%s'" % mode) a, wrap = _makearray(a) _assertRank2(a) _assertNoEmpty2d(a) m, n = a.shape t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) a = _to_native_byte_order(a) mn = min(m, n) tau = zeros((mn,), t) if isComplexType(t): lapack_routine = lapack_lite.zgeqrf routine_name = 'zgeqrf' else: lapack_routine = lapack_lite.dgeqrf routine_name = 'dgeqrf' # calculate optimal size of work data 'work' lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # do qr decomposition lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, n, a, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # handle modes that don't return q if mode == 'r': r = _fastCopyAndTranspose(result_t, a[:, :mn]) return wrap(triu(r)) if mode == 'raw': return a, tau if mode == 'economic': if t != result_t : a = a.astype(result_t, copy=False) return wrap(a.T) # generate q from a if mode == 'complete' and m > n: mc = m q = empty((m, m), t) else: mc = mn q = empty((n, m), t) q[:n] = a if isComplexType(t): lapack_routine = lapack_lite.zungqr routine_name = 'zungqr' else: lapack_routine = lapack_lite.dorgqr routine_name = 'dorgqr' # determine optimal lwork lwork = 1 work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) # compute q lwork = int(abs(work[0])) work = zeros((lwork,), t) results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0) if results['info'] != 0: raise LinAlgError('%s returns %d' % (routine_name, results['info'])) q = _fastCopyAndTranspose(result_t, 
q[:mc]) r = _fastCopyAndTranspose(result_t, a[:, :mc]) return wrap(q), wrap(triu(r)) # Eigenvalues def eigvals(a): """ Compute the eigenvalues of a general matrix. Main difference between `eigvals` and `eig`: the eigenvectors aren't returned. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues will be computed. Returns ------- w : (..., M,) ndarray The eigenvalues, each repeated according to its multiplicity. They are not necessarily ordered, nor are they necessarily real for real matrices. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eig : eigenvalues and right eigenvectors of general arrays eigvalsh : eigenvalues of symmetric or Hermitian arrays. eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. Examples -------- Illustration, using the fact that the eigenvalues of a diagonal matrix are its diagonal elements, that multiplying a matrix on the left by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose of `Q`), preserves the eigenvalues of the "middle" matrix. In other words, if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) (1.0, 1.0, 0.0) Now multiply a diagonal matrix by Q on one side and by Q.T on the other: >>> D = np.diag((-1,1)) >>> LA.eigvals(D) array([-1., 1.]) >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) array([ 1., -1.]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->D' if isComplexType(t) else 'd->D' w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj) if not isComplexType(t): if all(w.imag == 0): w = w.real result_t = _realType(result_t) else: result_t = _complexType(result_t) return w.astype(result_t, copy=False) def eigvalsh(a, UPLO='L'): """ Compute the eigenvalues of a Hermitian or real symmetric matrix. Main difference from eigh: the eigenvectors are not computed. Parameters ---------- a : (..., M, M) array_like A complex- or real-valued matrix whose eigenvalues are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Irrespective of this value only the real parts of the diagonal will be considered in the computation to preserve the notion of a Hermitian matrix. It therefore follows that the imaginary part of the diagonal will always be treated as zero. Returns ------- w : (..., M,) ndarray The eigenvalues in ascending order, each repeated according to its multiplicity. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays. eigvals : eigenvalues of general real or complex arrays. eig : eigenvalues and right eigenvectors of general real or complex arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
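A minimal sketch of those broadcasting rules (the stacked input below is
arbitrary): each 2-by-2 matrix in the stack contributes one row of
eigenvalues.

>>> import numpy as np
>>> a = np.stack([np.eye(2), 2 * np.eye(2)])
>>> a.shape
(2, 2, 2)
>>> np.linalg.eigvalsh(a).shape
(2, 2)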
The eigenvalues are computed using LAPACK routines _syevd, _heevd Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) array([ 0.17157288, 5.82842712]) >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a array([[ 5.+2.j, 9.-2.j], [ 0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() >>> # with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b array([[ 5.+0.j, 0.-2.j], [ 0.+2.j, 2.+0.j]]) >>> wa = LA.eigvalsh(a) >>> wb = LA.eigvals(b) >>> wa; wb array([ 1., 6.]) array([ 6.+0.j, 1.+0.j]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigvalsh_lo else: gufunc = _umath_linalg.eigvalsh_up a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->d' if isComplexType(t) else 'd->d' w = gufunc(a, signature=signature, extobj=extobj) return w.astype(_realType(result_t), copy=False) def _convertarray(a): t, result_t = _commonType(a) a = _fastCT(a.astype(t)) return a, t, result_t # Eigenvectors def eig(a): """ Compute the eigenvalues and right eigenvectors of a square array. Parameters ---------- a : (..., M, M) array Matrices for which the eigenvalues and right eigenvectors will be computed Returns ------- w : (..., M) array The eigenvalues, each repeated according to its multiplicity. The eigenvalues are not necessarily ordered. The resulting array will be of complex type, unless the imaginary part is zero in which case it will be cast to a real type. When `a` is real the resulting eigenvalues will be real (0 imaginary part) or occur in conjugate pairs v : (..., M, M) array The normalized (unit "length") eigenvectors, such that the column ``v[:,i]`` is the eigenvector corresponding to the eigenvalue ``w[i]``. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvals : eigenvalues of a non-symmetric array. eigh : eigenvalues and eigenvectors of a symmetric or Hermitian (conjugate symmetric) array. eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric) array. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. This is implemented using the _geev LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. The number `w` is an eigenvalue of `a` if there exists a vector `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. The array `v` of eigenvectors may not be of maximum rank, that is, some of the columns may be linearly dependent, although round-off error may obscure that fact. If the eigenvalues are all different, then theoretically the eigenvectors are linearly independent. Likewise, the (complex-valued) matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e., if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate transpose of `a`. Finally, it is emphasized that `v` consists of the *right* (as in right-hand side) eigenvectors of `a`. 
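For illustration, the defining relation for these right eigenvectors can be
checked numerically (a minimal sketch; the symmetric test matrix is
arbitrary):

>>> import numpy as np
>>> a = np.array([[2., 1.], [1., 2.]])
>>> w, v = np.linalg.eig(a)
>>> np.allclose(np.dot(a, v), v * w)
True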
A vector `y` satisfying ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, and, in general, the left and right eigenvectors of a matrix are not necessarily the (perhaps conjugate) transposes of each other. References ---------- G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, Various pp. Examples -------- >>> from numpy import linalg as LA (Almost) trivial example with real e-values and e-vectors. >>> w, v = LA.eig(np.diag((1, 2, 3))) >>> w; v array([ 1., 2., 3.]) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) Real matrix possessing complex e-values and e-vectors; note that the e-values are complex conjugates of each other. >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) >>> w; v array([ 1. + 1.j, 1. - 1.j]) array([[ 0.70710678+0.j , 0.70710678+0.j ], [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) Complex-valued matrix with real e-values (but complex-valued e-vectors); note that a.conj().T = a, i.e., a is Hermitian. >>> a = np.array([[1, 1j], [-1j, 1]]) >>> w, v = LA.eig(a) >>> w; v array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], [ 0.70710678+0.j , 0.00000000+0.70710678j]]) Be careful about round-off error! >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) >>> # Theor. e-values are 1 +/- 1e-9 >>> w, v = LA.eig(a) >>> w; v array([ 1., 1.]) array([[ 1., 0.], [ 0., 1.]]) """ a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) _assertFinite(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) signature = 'D->DD' if isComplexType(t) else 'd->DD' w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj) if not isComplexType(t) and all(w.imag == 0.0): w = w.real vt = vt.real result_t = _realType(result_t) else: result_t = _complexType(result_t) vt = vt.astype(result_t, copy=False) return w.astype(result_t, copy=False), wrap(vt) def eigh(a, UPLO='L'): """ Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix. Returns two objects, a 1-D array containing the eigenvalues of `a`, and a 2-D square array or matrix (depending on the input type) of the corresponding eigenvectors (in columns). Parameters ---------- a : (..., M, M) array Hermitian/Symmetric matrices whose eigenvalues and eigenvectors are to be computed. UPLO : {'L', 'U'}, optional Specifies whether the calculation is done with the lower triangular part of `a` ('L', default) or the upper triangular part ('U'). Irrespective of this value only the real parts of the diagonal will be considered in the computation to preserve the notion of a Hermitian matrix. It therefore follows that the imaginary part of the diagonal will always be treated as zero. Returns ------- w : (..., M) ndarray The eigenvalues in ascending order, each repeated according to its multiplicity. v : {(..., M, M) ndarray, (..., M, M) matrix} The column ``v[:, i]`` is the normalized eigenvector corresponding to the eigenvalue ``w[i]``. Will return a matrix object if `a` is a matrix object. Raises ------ LinAlgError If the eigenvalue computation does not converge. See Also -------- eigvalsh : eigenvalues of symmetric or Hermitian arrays. eig : eigenvalues and right eigenvectors for non-symmetric arrays. eigvals : eigenvalues of non-symmetric arrays. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. 
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd, _heevd The eigenvalues of real symmetric or complex Hermitian matrices are always real. [1]_ The array `v` of (column) eigenvectors is unitary and `a`, `w`, and `v` satisfy the equations ``dot(a, v[:, i]) = w[i] * v[:, i]``. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pg. 222. Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a array([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(a) >>> w; v array([ 0.17157288, 5.82842712]) array([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair array([ 0.+0.j, 0.+0.j]) >>> A = np.matrix(a) # what happens if input is a matrix object >>> A matrix([[ 1.+0.j, 0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(A) >>> w; v array([ 0.17157288, 5.82842712]) matrix([[-0.92387953+0.j , -0.38268343+0.j ], [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a array([[ 5.+2.j, 9.-2.j], [ 0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b array([[ 5.+0.j, 0.-2.j], [ 0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) >>> wa; wb array([ 1., 6.]) array([ 6.+0.j, 1.+0.j]) >>> va; vb array([[-0.44721360-0.j , -0.89442719+0.j ], [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]]) array([[ 0.89442719+0.j , 0.00000000-0.4472136j], [ 0.00000000-0.4472136j, 0.89442719+0.j ]]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): raise ValueError("UPLO argument must be 'L' or 'U'") a, wrap = _makearray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj( _raise_linalgerror_eigenvalues_nonconvergence) if UPLO == 'L': gufunc = _umath_linalg.eigh_lo else: gufunc = _umath_linalg.eigh_up signature = 'D->dD' if isComplexType(t) else 'd->dd' w, vt = gufunc(a, signature=signature, extobj=extobj) w = w.astype(_realType(result_t), copy=False) vt = vt.astype(result_t, copy=False) return w, wrap(vt) # Singular value decomposition def svd(a, full_matrices=True, compute_uv=True): """ Singular Value Decomposition. When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D array of `a`'s singular values. When `a` is higher-dimensional, SVD is applied in stacked mode as explained below. Parameters ---------- a : (..., M, N) array_like A real or complex array with ``a.ndim >= 2``. full_matrices : bool, optional If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and ``(..., N, N)``, respectively. Otherwise, the shapes are ``(..., M, K)`` and ``(..., K, N)``, respectively, where ``K = min(M, N)``. compute_uv : bool, optional Whether or not to compute `u` and `vh` in addition to `s`. True by default. Returns ------- u : { (..., M, M), (..., M, K) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions depends on the value of `full_matrices`. Only returned when `compute_uv` is True. 
s : (..., K) array Vector(s) with the singular values, within each vector sorted in descending order. The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. vh : { (..., N, N), (..., K, N) } array Unitary array(s). The first ``a.ndim - 2`` dimensions have the same size as those of the input `a`. The size of the last two dimensions depends on the value of `full_matrices`. Only returned when `compute_uv` is True. Raises ------ LinAlgError If SVD computation does not converge. Notes ----- .. versionchanged:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The decomposition is performed using LAPACK routine ``_gesdd``. SVD is usually described for the factorization of a 2D matrix :math:`A`. The higher-dimensional case will be discussed below. In the 2D case, SVD is written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`, :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s` contains the singular values of `a` and `u` and `vh` are unitary. The rows of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are the eigenvectors of :math:`A A^H`. In both cases the corresponding (possibly non-zero) eigenvalues are given by ``s**2``. If `a` has more than two dimensions, then broadcasting rules apply, as explained in :ref:`routines.linalg-broadcasting`. This means that SVD is working in "stacked" mode: it iterates over all indices of the first ``a.ndim - 2`` dimensions and for each combination SVD is applied to the last two indices. The matrix `a` can be reconstructed from the decomposition with either ``(u * s[..., None, :]) @ vh`` or ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the function ``np.matmul`` for python versions below 3.5.) If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are all the return values. 
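A common application sketch, not part of the original examples: keeping only
the `k` largest singular values gives a rank-`k` approximation of `a` (the
input below is arbitrary):

>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> a2 = np.dot(u[:, :2] * s[:2], vh[:2])  # keep the 2 largest singular values
>>> a2.shape
(9, 6)
>>> np.linalg.matrix_rank(a2)
2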
Examples -------- >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) Reconstruction based on full SVD, 2D case: >>> u, s, vh = np.linalg.svd(a, full_matrices=True) >>> u.shape, s.shape, vh.shape ((9, 9), (6,), (6, 6)) >>> np.allclose(a, np.dot(u[:, :6] * s, vh)) True >>> smat = np.zeros((9, 6), dtype=complex) >>> smat[:6, :6] = np.diag(s) >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) True Reconstruction based on reduced SVD, 2D case: >>> u, s, vh = np.linalg.svd(a, full_matrices=False) >>> u.shape, s.shape, vh.shape ((9, 6), (6,), (6, 6)) >>> np.allclose(a, np.dot(u * s, vh)) True >>> smat = np.diag(s) >>> np.allclose(a, np.dot(u, np.dot(smat, vh))) True Reconstruction based on full SVD, 4D case: >>> u, s, vh = np.linalg.svd(b, full_matrices=True) >>> u.shape, s.shape, vh.shape ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3)) >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh)) True >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh)) True Reconstruction based on reduced SVD, 4D case: >>> u, s, vh = np.linalg.svd(b, full_matrices=False) >>> u.shape, s.shape, vh.shape ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3)) >>> np.allclose(b, np.matmul(u * s[..., None, :], vh)) True >>> np.allclose(b, np.matmul(u, s[..., None] * vh)) True """ a, wrap = _makearray(a) _assertNoEmpty2d(a) _assertRankAtLeast2(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) m, n = a.shape[-2:] if compute_uv: if full_matrices: if m < n: gufunc = _umath_linalg.svd_m_f else: gufunc = _umath_linalg.svd_n_f else: if m < n: gufunc = _umath_linalg.svd_m_s else: gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' u, s, vh = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t, copy=False) s = s.astype(_realType(result_t), copy=False) vh = vh.astype(result_t, copy=False) return wrap(u), s, wrap(vh) else: if m < n: gufunc = _umath_linalg.svd_m else: gufunc = _umath_linalg.svd_n signature = 'D->d' if isComplexType(t) else 'd->d' s = gufunc(a, signature=signature, extobj=extobj) s = s.astype(_realType(result_t), copy=False) return s def cond(x, p=None): """ Compute the condition number of a matrix. This function is capable of returning the condition number using one of seven different norms, depending on the value of `p` (see Parameters below). Parameters ---------- x : (..., M, N) array_like The matrix whose condition number is sought. p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional Order of the norm: ===== ============================ p norm for matrices ===== ============================ None 2-norm, computed directly using the ``SVD`` 'fro' Frobenius norm inf max(sum(abs(x), axis=1)) -inf min(sum(abs(x), axis=1)) 1 max(sum(abs(x), axis=0)) -1 min(sum(abs(x), axis=0)) 2 2-norm (largest sing. value) -2 smallest singular value ===== ============================ inf means the numpy.inf object, and the Frobenius norm is the root-of-sum-of-squares norm. Returns ------- c : {float, inf} The condition number of the matrix. May be infinite. See Also -------- numpy.linalg.norm Notes ----- The condition number of `x` is defined as the norm of `x` times the norm of the inverse of `x` [1]_; the norm can be the usual L2-norm (root-of-sum-of-squares) or one of a number of other matrix norms. References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, Academic Press, Inc., 1980, pg. 285. 
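As an illustrative sketch of the default (2-norm) case, the condition number
is the ratio of the largest to the smallest singular value (the test matrix
is arbitrary):

>>> import numpy as np
>>> a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
>>> s = np.linalg.svd(a, compute_uv=False)
>>> np.allclose(np.linalg.cond(a), s.max() / s.min())
True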
Examples -------- >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a array([[ 1, 0, -1], [ 0, 1, 0], [ 1, 0, 1]]) >>> LA.cond(a) 1.4142135623730951 >>> LA.cond(a, 'fro') 3.1622776601683795 >>> LA.cond(a, np.inf) 2.0 >>> LA.cond(a, -np.inf) 1.0 >>> LA.cond(a, 1) 2.0 >>> LA.cond(a, -1) 1.0 >>> LA.cond(a, 2) 1.4142135623730951 >>> LA.cond(a, -2) 0.70710678118654746 >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) 0.70710678118654746 """ x = asarray(x) # in case we have a matrix if p is None or p == 2 or p == -2: s = svd(x, compute_uv=False) with errstate(all='ignore'): if p == -2: r = s[..., -1] / s[..., 0] else: r = s[..., 0] / s[..., -1] else: # Call inv(x) ignoring errors. The result array will # contain nans in the entries where inversion failed. _assertRankAtLeast2(x) _assertNdSquareness(x) t, result_t = _commonType(x) signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(all='ignore'): invx = _umath_linalg.inv(x, signature=signature) r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) r = r.astype(result_t, copy=False) # Convert nans to infs unless the original array had nan entries r = asarray(r) nan_mask = isnan(r) if nan_mask.any(): nan_mask &= ~isnan(x).any(axis=(-2, -1)) if r.ndim > 0: r[nan_mask] = Inf elif nan_mask: r[()] = Inf # Convention is to return scalars instead of 0d arrays if r.ndim == 0: r = r[()] return r def matrix_rank(M, tol=None, hermitian=False): """ Return matrix rank of array using SVD method Rank of the array is the number of singular values of the array that are greater than `tol`. .. versionchanged:: 1.14 Can now operate on stacks of matrices Parameters ---------- M : {(M,), (..., M, N)} array_like input vector or stack of matrices tol : (...) array_like, float, optional threshold below which SVD values are considered zero. If `tol` is None, and ``S`` is an array with singular values for `M`, and ``eps`` is the epsilon value for datatype of ``S``, then `tol` is set to ``S.max() * max(M.shape) * eps``. .. versionchanged:: 1.14 Broadcasted against the stack of matrices hermitian : bool, optional If True, `M` is assumed to be Hermitian (symmetric if real-valued), enabling a more efficient method for finding singular values. Defaults to False. .. versionadded:: 1.14 Notes ----- The default threshold to detect rank deficiency is a test on the magnitude of the singular values of `M`. By default, we identify singular values less than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with the symbols defined above). This is the algorithm MATLAB uses [1]. It also appears in *Numerical recipes* in the discussion of SVD solutions for linear least squares [2]. This default threshold is designed to detect rank deficiency accounting for the numerical errors of the SVD computation. Imagine that there is a column in `M` that is an exact (in floating point) linear combination of other columns in `M`. Computing the SVD on `M` will not produce a singular value exactly equal to 0 in general: any difference of the smallest SVD value from 0 will be caused by numerical imprecision in the calculation of the SVD. Our threshold for small SVD values takes this numerical imprecision into account, and the default threshold will detect such numerical rank deficiency. The threshold may declare a matrix `M` rank deficient even if the linear combination of some columns of `M` is not exactly equal to another column of `M` but only numerically very close to another column of `M`. 
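For instance (a minimal sketch with an arbitrary random matrix), a column
that is an exact linear combination of the others is flagged even though its
computed singular value is rarely exactly zero:

>>> import numpy as np
>>> a = np.random.randn(4, 3)
>>> b = np.hstack([a, a[:, :1] + a[:, 1:2]])  # last column = first + second
>>> np.linalg.matrix_rank(b)
3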
We chose our default threshold because it is in wide use. Other thresholds are possible. For example, elsewhere in the 2007 edition of *Numerical recipes* there is an alternative threshold of ``S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe this threshold as being based on "expected roundoff error" (p 71). The thresholds above deal with floating point roundoff error in the calculation of the SVD. However, you may have more information about the sources of error in `M` that would make you consider other tolerance values to detect *effective* rank deficiency. The most useful measure of the tolerance depends on the operations you intend to use on your matrix. For example, if your data come from uncertain measurements with uncertainties greater than floating point epsilon, choosing a tolerance near that uncertainty may be preferable. The tolerance may be absolute if the uncertainties are absolute rather than relative. References ---------- .. [1] MATLAB reference documention, "Rank" http://www.mathworks.com/help/techdoc/ref/rank.html .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, page 795. Examples -------- >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix >>> matrix_rank(I) 3 >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 1 >>> matrix_rank(np.zeros((4,))) 0 """ M = asarray(M) if M.ndim < 2: return int(not all(M==0)) if hermitian: S = abs(eigvalsh(M)) else: S = svd(M, compute_uv=False) if tol is None: tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps else: tol = asarray(tol)[..., newaxis] return count_nonzero(S > tol, axis=-1) # Generalized inverse def pinv(a, rcond=1e-15 ): """ Compute the (Moore-Penrose) pseudo-inverse of a matrix. Calculate the generalized inverse of a matrix using its singular-value decomposition (SVD) and including all *large* singular values. .. versionchanged:: 1.14 Can now operate on stacks of matrices Parameters ---------- a : (..., M, N) array_like Matrix or stack of matrices to be pseudo-inverted. rcond : (...) array_like of float Cutoff for small singular values. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Broadcasts against the stack of matrices Returns ------- B : (..., N, M) ndarray The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so is `B`. Raises ------ LinAlgError If the SVD computation does not converge. Notes ----- The pseudo-inverse of a matrix A, denoted :math:`A^+`, is defined as: "the matrix that 'solves' [the least-squares problem] :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular value decomposition of A, then :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting of A's so-called singular values, (followed, typically, by zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix consisting of the reciprocals of A's singular values (again, followed by zeros). [1]_ References ---------- .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, Academic Press, Inc., 1980, pp. 139-142. 
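The SVD-based construction described above can be written out directly; a
minimal sketch assuming no singular value falls below the cutoff (the input
is an arbitrary full-column-rank matrix):

>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> B = np.dot(vt.T * (1. / s), u.T)
>>> np.allclose(B, np.linalg.pinv(a))
True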
Examples -------- The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: >>> a = np.random.randn(9, 6) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True >>> np.allclose(B, np.dot(B, np.dot(a, B))) True """ a, wrap = _makearray(a) rcond = asarray(rcond) if _isEmpty2d(a): m, n = a.shape[-2:] res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) return wrap(res) a = a.conjugate() u, s, vt = svd(a, full_matrices=False) # discard small singular values cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) large = s > cutoff s = divide(1, s, where=large, out=s) s[~large] = 0 res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) return wrap(res) # Determinant def slogdet(a): """ Compute the sign and (natural) logarithm of the determinant of an array. If an array has a very small or very large determinant, then a call to `det` may overflow or underflow. This routine is more robust against such issues, because it computes the logarithm of the determinant rather than the determinant itself. Parameters ---------- a : (..., M, M) array_like Input array, has to be a square 2-D array. Returns ------- sign : (...) array_like A number representing the sign of the determinant. For a real matrix, this is 1, 0, or -1. For a complex matrix, this is a complex number with absolute value 1 (i.e., it is on the unit circle), or else 0. logdet : (...) array_like The natural log of the absolute value of the determinant. If the determinant is zero, then `sign` will be 0 and `logdet` will be -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``. See Also -------- det Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. .. versionadded:: 1.6.0 The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. Examples -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logdet) = np.linalg.slogdet(a) >>> (sign, logdet) (-1, 0.69314718055994529) >>> sign * np.exp(logdet) -2.0 Computing log-determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> sign, logdet = np.linalg.slogdet(a) >>> (sign, logdet) (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) >>> sign * np.exp(logdet) array([-2., -3., -8.]) This routine succeeds where ordinary `det` does not: >>> np.linalg.det(np.eye(500) * 0.1) 0.0 >>> np.linalg.slogdet(np.eye(500) * 0.1) (1, -1151.2925464970228) """ a = asarray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) real_t = _realType(result_t) signature = 'D->Dd' if isComplexType(t) else 'd->dd' sign, logdet = _umath_linalg.slogdet(a, signature=signature) sign = sign.astype(result_t, copy=False) logdet = logdet.astype(real_t, copy=False) return sign, logdet def det(a): """ Compute the determinant of an array. Parameters ---------- a : (..., M, M) array_like Input array to compute determinants for. Returns ------- det : (...) array_like Determinant of `a`. See Also -------- slogdet : Another way to represent the determinant, more suitable for large matrices where underflow/overflow may occur. Notes ----- .. versionadded:: 1.8.0 Broadcasting rules apply, see the `numpy.linalg` documentation for details. The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. 
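As a quick illustrative cross-check (arbitrary random input), `det` agrees
with the ``sign * np.exp(logdet)`` reconstruction documented for `slogdet`:

>>> import numpy as np
>>> a = np.random.randn(5, 5)
>>> sign, logdet = np.linalg.slogdet(a)
>>> np.allclose(np.linalg.det(a), sign * np.exp(logdet))
True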
Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 Computing determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> np.linalg.det(a) array([-2., -3., -8.]) """ a = asarray(a) _assertRankAtLeast2(a) _assertNdSquareness(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = _umath_linalg.det(a, signature=signature) r = r.astype(result_t, copy=False) return r # Linear Least Squares def lstsq(a, b, rcond="warn"): """ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : {(M,), (M, K)} array_like Ordinate or "dependent variable" values. If `b` is two-dimensional, the least-squares solution is calculated for each of the `K` columns of `b`. rcond : float, optional Cut-off ratio for small singular values of `a`. For the purposes of rank determination, singular values are treated as zero if they are smaller than `rcond` times the largest singular value of `a`. .. versionchanged:: 1.14.0 If not set, a FutureWarning is given. The previous default of ``-1`` will use the machine precision as `rcond` parameter, the new default will use the machine precision times `max(M, N)`. To silence the warning and use the new default, use ``rcond=None``, to keep using the old behavior, use ``rcond=-1``. Returns ------- x : {(N,), (N, K)} ndarray Least-squares solution. If `b` is two-dimensional, the solutions are in the `K` columns of `x`. residuals : {(1,), (K,), (0,)} ndarray Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. If the rank of `a` is < N or M <= N, this is an empty array. If `b` is 1-dimensional, this is a (1,) shape array. Otherwise the shape is (K,). rank : int Rank of matrix `a`. s : (min(M, N),) ndarray Singular values of `a`. Raises ------ LinAlgError If computation does not converge. Notes ----- If `b` is a matrix, then all array results are returned as matrices. Examples -------- Fit a line, ``y = mx + c``, through some noisy data-points: >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) By examining the coefficients, we see that the line should have a gradient of roughly 1 and cut the y-axis at, more or less, -1. We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` and ``p = [[m], [c]]``. 
Now use `lstsq` to solve for `p`: >>> A = np.vstack([x, np.ones(len(x))]).T >>> A array([[ 0., 1.], [ 1., 1.], [ 2., 1.], [ 3., 1.]]) >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0] >>> print(m, c) 1.0 -0.95 Plot the data along with the fitted line: >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'o', label='Original data', markersize=10) >>> plt.plot(x, m*x + c, 'r', label='Fitted line') >>> plt.legend() >>> plt.show() """ a, _ = _makearray(a) b, wrap = _makearray(b) is_1d = b.ndim == 1 if is_1d: b = b[:, newaxis] _assertRank2(a, b) _assertNoEmpty2d(a, b) # TODO: relax this constraint m, n = a.shape[-2:] m2, n_rhs = b.shape[-2:] if m != m2: raise LinAlgError('Incompatible dimensions') t, result_t = _commonType(a, b) real_t = _linalgRealType(t) result_real_t = _realType(result_t) # Determine default rcond value if rcond == "warn": # 2017-08-19, 1.14.0 warnings.warn("`rcond` parameter will change to the default of " "machine precision times ``max(M, N)`` where M and N " "are the input matrix dimensions.\n" "To use the future default and silence this warning " "we advise to pass `rcond=None`, to keep using the old, " "explicitly pass `rcond=-1`.", FutureWarning, stacklevel=2) rcond = -1 if rcond is None: rcond = finfo(t).eps * max(n, m) if m <= n: gufunc = _umath_linalg.lstsq_m else: gufunc = _umath_linalg.lstsq_n signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq) x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj) # remove the axis we added if is_1d: x = x.squeeze(axis=-1) # we probably should squeeze resids too, but we can't # without breaking compatibility. # as documented if rank != n or m <= n: resids = array([], result_real_t) # coerce output arrays s = s.astype(result_real_t, copy=False) resids = resids.astype(result_real_t, copy=False) x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed return wrap(x), wrap(resids), rank, s def _multi_svd_norm(x, row_axis, col_axis, op): """Compute a function of the singular values of the 2-D matrices in `x`. This is a private utility function used by numpy.linalg.norm(). Parameters ---------- x : ndarray row_axis, col_axis : int The axes of `x` that hold the 2-D matrices. op : callable This should be either numpy.amin or numpy.amax or numpy.sum. Returns ------- result : float or ndarray If `x` is 2-D, the return values is a float. Otherwise, it is an array with ``x.ndim - 2`` dimensions. The return values are either the minimum or maximum or sum of the singular values of the matrices, depending on whether `op` is `numpy.amin` or `numpy.amax` or `numpy.sum`. """ y = moveaxis(x, (row_axis, col_axis), (-2, -1)) result = op(svd(y, compute_uv=0), axis=-1) return result def norm(x, ord=None, axis=None, keepdims=False): """ Matrix or vector norm. This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms (described below), depending on the value of the ``ord`` parameter. Parameters ---------- x : array_like Input array. If `axis` is None, `x` must be 1-D or 2-D. ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional Order of the norm (see table under ``Notes``). inf means numpy's `inf` object. axis : {int, 2-tuple of ints, None}, optional If `axis` is an integer, it specifies the axis of `x` along which to compute the vector norms. If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. 
If `axis` is None then either a vector norm (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned. .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are normed over are left in the result as dimensions with size one. With this option the result will broadcast correctly against the original `x`. .. versionadded:: 1.10.0 Returns ------- n : float or ndarray Norm of the matrix or vector(s). Notes ----- For values of ``ord <= 0``, the result is, strictly speaking, not a mathematical 'norm', but it may still be useful for various numerical purposes. The following norms can be calculated: ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm -- 'nuc' nuclear norm -- inf max(sum(abs(x), axis=1)) max(abs(x)) -inf min(sum(abs(x), axis=1)) min(abs(x)) 0 -- sum(x != 0) 1 max(sum(abs(x), axis=0)) as below -1 min(sum(abs(x), axis=0)) as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other -- sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== The Frobenius norm is given by [1]_: :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` The nuclear norm is the sum of the singular values. References ---------- .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 Examples -------- >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], [-1, 0, 1], [ 2, 3, 4]]) >>> LA.norm(a) 7.745966692414834 >>> LA.norm(b) 7.745966692414834 >>> LA.norm(b, 'fro') 7.745966692414834 >>> LA.norm(a, np.inf) 4.0 >>> LA.norm(b, np.inf) 9.0 >>> LA.norm(a, -np.inf) 0.0 >>> LA.norm(b, -np.inf) 2.0 >>> LA.norm(a, 1) 20.0 >>> LA.norm(b, 1) 7.0 >>> LA.norm(a, -1) -4.6566128774142013e-010 >>> LA.norm(b, -1) 6.0 >>> LA.norm(a, 2) 7.745966692414834 >>> LA.norm(b, 2) 7.3484692283495345 >>> LA.norm(a, -2) nan >>> LA.norm(b, -2) 1.8570331885190563e-016 >>> LA.norm(a, 3) 5.8480354764257312 >>> LA.norm(a, -3) nan Using the `axis` argument to compute vector norms: >>> c = np.array([[ 1, 2, 3], ... [-1, 1, 4]]) >>> LA.norm(c, axis=0) array([ 1.41421356, 2.23606798, 5. ]) >>> LA.norm(c, axis=1) array([ 3.74165739, 4.24264069]) >>> LA.norm(c, ord=1, axis=1) array([ 6., 6.]) Using the `axis` argument to compute matrix norms: >>> m = np.arange(8).reshape(2,2,2) >>> LA.norm(m, axis=(1,2)) array([ 3.74165739, 11.22497216]) >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) (3.7416573867739413, 11.224972160321824) """ x = asarray(x) if not issubclass(x.dtype.type, (inexact, object_)): x = x.astype(float) # Immediately handle some default, simple, fast, and common cases. if axis is None: ndim = x.ndim if ((ord is None) or (ord in ('f', 'fro') and ndim == 2) or (ord == 2 and ndim == 1)): x = x.ravel(order='K') if isComplexType(x.dtype.type): sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag) else: sqnorm = dot(x, x) ret = sqrt(sqnorm) if keepdims: ret = ret.reshape(ndim*[1]) return ret # Normalize the `axis` argument to a tuple. 
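    # From here on `axis` is always a tuple: a 1-tuple selects the
    # vector-norm branch below, a 2-tuple selects the matrix-norm branch,
    # and any other length raises ValueError at the end of the function.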
nd = x.ndim if axis is None: axis = tuple(range(nd)) elif not isinstance(axis, tuple): try: axis = int(axis) except Exception: raise TypeError("'axis' must be None, an integer or a tuple of integers") axis = (axis,) if len(axis) == 1: if ord == Inf: return abs(x).max(axis=axis, keepdims=keepdims) elif ord == -Inf: return abs(x).min(axis=axis, keepdims=keepdims) elif ord == 0: # Zero norm return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims) elif ord == 1: # special case for speedup return add.reduce(abs(x), axis=axis, keepdims=keepdims) elif ord is None or ord == 2: # special case for speedup s = (x.conj() * x).real return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) else: try: ord + 1 except TypeError: raise ValueError("Invalid norm order for vectors.") absx = abs(x) absx **= ord ret = add.reduce(absx, axis=axis, keepdims=keepdims) ret **= (1 / ord) return ret elif len(axis) == 2: row_axis, col_axis = axis row_axis = normalize_axis_index(row_axis, nd) col_axis = normalize_axis_index(col_axis, nd) if row_axis == col_axis: raise ValueError('Duplicate axes given.') if ord == 2: ret = _multi_svd_norm(x, row_axis, col_axis, amax) elif ord == -2: ret = _multi_svd_norm(x, row_axis, col_axis, amin) elif ord == 1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis) elif ord == Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis) elif ord == -1: if col_axis > row_axis: col_axis -= 1 ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) elif ord == -Inf: if row_axis > col_axis: row_axis -= 1 ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) elif ord in [None, 'fro', 'f']: ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) elif ord == 'nuc': ret = _multi_svd_norm(x, row_axis, col_axis, sum) else: raise ValueError("Invalid norm order for matrices.") if keepdims: ret_shape = list(x.shape) ret_shape[axis[0]] = 1 ret_shape[axis[1]] = 1 ret = ret.reshape(ret_shape) return ret else: raise ValueError("Improper number of dimensions to norm.") # multi_dot def multi_dot(arrays): """ Compute the dot product of two or more arrays in a single function call, while automatically selecting the fastest evaluation order. `multi_dot` chains `numpy.dot` and uses optimal parenthesization of the matrices [1]_ [2]_. Depending on the shapes of the matrices, this can speed up the multiplication a lot. If the first argument is 1-D it is treated as a row vector. If the last argument is 1-D it is treated as a column vector. The other arguments must be 2-D. Think of `multi_dot` as:: def multi_dot(arrays): return functools.reduce(np.dot, arrays) Parameters ---------- arrays : sequence of array_like If the first argument is 1-D it is treated as row vector. If the last argument is 1-D it is treated as column vector. The other arguments must be 2-D. Returns ------- output : ndarray Returns the dot product of the supplied arrays. See Also -------- dot : dot multiplication with two arguments. References ---------- .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 .. 
[2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication Examples -------- `multi_dot` allows you to write:: >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random(10000, 100) >>> B = np.random.random(100, 1000) >>> C = np.random.random(1000, 5) >>> D = np.random.random(5, 333) >>> # the actual dot multiplication >>> multi_dot([A, B, C, D]) instead of:: >>> np.dot(np.dot(np.dot(A, B), C), D) >>> # or >>> A.dot(B).dot(C).dot(D) Notes ----- The cost for a matrix multiplication can be calculated with the following function:: def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1] Let's assume we have three matrices :math:`A_{10x100}, B_{100x5}, C_{5x50}`. The costs for the two different parenthesizations are as follows:: cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 """ n = len(arrays) # optimization only makes sense for len(arrays) > 2 if n < 2: raise ValueError("Expecting at least two arrays.") elif n == 2: return dot(arrays[0], arrays[1]) arrays = [asanyarray(a) for a in arrays] # save original ndim to reshape the result array into the proper form later ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim # Explicitly convert vectors to 2D arrays to keep the logic of the internal # _multi_dot_* functions as simple as possible. if arrays[0].ndim == 1: arrays[0] = atleast_2d(arrays[0]) if arrays[-1].ndim == 1: arrays[-1] = atleast_2d(arrays[-1]).T _assertRank2(*arrays) # _multi_dot_three is much faster than _multi_dot_matrix_chain_order if n == 3: result = _multi_dot_three(arrays[0], arrays[1], arrays[2]) else: order = _multi_dot_matrix_chain_order(arrays) result = _multi_dot(arrays, order, 0, n - 1) # return proper shape if ndim_first == 1 and ndim_last == 1: return result[0, 0] # scalar elif ndim_first == 1 or ndim_last == 1: return result.ravel() # 1-D else: return result def _multi_dot_three(A, B, C): """ Find the best order for three arrays and do the multiplication. For three arguments `_multi_dot_three` is approximately 15 times faster than `_multi_dot_matrix_chain_order` """ a0, a1b0 = A.shape b1c0, c1 = C.shape # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 cost1 = a0 * b1c0 * (a1b0 + c1) # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 cost2 = a1b0 * c1 * (a0 + b1c0) if cost1 < cost2: return dot(dot(A, B), C) else: return dot(A, dot(B, C)) def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ Return a np.array that encodes the optimal order of mutiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. Also return the cost matrix if `return_costs` is `True` The implementation CLOSELY follows Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. 
cost[i, j] = min([ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) for k in range(i, j)]) """ n = len(arrays) # p stores the dimensions of the matrices # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] # m is a matrix of costs of the subproblems # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} m = zeros((n, n), dtype=double) # s is the actual ordering # s[i, j] is the value of k at which we split the product A_i..A_j s = empty((n, n), dtype=intp) for l in range(1, n): for i in range(n - l): j = i + l m[i, j] = Inf for k in range(i, j): q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1] if q < m[i, j]: m[i, j] = q s[i, j] = k # Note that Cormen uses 1-based index return (s, m) if return_costs else s def _multi_dot(arrays, order, i, j): """Actually do the multiplication with the given order.""" if i == j: return arrays[i] else: return dot(_multi_dot(arrays, order, i, order[i, j]), _multi_dot(arrays, order, order[i, j] + 1, j))
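# --- Illustrative sketch (not part of the original numpy.linalg source) ---
# multi_dot() only chooses *where* the parentheses go; the value of the chain
# product is unchanged. The standalone helper below demonstrates this with
# the public NumPy API, using the shapes from the cost example above
# (cost((AB)C) = 7500 vs. cost(A(BC)) = 75000 scalar multiplications).
def _multi_dot_order_sketch():
    import functools
    import numpy as np
    A = np.random.randn(10, 100)
    B = np.random.randn(100, 5)
    C = np.random.randn(5, 50)
    # Same result as left-to-right reduction with np.dot, up to round-off.
    return np.allclose(np.linalg.multi_dot([A, B, C]),
                       functools.reduce(np.dot, [A, B, C]))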
mit
kelseyoo14/Wander
venv_2_7/lib/python2.7/site-packages/numpy/lib/polynomial.py
82
37957
""" Functions to operate on polynomials. """ from __future__ import division, absolute_import, print_function __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', 'polyfit', 'RankWarning'] import re import warnings import numpy.core.numeric as NX from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, ones) from numpy.lib.twodim_base import diag, vander from numpy.lib.function_base import trim_zeros, sort_complex from numpy.lib.type_check import iscomplex, real, imag, mintypecode from numpy.linalg import eigvals, lstsq, inv class RankWarning(UserWarning): """ Issued by `polyfit` when the Vandermonde matrix is rank deficient. For more information, a way to suppress the warning, and an example of `RankWarning` being issued, see `polyfit`. """ pass def poly(seq_of_zeros): """ Find the coefficients of a polynomial with the given sequence of roots. Returns the coefficients of the polynomial whose leading coefficient is one for the given sequence of zeros (multiple roots must be included in the sequence as many times as their multiplicity; see Examples). A square matrix (or array, which will be treated as a matrix) can also be given, in which case the coefficients of the characteristic polynomial of the matrix are returned. Parameters ---------- seq_of_zeros : array_like, shape (N,) or (N, N) A sequence of polynomial roots, or a square array or matrix object. Returns ------- c : ndarray 1D array of polynomial coefficients from highest to lowest degree: ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` where c[0] always equals 1. Raises ------ ValueError If input is the wrong shape (the input must be a 1-D or square 2-D array). See Also -------- polyval : Evaluate a polynomial at a point. roots : Return the roots of a polynomial. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- Specifying the roots of a polynomial still leaves one degree of freedom, typically represented by an undetermined leading coefficient. [1]_ In the case of this function, that coefficient - the first one in the returned array - is always taken as one. (If for some reason you have one other point, the only automatic way presently to leverage that information is to use ``polyfit``.) The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` matrix **A** is given by :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, where **I** is the `n`-by-`n` identity matrix. [2]_ References ---------- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry, Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," Academic Press, pg. 182, 1980. Examples -------- Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example array([1, 0, 0, 0]) The line above represents z**3 + 0*z**2 + 0*z + 0. >>> np.poly((-1./2, 0, 1./2)) array([ 1. , 0. , -0.25, 0. ]) The line above represents z**3 - z/4 >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) array([ 1. , -0.77086955, 0.08618131, 0. ]) #random Given a square array object: >>> P = np.array([[0, 1./3], [-1./2, 0]]) >>> np.poly(P) array([ 1. , 0. , 0.16666667]) Or a square matrix object: >>> np.poly(np.matrix(P)) array([ 1. , 0. , 0.16666667]) Note how in all cases the leading coefficient is always 1. 
""" seq_of_zeros = atleast_1d(seq_of_zeros) sh = seq_of_zeros.shape if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: seq_of_zeros = eigvals(seq_of_zeros) elif len(sh) == 1: dt = seq_of_zeros.dtype # Let object arrays slip through, e.g. for arbitrary precision if dt != object: seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) else: raise ValueError("input must be 1d or non-empty square 2d array.") if len(seq_of_zeros) == 0: return 1.0 dt = seq_of_zeros.dtype a = ones((1,), dtype=dt) for k in range(len(seq_of_zeros)): a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), mode='full') if issubclass(a.dtype.type, NX.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = NX.asarray(seq_of_zeros, complex) pos_roots = sort_complex(NX.compress(roots.imag > 0, roots)) neg_roots = NX.conjugate(sort_complex( NX.compress(roots.imag < 0, roots))) if (len(pos_roots) == len(neg_roots) and NX.alltrue(neg_roots == pos_roots)): a = a.real.copy() return a def roots(p): """ Return the roots of a polynomial with coefficients given in p. The values in the rank-1 array `p` are coefficients of a polynomial. If the length of `p` is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the complex roots of the polynomial. Raises ------ ValueError When `p` cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Evaluate a polynomial at a point. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) """ # If input is scalar, this makes it an array p = atleast_1d(p) if len(p.shape) != 1: raise ValueError("Input must be a rank-1 array.") # find non-zero array entries non_zero = NX.nonzero(NX.ravel(p))[0] # Return an empty array if polynomial is all zeros if len(non_zero) == 0: return NX.array([]) # find the number of trailing zeros -- this is the number of roots at 0. trailing_zeros = len(p) - non_zero[-1] - 1 # strip leading and trailing zeros p = p[int(non_zero[0]):int(non_zero[-1])+1] # casting: if incoming array isn't floating point, make it floating point. if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): p = p.astype(float) N = len(p) if N > 1: # build companion matrix and find its eigenvalues (the roots) A = diag(NX.ones((N-2,), p.dtype), -1) A[0,:] = -p[1:] / p[0] roots = eigvals(A) else: roots = NX.array([]) # tack any zeros onto the back of the array roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) return roots def polyint(p, m=1, k=None): """ Return an antiderivative (indefinite integral) of a polynomial. The returned order `m` antiderivative `P` of polynomial `p` satisfies :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` integration constants `k`. The constants determine the low-order polynomial part .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. Parameters ---------- p : array_like or poly1d Polynomial to differentiate. 
A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of the antiderivative. (Default: 1) k : list of `m` scalars or scalar, optional Integration constants. They are given in the order of integration: those corresponding to highest-order terms come first. If ``None`` (default), all constants are assumed to be zero. If `m = 1`, a single scalar can be given instead of a list. See Also -------- polyder : derivative of a polynomial poly1d.integ : equivalent method Examples -------- The defining property of the antiderivative: >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P poly1d([ 0.33333333, 0.5 , 1. , 0. ]) >>> np.polyder(P) == p True The integration constants default to zero, but can be specified: >>> P = np.polyint(p, 3) >>> P(0) 0.0 >>> np.polyder(P)(0) 0.0 >>> np.polyder(P, 2)(0) 0.0 >>> P = np.polyint(p, 3, k=[6,5,3]) >>> P poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) Note that 3 = 6 / 2!, and that the constants are given in the order of integrations. Constant of the highest-order polynomial term comes first: >>> np.polyder(P, 2)(0) 6.0 >>> np.polyder(P, 1)(0) 5.0 >>> P(0) 3.0 """ m = int(m) if m < 0: raise ValueError("Order of integral must be positive (see polyder)") if k is None: k = NX.zeros(m, float) k = atleast_1d(k) if len(k) == 1 and m > 1: k = k[0]*NX.ones(m, float) if len(k) < m: raise ValueError( "k must be a scalar or a rank-1 array of length 1 or >m.") truepoly = isinstance(p, poly1d) p = NX.asarray(p) if m == 0: if truepoly: return poly1d(p) return p else: # Note: this must work also with object and integer arrays y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) val = polyint(y, m - 1, k=k[1:]) if truepoly: return poly1d(val) return val def polyder(p, m=1): """ Return the derivative of the specified order of a polynomial. Parameters ---------- p : poly1d or sequence Polynomial to differentiate. A sequence is interpreted as polynomial coefficients, see `poly1d`. m : int, optional Order of differentiation (default: 1) Returns ------- der : poly1d A new polynomial representing the derivative. See Also -------- polyint : Anti-derivative of a polynomial. poly1d : Class for one-dimensional polynomials. Examples -------- The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: >>> p = np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 poly1d([3, 2, 1]) which evaluates to: >>> p2(2.) 17.0 We can verify this, approximating the derivative with ``(f(x + h) - f(x))/h``: >>> (p(2. + 0.001) - p(2.)) / 0.001 17.007000999997857 The fourth-order derivative of a 3rd-order polynomial is zero: >>> np.polyder(p, 2) poly1d([6, 2]) >>> np.polyder(p, 3) poly1d([6]) >>> np.polyder(p, 4) poly1d([ 0.]) """ m = int(m) if m < 0: raise ValueError("Order of derivative must be positive (see polyint)") truepoly = isinstance(p, poly1d) p = NX.asarray(p) n = len(p) - 1 y = p[:-1] * NX.arange(n, 0, -1) if m == 0: val = p else: val = polyder(y, m - 1) if truepoly: val = poly1d(val) return val def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): """ Least squares polynomial fit. Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` to points `(x, y)`. Returns a vector of coefficients `p` that minimises the squared error. Parameters ---------- x : array_like, shape (M,) x-coordinates of the M sample points ``(x[i], y[i])``. y : array_like, shape (M,) or (M, K) y-coordinates of the sample points. 
Several data sets of sample points sharing the same x-coordinates can be fitted at once by passing in a 2D-array that contains one dataset per column. deg : int Degree of the fitting polynomial rcond : float, optional Relative condition number of the fit. Singular values smaller than this relative to the largest singular value will be ignored. The default value is len(x)*eps, where eps is the relative precision of the float type, about 2e-16 in most cases. full : bool, optional Switch determining nature of return value. When it is False (the default) just the coefficients are returned, when True diagnostic information from the singular value decomposition is also returned. w : array_like, shape (M,), optional weights to apply to the y-coordinates of the sample points. cov : bool, optional Return the estimate and the covariance matrix of the estimate If full is True, then cov is not returned. Returns ------- p : ndarray, shape (M,) or (M, K) Polynomial coefficients, highest power first. If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``. residuals, rank, singular_values, rcond : Present only if `full` = True. Residuals of the least-squares fit, the effective rank of the scaled Vandermonde coefficient matrix, its singular values, and the specified value of `rcond`. For more details, see `linalg.lstsq`. V : ndarray, shape (M,M) or (M,M,K) Present only if `full` = False and `cov`=True. The covariance matrix of the polynomial coefficient estimates. The diagonal of this matrix are the variance estimates for each coefficient. If y is a 2-D array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]`` Warns ----- RankWarning The rank of the coefficient matrix in the least-squares fit is deficient. The warning is only raised if `full` = False. The warnings can be turned off by >>> import warnings >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- polyval : Computes polynomial values. linalg.lstsq : Computes a least-squares fit. scipy.interpolate.UnivariateSpline : Computes spline fits. Notes ----- The solution minimizes the squared error .. math :: E = \\sum_{j=0}^k |p(x_j) - y_j|^2 in the equations:: x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] ... x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] The coefficient matrix of the coefficients `p` is a Vandermonde matrix. `polyfit` issues a `RankWarning` when the least-squares fit is badly conditioned. This implies that the best fit is not well-defined due to numerical error. The results may be improved by lowering the polynomial degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter can also be set to a value smaller than its default, but the resulting fit may be spurious: including contributions from the small singular values can add numerical noise to the result. Note that fitting polynomial coefficients is inherently badly conditioned when the degree of the polynomial is large or the interval of sample points is badly centered. The quality of the fit should always be checked in these cases. When polynomial fits are not satisfactory, splines may be a good alternative. References ---------- .. [1] Wikipedia, "Curve fitting", http://en.wikipedia.org/wiki/Curve_fitting .. 
[2] Wikipedia, "Polynomial interpolation", http://en.wikipedia.org/wiki/Polynomial_interpolation Examples -------- >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) >>> z = np.polyfit(x, y, 3) >>> z array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) It is convenient to use `poly1d` objects for dealing with polynomials: >>> p = np.poly1d(z) >>> p(0.5) 0.6143849206349179 >>> p(3.5) -0.34732142857143039 >>> p(10) 22.579365079365115 High-order polynomials may oscillate wildly: >>> p30 = np.poly1d(np.polyfit(x, y, 30)) /... RankWarning: Polyfit may be poorly conditioned... >>> p30(4) -0.80000000000000204 >>> p30(5) -0.99999999999999445 >>> p30(4.5) -0.10547061179440398 Illustration: >>> import matplotlib.pyplot as plt >>> xp = np.linspace(-2, 6, 100) >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') >>> plt.ylim(-2,2) (-2, 2) >>> plt.show() """ order = int(deg) + 1 x = NX.asarray(x) + 0.0 y = NX.asarray(y) + 0.0 # check arguments. if deg < 0: raise ValueError("expected deg >= 0") if x.ndim != 1: raise TypeError("expected 1D vector for x") if x.size == 0: raise TypeError("expected non-empty vector for x") if y.ndim < 1 or y.ndim > 2: raise TypeError("expected 1D or 2D array for y") if x.shape[0] != y.shape[0]: raise TypeError("expected x and y to have same length") # set rcond if rcond is None: rcond = len(x)*finfo(x.dtype).eps # set up least squares equation for powers of x lhs = vander(x, order) rhs = y # apply weighting if w is not None: w = NX.asarray(w) + 0.0 if w.ndim != 1: raise TypeError("expected a 1-d array for weights") if w.shape[0] != y.shape[0]: raise TypeError("expected w and y to have the same length") lhs *= w[:, NX.newaxis] if rhs.ndim == 2: rhs *= w[:, NX.newaxis] else: rhs *= w # scale lhs to improve condition number and solve scale = NX.sqrt((lhs*lhs).sum(axis=0)) lhs /= scale c, resids, rank, s = lstsq(lhs, rhs, rcond) c = (c.T/scale).T # broadcast scale coefficients # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: msg = "Polyfit may be poorly conditioned" warnings.warn(msg, RankWarning) if full: return c, resids, rank, s, rcond elif cov: Vbase = inv(dot(lhs.T, lhs)) Vbase /= NX.outer(scale, scale) # Some literature ignores the extra -2.0 factor in the denominator, but # it is included here because the covariance of Multivariate Student-T # (which is implied by a Bayesian uncertainty analysis) includes it. # Plus, it gives a slightly more conservative estimate of uncertainty. fac = resids / (len(x) - order - 2.0) if y.ndim == 1: return c, Vbase * fac else: return c, Vbase[:,:, NX.newaxis] * fac else: return c def polyval(p, x): """ Evaluate a polynomial at specific values. If `p` is of length N, this function returns the value: ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]`` If `x` is a sequence, then `p(x)` is returned for each element of `x`. If `x` is another polynomial then the composite polynomial `p(x(t))` is returned. Parameters ---------- p : array_like or poly1d object 1D array of polynomial coefficients (including coefficients equal to zero) from highest degree to the constant term, or an instance of poly1d. x : array_like or poly1d object A number, a 1D array of numbers, or an instance of poly1d, "at" which to evaluate `p`. Returns ------- values : ndarray or poly1d If `x` is a poly1d instance, the result is the composition of the two polynomials, i.e., `x` is "substituted" in `p` and the simplified result is returned. 
In addition, the type of `x` - array_like or poly1d - governs the type of the output: `x` array_like => `values` array_like, `x` a poly1d object => `values` is also. See Also -------- poly1d: A polynomial class. Notes ----- Horner's scheme [1]_ is used to evaluate the polynomial. Even so, for polynomials of high degree the values may be inaccurate due to rounding errors. Use carefully. References ---------- .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand Reinhold Co., 1985, pg. 720. Examples -------- >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) poly1d([ 76.]) >>> np.polyval(np.poly1d([3,0,1]), 5) 76 >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) poly1d([ 76.]) """ p = NX.asarray(p) if isinstance(x, poly1d): y = 0 else: x = NX.asarray(x) y = NX.zeros_like(x) for i in range(len(p)): y = y * x + p[i] return y def polyadd(a1, a2): """ Find the sum of two polynomials. Returns the polynomial resulting from the sum of two input polynomials. Each input must be either a poly1d object or a 1D sequence of polynomial coefficients, from highest to lowest degree. Parameters ---------- a1, a2 : array_like or poly1d object Input polynomials. Returns ------- out : ndarray or poly1d object The sum of the inputs. If either input is a poly1d object, then the output is also a poly1d object. Otherwise, it is a 1D array of polynomial coefficients from highest to lowest degree. See Also -------- poly1d : A one-dimensional polynomial class. poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval Examples -------- >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) Using poly1d objects: >>> p1 = np.poly1d([1, 2]) >>> p2 = np.poly1d([9, 5, 4]) >>> print p1 1 x + 2 >>> print p2 2 9 x + 5 x + 4 >>> print np.polyadd(p1, p2) 2 9 x + 6 x + 6 """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 + a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) + a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 + NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polysub(a1, a2): """ Difference (subtraction) of two polynomials. Given two polynomials `a1` and `a2`, returns ``a1 - a2``. `a1` and `a2` can be either array_like sequences of the polynomials' coefficients (including coefficients equal to zero), or `poly1d` objects. Parameters ---------- a1, a2 : array_like or poly1d Minuend and subtrahend polynomials, respectively. Returns ------- out : ndarray or poly1d Array or `poly1d` object of the difference polynomial's coefficients. See Also -------- polyval, polydiv, polymul, polyadd Examples -------- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1 = atleast_1d(a1) a2 = atleast_1d(a2) diff = len(a2) - len(a1) if diff == 0: val = a1 - a2 elif diff > 0: zr = NX.zeros(diff, a1.dtype) val = NX.concatenate((zr, a1)) - a2 else: zr = NX.zeros(abs(diff), a2.dtype) val = a1 - NX.concatenate((zr, a2)) if truepoly: val = poly1d(val) return val def polymul(a1, a2): """ Find the product of two polynomials. Finds the polynomial resulting from the multiplication of the two input polynomials. Each input must be either a poly1d object or a 1D sequence of polynomial coefficients, from highest to lowest degree. 
Parameters ---------- a1, a2 : array_like or poly1d object Input polynomials. Returns ------- out : ndarray or poly1d object The polynomial resulting from the multiplication of the inputs. If either inputs is a poly1d object, then the output is also a poly1d object. Otherwise, it is a 1D array of polynomial coefficients from highest to lowest degree. See Also -------- poly1d : A one-dimensional polynomial class. poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval convolve : Array convolution. Same output as polymul, but has parameter for overlap mode. Examples -------- >>> np.polymul([1, 2, 3], [9, 5, 1]) array([ 9, 23, 38, 17, 3]) Using poly1d objects: >>> p1 = np.poly1d([1, 2, 3]) >>> p2 = np.poly1d([9, 5, 1]) >>> print p1 2 1 x + 2 x + 3 >>> print p2 2 9 x + 5 x + 1 >>> print np.polymul(p1, p2) 4 3 2 9 x + 23 x + 38 x + 17 x + 3 """ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) a1, a2 = poly1d(a1), poly1d(a2) val = NX.convolve(a1, a2) if truepoly: val = poly1d(val) return val def polydiv(u, v): """ Returns the quotient and remainder of polynomial division. The input arrays are the coefficients (including any coefficients equal to zero) of the "numerator" (dividend) and "denominator" (divisor) polynomials, respectively. Parameters ---------- u : array_like or poly1d Dividend polynomial's coefficients. v : array_like or poly1d Divisor polynomial's coefficients. Returns ------- q : ndarray Coefficients, including those equal to zero, of the quotient. r : ndarray Coefficients, including those equal to zero, of the remainder. See Also -------- poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub, polyval Notes ----- Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need not equal `v.ndim`. In other words, all four possible combinations - ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. Examples -------- .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) (array([ 1.5 , 1.75]), array([ 0.25])) """ truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) u = atleast_1d(u) + 0.0 v = atleast_1d(v) + 0.0 # w has the common type w = u[0] + v[0] m = len(u) - 1 n = len(v) - 1 scale = 1. / v[0] q = NX.zeros((max(m - n + 1, 1),), w.dtype) r = u.copy() for k in range(0, m-n+1): d = scale * r[k] q[k] = d r[k:k+n+1] -= d*v while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): r = r[1:] if truepoly: return poly1d(q), poly1d(r) return q, r _poly_mat = re.compile(r"[*][*]([0-9]*)") def _raise_power(astr, wrap=70): n = 0 line1 = '' line2 = '' output = ' ' while True: mat = _poly_mat.search(astr, n) if mat is None: break span = mat.span() power = mat.groups()[0] partstr = astr[n:span[0]] n = span[1] toadd2 = partstr + ' '*(len(power)-1) toadd1 = ' '*(len(partstr)-1) + power if ((len(line2) + len(toadd2) > wrap) or (len(line1) + len(toadd1) > wrap)): output += line1 + "\n" + line2 + "\n " line1 = toadd1 line2 = toadd2 else: line2 += partstr + ' '*(len(power)-1) line1 += ' '*(len(partstr)-1) + power output += line1 + "\n" + line2 return output + astr[n:] class poly1d(object): """ A one-dimensional polynomial class. A convenience class, used to encapsulate "natural" operations on polynomials so that said operations may take on their customary form in code (see Examples). 
Parameters ---------- c_or_r : array_like The polynomial's coefficients, in decreasing powers, or if the value of the second parameter is True, the polynomial's roots (values where the polynomial evaluates to 0). For example, ``poly1d([1, 2, 3])`` returns an object that represents :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`. r : bool, optional If True, `c_or_r` specifies the polynomial's roots; the default is False. variable : str, optional Changes the variable used when printing `p` from `x` to `variable` (see Examples). Examples -------- Construct the polynomial :math:`x^2 + 2x + 3`: >>> p = np.poly1d([1, 2, 3]) >>> print np.poly1d(p) 2 1 x + 2 x + 3 Evaluate the polynomial at :math:`x = 0.5`: >>> p(0.5) 4.25 Find the roots: >>> p.r array([-1.+1.41421356j, -1.-1.41421356j]) >>> p(p.r) array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) These numbers in the previous line represent (0, 0) to machine precision Show the coefficients: >>> p.c array([1, 2, 3]) Display the order (the leading zero-coefficients are removed): >>> p.order 2 Show the coefficient of the k-th power in the polynomial (which is equivalent to ``p.c[-(i+1)]``): >>> p[1] 2 Polynomials can be added, subtracted, multiplied, and divided (returns quotient and remainder): >>> p * p poly1d([ 1, 4, 10, 12, 9]) >>> (p**3 + 4) / p (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) ``asarray(p)`` gives the coefficient array, so polynomials can be used in all functions that accept arrays: >>> p**2 # square of polynomial poly1d([ 1, 4, 10, 12, 9]) >>> np.square(p) # square of individual coefficients array([1, 4, 9]) The variable used in the string representation of `p` can be modified, using the `variable` parameter: >>> p = np.poly1d([1,2,3], variable='z') >>> print p 2 1 z + 2 z + 3 Construct a polynomial from its roots: >>> np.poly1d([1, 2], True) poly1d([ 1, -3, 2]) This is the same polynomial as obtained by: >>> np.poly1d([1, -1]) * np.poly1d([1, -2]) poly1d([ 1, -3, 2]) """ coeffs = None order = None variable = None __hash__ = None def __init__(self, c_or_r, r=0, variable=None): if isinstance(c_or_r, poly1d): for key in c_or_r.__dict__.keys(): self.__dict__[key] = c_or_r.__dict__[key] if variable is not None: self.__dict__['variable'] = variable return if r: c_or_r = poly(c_or_r) c_or_r = atleast_1d(c_or_r) if len(c_or_r.shape) > 1: raise ValueError("Polynomial must be 1d only.") c_or_r = trim_zeros(c_or_r, trim='f') if len(c_or_r) == 0: c_or_r = NX.array([0.]) self.__dict__['coeffs'] = c_or_r self.__dict__['order'] = len(c_or_r) - 1 if variable is None: variable = 'x' self.__dict__['variable'] = variable def __array__(self, t=None): if t: return NX.asarray(self.coeffs, t) else: return NX.asarray(self.coeffs) def __repr__(self): vals = repr(self.coeffs) vals = vals[6:-1] return "poly1d(%s)" % vals def __len__(self): return self.order def __str__(self): thestr = "0" var = self.variable # Remove leading zeros coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] N = len(coeffs)-1 def fmt_float(q): s = '%.4g' % q if s.endswith('.0000'): s = s[:-5] return s for k in range(len(coeffs)): if not iscomplex(coeffs[k]): coefstr = fmt_float(real(coeffs[k])) elif real(coeffs[k]) == 0: coefstr = '%sj' % fmt_float(imag(coeffs[k])) else: coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])), fmt_float(imag(coeffs[k]))) power = (N-k) if power == 0: if coefstr != '0': newstr = '%s' % (coefstr,) else: if k == 0: newstr = '0' else: newstr = '' elif power 
== 1: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = var else: newstr = '%s %s' % (coefstr, var) else: if coefstr == '0': newstr = '' elif coefstr == 'b': newstr = '%s**%d' % (var, power,) else: newstr = '%s %s**%d' % (coefstr, var, power) if k > 0: if newstr != '': if newstr.startswith('-'): thestr = "%s - %s" % (thestr, newstr[1:]) else: thestr = "%s + %s" % (thestr, newstr) else: thestr = newstr return _raise_power(thestr) def __call__(self, val): return polyval(self.coeffs, val) def __neg__(self): return poly1d(-self.coeffs) def __pos__(self): return self def __mul__(self, other): if isscalar(other): return poly1d(self.coeffs * other) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __rmul__(self, other): if isscalar(other): return poly1d(other * self.coeffs) else: other = poly1d(other) return poly1d(polymul(self.coeffs, other.coeffs)) def __add__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __radd__(self, other): other = poly1d(other) return poly1d(polyadd(self.coeffs, other.coeffs)) def __pow__(self, val): if not isscalar(val) or int(val) != val or val < 0: raise ValueError("Power to non-negative integers only.") res = [1] for _ in range(val): res = polymul(self.coeffs, res) return poly1d(res) def __sub__(self, other): other = poly1d(other) return poly1d(polysub(self.coeffs, other.coeffs)) def __rsub__(self, other): other = poly1d(other) return poly1d(polysub(other.coeffs, self.coeffs)) def __div__(self, other): if isscalar(other): return poly1d(self.coeffs/other) else: other = poly1d(other) return polydiv(self, other) __truediv__ = __div__ def __rdiv__(self, other): if isscalar(other): return poly1d(other/self.coeffs) else: other = poly1d(other) return polydiv(other, self) __rtruediv__ = __rdiv__ def __eq__(self, other): if self.coeffs.shape != other.coeffs.shape: return False return (self.coeffs == other.coeffs).all() def __ne__(self, other): return not self.__eq__(other) def __setattr__(self, key, val): raise ValueError("Attributes cannot be changed this way.") def __getattr__(self, key): if key in ['r', 'roots']: return roots(self.coeffs) elif key in ['c', 'coef', 'coefficients']: return self.coeffs elif key in ['o']: return self.order else: try: return self.__dict__[key] except KeyError: raise AttributeError( "'%s' has no attribute '%s'" % (self.__class__, key)) def __getitem__(self, val): ind = self.order - val if val > self.order: return 0 if val < 0: return 0 return self.coeffs[ind] def __setitem__(self, key, val): ind = self.order - key if key < 0: raise ValueError("Does not support negative powers.") if key > self.order: zr = NX.zeros(key-self.order, self.coeffs.dtype) self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs)) self.__dict__['order'] = key ind = 0 self.__dict__['coeffs'][ind] = val return def __iter__(self): return iter(self.coeffs) def integ(self, m=1, k=0): """ Return an antiderivative (indefinite integral) of this polynomial. Refer to `polyint` for full documentation. See Also -------- polyint : equivalent function """ return poly1d(polyint(self.coeffs, m=m, k=k)) def deriv(self, m=1): """ Return a derivative of this polynomial. Refer to `polyder` for full documentation. See Also -------- polyder : equivalent function """ return poly1d(polyder(self.coeffs, m=m)) # Stuff to do on module import warnings.simplefilter('always', RankWarning)
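A short end-to-end sketch of the functions defined above (the quadratic and the noise level are made up for illustration; printed values will vary slightly with the random noise):

import numpy as np

# Fit a quadratic to noisy samples of y = 2*x**2 - 3*x + 1, then round-trip
# through poly1d, polyder and polyint.
x = np.linspace(-2, 2, 50)
y = 2*x**2 - 3*x + 1 + np.random.normal(scale=0.05, size=x.shape)

coeffs = np.polyfit(x, y, 2)       # close to array([ 2., -3.,  1.])
p = np.poly1d(coeffs)

print(p(0.5))                      # roughly 2*0.25 - 3*0.5 + 1 = 0.0
print(np.polyder(p))               # derivative, roughly poly1d([ 4., -3.])
print(np.polyint(np.polyder(p)))   # integrating the derivative drops the constant term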
artistic-2.0
rrrrrr8/vnpy
vnpy/trader/app/ctaStrategy/ctaBacktesting.py
1
56364
# encoding: UTF-8 ''' This file contains the backtesting engine of the CTA module. The backtesting engine exposes the same API as the live CTA engine, so the same strategy code can be used for both backtesting and live trading. ''' from __future__ import division from __future__ import print_function from datetime import datetime, timedelta from collections import OrderedDict from itertools import product import multiprocessing import copy import pymongo import pandas as pd import numpy as np import matplotlib.pyplot as plt from vnpy.rpc import RpcClient, RpcServer, RemoteException # use seaborn's whitegrid style if it is installed try: import seaborn as sns sns.set_style('whitegrid') except ImportError: pass from vnpy.trader.vtGlobal import globalSetting from vnpy.trader.vtObject import VtTickData, VtBarData from vnpy.trader.vtConstant import * from vnpy.trader.vtGateway import VtOrderData, VtTradeData from .ctaBase import * ######################################################################## class BacktestingEngine(object): """ CTA backtesting engine. The function interface is kept identical to the strategy engine, so the same code base runs in both backtesting and live trading. """ TICK_MODE = 'tick' BAR_MODE = 'bar' #---------------------------------------------------------------------- def __init__(self): """Constructor""" # local stop orders self.stopOrderCount = 0 # counter: stopOrderID = STOPORDERPREFIX + str(stopOrderCount) # dictionaries of local stop orders, keyed by stopOrderID with StopOrder objects as values self.stopOrderDict = {} # cancelled stop orders are NOT removed from this dict self.workingStopOrderDict = {} # cancelled stop orders ARE removed from this dict self.engineType = ENGINETYPE_BACKTESTING # engine type: backtesting self.strategy = None # strategy under backtest self.mode = self.BAR_MODE # backtesting mode, defaults to bar (K-line) data self.startDate = '' self.initDays = 0 self.endDate = '' self.capital = 1000000 # starting capital for the backtest (default 1,000,000) self.slippage = 0 # slippage assumed per trade self.rate = 0 # commission rate assumed for the backtest (for percentage-based commission) self.size = 1 # contract size (multiplier), default 1 self.priceTick = 0 # minimum price tick self.dbClient = None # database client self.dbCursor = None # database cursor self.hdsClient = None # history data server client self.initData = [] # data used for strategy initialization self.dbName = '' # name of the backtesting database self.symbol = '' # name of the backtesting collection (symbol) self.dataStartDate = None # start date of the backtest data, datetime object self.dataEndDate = None # end date of the backtest data, datetime object self.strategyStartDate = None # date the strategy starts trading (earlier data is only used for initialization), datetime object self.limitOrderCount = 0 # limit order counter self.limitOrderDict = OrderedDict() # dictionary of all limit orders self.workingLimitOrderDict = OrderedDict() # dictionary of working limit orders, used for matching self.tradeCount = 0 # trade counter self.tradeDict = OrderedDict() # dictionary of trades self.logList = [] # log records # latest market data, used to simulate fills self.tick = None self.bar = None self.dt = None # latest timestamp # used for daily result calculation self.dailyResultDict = OrderedDict() #------------------------------------------------ # Common utilities #------------------------------------------------ #---------------------------------------------------------------------- def roundToPriceTick(self, price): """Round a price to the contract's minimum price tick""" if not self.priceTick: return price newPrice = round(price/self.priceTick, 0) * self.priceTick return newPrice #---------------------------------------------------------------------- def output(self, content): """Print the given content with a timestamp""" print(str(datetime.now()) + "\t" + content) #------------------------------------------------ # Parameter configuration #------------------------------------------------ #---------------------------------------------------------------------- def setStartDate(self, startDate='20100416', initDays=10): """Set the start date of the backtest""" self.startDate = startDate self.initDays = initDays self.dataStartDate = datetime.strptime(startDate, '%Y%m%d') initTimeDelta = timedelta(initDays) self.strategyStartDate = self.dataStartDate + initTimeDelta #---------------------------------------------------------------------- def setEndDate(self, endDate=''): """Set the end date of the backtest""" self.endDate = endDate if endDate: self.dataEndDate = datetime.strptime(endDate, '%Y%m%d') # without adjusting the time, data of the dataEndDate day itself would be excluded self.dataEndDate = 
self.dataEndDate.replace(hour=23, minute=59) #---------------------------------------------------------------------- def setBacktestingMode(self, mode): """设置回测模式""" self.mode = mode #---------------------------------------------------------------------- def setDatabase(self, dbName, symbol): """设置历史数据所用的数据库""" self.dbName = dbName self.symbol = symbol #---------------------------------------------------------------------- def setCapital(self, capital): """设置资本金""" self.capital = capital #---------------------------------------------------------------------- def setSlippage(self, slippage): """设置滑点点数""" self.slippage = slippage #---------------------------------------------------------------------- def setSize(self, size): """设置合约大小""" self.size = size #---------------------------------------------------------------------- def setRate(self, rate): """设置佣金比例""" self.rate = rate #---------------------------------------------------------------------- def setPriceTick(self, priceTick): """设置价格最小变动""" self.priceTick = priceTick #------------------------------------------------ # 数据回放相关 #------------------------------------------------ #---------------------------------------------------------------------- def initHdsClient(self): """初始化历史数据服务器客户端""" reqAddress = 'tcp://localhost:5555' subAddress = 'tcp://localhost:7777' self.hdsClient = RpcClient(reqAddress, subAddress) self.hdsClient.start() #---------------------------------------------------------------------- def loadHistoryData(self): """载入历史数据""" self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort']) collection = self.dbClient[self.dbName][self.symbol] self.output(u'开始载入数据') # 首先根据回测模式,确认要使用的数据类 if self.mode == self.BAR_MODE: dataClass = VtBarData func = self.newBar else: dataClass = VtTickData func = self.newTick # 载入初始化需要用的数据 if self.hdsClient: initCursor = self.hdsClient.loadHistoryData(self.dbName, self.symbol, self.dataStartDate, self.strategyStartDate) else: flt = {'datetime':{'$gte':self.dataStartDate, '$lt':self.strategyStartDate}} initCursor = collection.find(flt).sort('datetime') # 将数据从查询指针中读取出,并生成列表 self.initData = [] # 清空initData列表 for d in initCursor: data = dataClass() data.__dict__ = d self.initData.append(data) # 载入回测数据 if self.hdsClient: self.dbCursor = self.hdsClient.loadHistoryData(self.dbName, self.symbol, self.strategyStartDate, self.dataEndDate) else: if not self.dataEndDate: flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件 else: flt = {'datetime':{'$gte':self.strategyStartDate, '$lte':self.dataEndDate}} self.dbCursor = collection.find(flt).sort('datetime') if isinstance(self.dbCursor, list): count = len(initCursor) + len(self.dbCursor) else: count = initCursor.count() + self.dbCursor.count() self.output(u'载入完成,数据量:%s' %count) #---------------------------------------------------------------------- def runBacktesting(self): """运行回测""" # 载入历史数据 self.loadHistoryData() # 首先根据回测模式,确认要使用的数据类 if self.mode == self.BAR_MODE: dataClass = VtBarData func = self.newBar else: dataClass = VtTickData func = self.newTick self.output(u'开始回测') self.strategy.onInit() self.strategy.inited = True self.output(u'策略初始化完成') self.strategy.trading = True self.strategy.onStart() self.output(u'策略启动完成') self.output(u'开始回放数据') for d in self.dbCursor: data = dataClass() data.__dict__ = d func(data) self.output(u'数据回放结束') #---------------------------------------------------------------------- def newBar(self, bar): """新的K线""" self.bar = bar self.dt = bar.datetime self.crossLimitOrder() # 先撮合限价单 
self.crossStopOrder() # 再撮合停止单 self.strategy.onBar(bar) # 推送K线到策略中 self.updateDailyClose(bar.datetime, bar.close) #---------------------------------------------------------------------- def newTick(self, tick): """新的Tick""" self.tick = tick self.dt = tick.datetime self.crossLimitOrder() self.crossStopOrder() self.strategy.onTick(tick) self.updateDailyClose(tick.datetime, tick.lastPrice) #---------------------------------------------------------------------- def initStrategy(self, strategyClass, setting=None): """ 初始化策略 setting是策略的参数设置,如果使用类中写好的默认设置则可以不传该参数 """ self.strategy = strategyClass(self, setting) self.strategy.name = self.strategy.className #---------------------------------------------------------------------- def crossLimitOrder(self): """基于最新数据撮合限价单""" # 先确定会撮合成交的价格 if self.mode == self.BAR_MODE: buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交 sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交 buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价 sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价 else: buyCrossPrice = self.tick.askPrice1 sellCrossPrice = self.tick.bidPrice1 buyBestCrossPrice = self.tick.askPrice1 sellBestCrossPrice = self.tick.bidPrice1 # 遍历限价单字典中的所有限价单 for orderID, order in self.workingLimitOrderDict.items(): # 推送委托进入队列(未成交)的状态更新 if not order.status: order.status = STATUS_NOTTRADED self.strategy.onOrder(order) # 判断是否会成交 buyCross = (order.direction==DIRECTION_LONG and order.price>=buyCrossPrice and buyCrossPrice > 0) # 国内的tick行情在涨停时askPrice1为0,此时买无法成交 sellCross = (order.direction==DIRECTION_SHORT and order.price<=sellCrossPrice and sellCrossPrice > 0) # 国内的tick行情在跌停时bidPrice1为0,此时卖无法成交 # 如果发生了成交 if buyCross or sellCross: # 推送成交数据 self.tradeCount += 1 # 成交编号自增1 tradeID = str(self.tradeCount) trade = VtTradeData() trade.vtSymbol = order.vtSymbol trade.tradeID = tradeID trade.vtTradeID = tradeID trade.orderID = order.orderID trade.vtOrderID = order.orderID trade.direction = order.direction trade.offset = order.offset # 以买入为例: # 1. 假设当根K线的OHLC分别为:100, 125, 90, 110 # 2. 假设在上一根K线结束(也是当前K线开始)的时刻,策略发出的委托为限价105 # 3. 
则在实际中的成交价会是100而不是105,因为委托发出时市场的最优价格是100 if buyCross: trade.price = min(order.price, buyBestCrossPrice) self.strategy.pos += order.totalVolume else: trade.price = max(order.price, sellBestCrossPrice) self.strategy.pos -= order.totalVolume trade.volume = order.totalVolume trade.tradeTime = self.dt.strftime('%H:%M:%S') trade.dt = self.dt self.strategy.onTrade(trade) self.tradeDict[tradeID] = trade # 推送委托数据 order.tradedVolume = order.totalVolume order.status = STATUS_ALLTRADED self.strategy.onOrder(order) # 从字典中删除该限价单 if orderID in self.workingLimitOrderDict: del self.workingLimitOrderDict[orderID] #---------------------------------------------------------------------- def crossStopOrder(self): """基于最新数据撮合停止单""" # 先确定会撮合成交的价格,这里和限价单规则相反 if self.mode == self.BAR_MODE: buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交 sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交 bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于 else: buyCrossPrice = self.tick.lastPrice sellCrossPrice = self.tick.lastPrice bestCrossPrice = self.tick.lastPrice # 遍历停止单字典中的所有停止单 for stopOrderID, so in self.workingStopOrderDict.items(): # 判断是否会成交 buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice # 如果发生了成交 if buyCross or sellCross: # 更新停止单状态,并从字典中删除该停止单 so.status = STOPORDER_TRIGGERED if stopOrderID in self.workingStopOrderDict: del self.workingStopOrderDict[stopOrderID] # 推送成交数据 self.tradeCount += 1 # 成交编号自增1 tradeID = str(self.tradeCount) trade = VtTradeData() trade.vtSymbol = so.vtSymbol trade.tradeID = tradeID trade.vtTradeID = tradeID if buyCross: self.strategy.pos += so.volume trade.price = max(bestCrossPrice, so.price) else: self.strategy.pos -= so.volume trade.price = min(bestCrossPrice, so.price) self.limitOrderCount += 1 orderID = str(self.limitOrderCount) trade.orderID = orderID trade.vtOrderID = orderID trade.direction = so.direction trade.offset = so.offset trade.volume = so.volume trade.tradeTime = self.dt.strftime('%H:%M:%S') trade.dt = self.dt self.tradeDict[tradeID] = trade # 推送委托数据 order = VtOrderData() order.vtSymbol = so.vtSymbol order.symbol = so.vtSymbol order.orderID = orderID order.vtOrderID = orderID order.direction = so.direction order.offset = so.offset order.price = so.price order.totalVolume = so.volume order.tradedVolume = so.volume order.status = STATUS_ALLTRADED order.orderTime = trade.tradeTime self.limitOrderDict[orderID] = order # 按照顺序推送数据 self.strategy.onStopOrder(so) self.strategy.onOrder(order) self.strategy.onTrade(trade) #------------------------------------------------ # 策略接口相关 #------------------------------------------------ #---------------------------------------------------------------------- def sendOrder(self, vtSymbol, orderType, price, volume, strategy): """发单""" self.limitOrderCount += 1 orderID = str(self.limitOrderCount) order = VtOrderData() order.vtSymbol = vtSymbol order.price = self.roundToPriceTick(price) order.totalVolume = volume order.orderID = orderID order.vtOrderID = orderID order.orderTime = self.dt.strftime('%H:%M:%S') # CTA委托类型映射 if orderType == CTAORDER_BUY: order.direction = DIRECTION_LONG order.offset = OFFSET_OPEN elif orderType == CTAORDER_SELL: order.direction = DIRECTION_SHORT order.offset = OFFSET_CLOSE elif orderType == CTAORDER_SHORT: order.direction = DIRECTION_SHORT order.offset = OFFSET_OPEN elif orderType == CTAORDER_COVER: order.direction = DIRECTION_LONG order.offset = OFFSET_CLOSE # 保存到限价单字典中 self.workingLimitOrderDict[orderID] = order 
self.limitOrderDict[orderID] = order return [orderID] #---------------------------------------------------------------------- def cancelOrder(self, vtOrderID): """撤单""" if vtOrderID in self.workingLimitOrderDict: order = self.workingLimitOrderDict[vtOrderID] order.status = STATUS_CANCELLED order.cancelTime = self.dt.strftime('%H:%M:%S') self.strategy.onOrder(order) del self.workingLimitOrderDict[vtOrderID] #---------------------------------------------------------------------- def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy): """发停止单(本地实现)""" self.stopOrderCount += 1 stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount) so = StopOrder() so.vtSymbol = vtSymbol so.price = self.roundToPriceTick(price) so.volume = volume so.strategy = strategy so.status = STOPORDER_WAITING so.stopOrderID = stopOrderID if orderType == CTAORDER_BUY: so.direction = DIRECTION_LONG so.offset = OFFSET_OPEN elif orderType == CTAORDER_SELL: so.direction = DIRECTION_SHORT so.offset = OFFSET_CLOSE elif orderType == CTAORDER_SHORT: so.direction = DIRECTION_SHORT so.offset = OFFSET_OPEN elif orderType == CTAORDER_COVER: so.direction = DIRECTION_LONG so.offset = OFFSET_CLOSE # 保存stopOrder对象到字典中 self.stopOrderDict[stopOrderID] = so self.workingStopOrderDict[stopOrderID] = so # 推送停止单初始更新 self.strategy.onStopOrder(so) return [stopOrderID] #---------------------------------------------------------------------- def cancelStopOrder(self, stopOrderID): """撤销停止单""" # 检查停止单是否存在 if stopOrderID in self.workingStopOrderDict: so = self.workingStopOrderDict[stopOrderID] so.status = STOPORDER_CANCELLED del self.workingStopOrderDict[stopOrderID] self.strategy.onStopOrder(so) #---------------------------------------------------------------------- def putStrategyEvent(self, name): """发送策略更新事件,回测中忽略""" pass #---------------------------------------------------------------------- def insertData(self, dbName, collectionName, data): """考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错""" pass #---------------------------------------------------------------------- def loadBar(self, dbName, collectionName, startDate): """直接返回初始化数据列表中的Bar""" return self.initData #---------------------------------------------------------------------- def loadTick(self, dbName, collectionName, startDate): """直接返回初始化数据列表中的Tick""" return self.initData #---------------------------------------------------------------------- def writeCtaLog(self, content): """记录日志""" log = str(self.dt) + ' ' + content self.logList.append(log) #---------------------------------------------------------------------- def cancelAll(self, name): """全部撤单""" # 撤销限价单 for orderID in self.workingLimitOrderDict.keys(): self.cancelOrder(orderID) # 撤销停止单 for stopOrderID in self.workingStopOrderDict.keys(): self.cancelStopOrder(stopOrderID) #---------------------------------------------------------------------- def saveSyncData(self, strategy): """保存同步数据(无效)""" pass #---------------------------------------------------------------------- def getPriceTick(self, strategy): """获取最小价格变动""" return self.priceTick #------------------------------------------------ # 结果计算相关 #------------------------------------------------ #---------------------------------------------------------------------- def calculateBacktestingResult(self): """ 计算回测结果 """ self.output(u'计算回测结果') # 检查成交记录 if not self.tradeDict: self.output(u'成交记录为空,无法计算回测结果') return {} # 首先基于回测后的成交记录,计算每笔交易的盈亏 resultList = [] # 交易结果列表 longTrade = [] # 未平仓的多头交易 shortTrade = [] # 未平仓的空头交易 tradeTimeList = [] # 每笔成交时间戳 posList = [0] # 每笔成交后的持仓情况 for trade 
in self.tradeDict.values(): # 复制成交对象,因为下面的开平仓交易配对涉及到对成交数量的修改 # 若不进行复制直接操作,则计算完后所有成交的数量会变成0 trade = copy.copy(trade) # 多头交易 if trade.direction == DIRECTION_LONG: # 如果尚无空头交易 if not shortTrade: longTrade.append(trade) # 当前多头交易为平空 else: while True: entryTrade = shortTrade[0] exitTrade = trade # 清算开平仓交易 closedVolume = min(exitTrade.volume, entryTrade.volume) result = TradingResult(entryTrade.price, entryTrade.dt, exitTrade.price, exitTrade.dt, -closedVolume, self.rate, self.slippage, self.size) resultList.append(result) posList.extend([-1,0]) tradeTimeList.extend([result.entryDt, result.exitDt]) # 计算未清算部分 entryTrade.volume -= closedVolume exitTrade.volume -= closedVolume # 如果开仓交易已经全部清算,则从列表中移除 if not entryTrade.volume: shortTrade.pop(0) # 如果平仓交易已经全部清算,则退出循环 if not exitTrade.volume: break # 如果平仓交易未全部清算, if exitTrade.volume: # 且开仓交易已经全部清算完,则平仓交易剩余的部分 # 等于新的反向开仓交易,添加到队列中 if not shortTrade: longTrade.append(exitTrade) break # 如果开仓交易还有剩余,则进入下一轮循环 else: pass # 空头交易 else: # 如果尚无多头交易 if not longTrade: shortTrade.append(trade) # 当前空头交易为平多 else: while True: entryTrade = longTrade[0] exitTrade = trade # 清算开平仓交易 closedVolume = min(exitTrade.volume, entryTrade.volume) result = TradingResult(entryTrade.price, entryTrade.dt, exitTrade.price, exitTrade.dt, closedVolume, self.rate, self.slippage, self.size) resultList.append(result) posList.extend([1,0]) tradeTimeList.extend([result.entryDt, result.exitDt]) # 计算未清算部分 entryTrade.volume -= closedVolume exitTrade.volume -= closedVolume # 如果开仓交易已经全部清算,则从列表中移除 if not entryTrade.volume: longTrade.pop(0) # 如果平仓交易已经全部清算,则退出循环 if not exitTrade.volume: break # 如果平仓交易未全部清算, if exitTrade.volume: # 且开仓交易已经全部清算完,则平仓交易剩余的部分 # 等于新的反向开仓交易,添加到队列中 if not longTrade: shortTrade.append(exitTrade) break # 如果开仓交易还有剩余,则进入下一轮循环 else: pass # 到最后交易日尚未平仓的交易,则以最后价格平仓 if self.mode == self.BAR_MODE: endPrice = self.bar.close else: endPrice = self.tick.lastPrice for trade in longTrade: result = TradingResult(trade.price, trade.dt, endPrice, self.dt, trade.volume, self.rate, self.slippage, self.size) resultList.append(result) for trade in shortTrade: result = TradingResult(trade.price, trade.dt, endPrice, self.dt, -trade.volume, self.rate, self.slippage, self.size) resultList.append(result) # 检查是否有交易 if not resultList: self.output(u'无交易结果') return {} # 然后基于每笔交易的结果,我们可以计算具体的盈亏曲线和最大回撤等 capital = 0 # 资金 maxCapital = 0 # 资金最高净值 drawdown = 0 # 回撤 totalResult = 0 # 总成交数量 totalTurnover = 0 # 总成交金额(合约面值) totalCommission = 0 # 总手续费 totalSlippage = 0 # 总滑点 timeList = [] # 时间序列 pnlList = [] # 每笔盈亏序列 capitalList = [] # 盈亏汇总的时间序列 drawdownList = [] # 回撤的时间序列 winningResult = 0 # 盈利次数 losingResult = 0 # 亏损次数 totalWinning = 0 # 总盈利金额 totalLosing = 0 # 总亏损金额 for result in resultList: capital += result.pnl maxCapital = max(capital, maxCapital) drawdown = capital - maxCapital pnlList.append(result.pnl) timeList.append(result.exitDt) # 交易的时间戳使用平仓时间 capitalList.append(capital) drawdownList.append(drawdown) totalResult += 1 totalTurnover += result.turnover totalCommission += result.commission totalSlippage += result.slippage if result.pnl >= 0: winningResult += 1 totalWinning += result.pnl else: losingResult += 1 totalLosing += result.pnl # 计算盈亏相关数据 winningRate = winningResult/totalResult*100 # 胜率 averageWinning = 0 # 这里把数据都初始化为0 averageLosing = 0 profitLossRatio = 0 if winningResult: averageWinning = totalWinning/winningResult # 平均每笔盈利 if losingResult: averageLosing = totalLosing/losingResult # 平均每笔亏损 if averageLosing: profitLossRatio = -averageWinning/averageLosing # 盈亏比 # 返回回测结果 d = {} d['capital'] = capital 
d['maxCapital'] = maxCapital d['drawdown'] = drawdown d['totalResult'] = totalResult d['totalTurnover'] = totalTurnover d['totalCommission'] = totalCommission d['totalSlippage'] = totalSlippage d['timeList'] = timeList d['pnlList'] = pnlList d['capitalList'] = capitalList d['drawdownList'] = drawdownList d['winningRate'] = winningRate d['averageWinning'] = averageWinning d['averageLosing'] = averageLosing d['profitLossRatio'] = profitLossRatio d['posList'] = posList d['tradeTimeList'] = tradeTimeList d['resultList'] = resultList return d #---------------------------------------------------------------------- def showBacktestingResult(self): """显示回测结果""" d = self.calculateBacktestingResult() # 输出 self.output('-' * 30) self.output(u'第一笔交易:\t%s' % d['timeList'][0]) self.output(u'最后一笔交易:\t%s' % d['timeList'][-1]) self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult'])) self.output(u'总盈亏:\t%s' % formatNumber(d['capital'])) self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList']))) self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult'])) self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult'])) self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult'])) self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate'])) self.output(u'盈利交易平均值\t%s' %formatNumber(d['averageWinning'])) self.output(u'亏损交易平均值\t%s' %formatNumber(d['averageLosing'])) self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio'])) # 绘图 fig = plt.figure(figsize=(10, 16)) pCapital = plt.subplot(4, 1, 1) pCapital.set_ylabel("capital") pCapital.plot(d['capitalList'], color='r', lw=0.8) pDD = plt.subplot(4, 1, 2) pDD.set_ylabel("DD") pDD.bar(range(len(d['drawdownList'])), d['drawdownList'], color='g') pPnl = plt.subplot(4, 1, 3) pPnl.set_ylabel("pnl") pPnl.hist(d['pnlList'], bins=50, color='c') pPos = plt.subplot(4, 1, 4) pPos.set_ylabel("Position") if d['posList'][-1] == 0: del d['posList'][-1] tradeTimeIndex = [item.strftime("%m/%d %H:%M:%S") for item in d['tradeTimeList']] xindex = np.arange(0, len(tradeTimeIndex), np.int(len(tradeTimeIndex)/10)) tradeTimeIndex = map(lambda i: tradeTimeIndex[i], xindex) pPos.plot(d['posList'], color='k', drawstyle='steps-pre') pPos.set_ylim(-1.2, 1.2) plt.sca(pPos) plt.tight_layout() plt.xticks(xindex, tradeTimeIndex, rotation=30) # 旋转15 plt.show() #---------------------------------------------------------------------- def clearBacktestingResult(self): """清空之前回测的结果""" # 清空限价单相关 self.limitOrderCount = 0 self.limitOrderDict.clear() self.workingLimitOrderDict.clear() # 清空停止单相关 self.stopOrderCount = 0 self.stopOrderDict.clear() self.workingStopOrderDict.clear() # 清空成交相关 self.tradeCount = 0 self.tradeDict.clear() #---------------------------------------------------------------------- def runOptimization(self, strategyClass, optimizationSetting): """优化参数""" # 获取优化设置 settingList = optimizationSetting.generateSetting() targetName = optimizationSetting.optimizeTarget # 检查参数设置问题 if not settingList or not targetName: self.output(u'优化设置有问题,请检查') # 遍历优化 resultList = [] for setting in settingList: self.clearBacktestingResult() self.output('-' * 30) self.output('setting: %s' %str(setting)) self.initStrategy(strategyClass, setting) self.runBacktesting() df = self.calculateDailyResult() df, d = self.calculateDailyStatistics(df) try: targetValue = d[targetName] except KeyError: targetValue = 0 resultList.append(([str(setting)], targetValue, d)) # 显示结果 resultList.sort(reverse=True, key=lambda result:result[1]) self.output('-' * 30) self.output(u'优化结果:') 
for result in resultList: self.output(u'参数:%s,目标:%s' %(result[0], result[1])) return resultList #---------------------------------------------------------------------- def runParallelOptimization(self, strategyClass, optimizationSetting): """并行优化参数""" # 获取优化设置 settingList = optimizationSetting.generateSetting() targetName = optimizationSetting.optimizeTarget # 检查参数设置问题 if not settingList or not targetName: self.output(u'优化设置有问题,请检查') # 多进程优化,启动一个对应CPU核心数量的进程池 pool = multiprocessing.Pool(multiprocessing.cpu_count()) l = [] for setting in settingList: l.append(pool.apply_async(optimize, (strategyClass, setting, targetName, self.mode, self.startDate, self.initDays, self.endDate, self.slippage, self.rate, self.size, self.priceTick, self.dbName, self.symbol))) pool.close() pool.join() # 显示结果 resultList = [res.get() for res in l] resultList.sort(reverse=True, key=lambda result:result[1]) self.output('-' * 30) self.output(u'优化结果:') for result in resultList: self.output(u'参数:%s,目标:%s' %(result[0], result[1])) return resultList #---------------------------------------------------------------------- def updateDailyClose(self, dt, price): """更新每日收盘价""" date = dt.date() if date not in self.dailyResultDict: self.dailyResultDict[date] = DailyResult(date, price) else: self.dailyResultDict[date].closePrice = price #---------------------------------------------------------------------- def calculateDailyResult(self): """计算按日统计的交易结果""" self.output(u'计算按日统计结果') # 检查成交记录 if not self.tradeDict: self.output(u'成交记录为空,无法计算回测结果') return {} # 将成交添加到每日交易结果中 for trade in self.tradeDict.values(): date = trade.dt.date() dailyResult = self.dailyResultDict[date] dailyResult.addTrade(trade) # 遍历计算每日结果 previousClose = 0 openPosition = 0 for dailyResult in self.dailyResultDict.values(): dailyResult.previousClose = previousClose previousClose = dailyResult.closePrice dailyResult.calculatePnl(openPosition, self.size, self.rate, self.slippage ) openPosition = dailyResult.closePosition # 生成DataFrame resultDict = {k:[] for k in dailyResult.__dict__.keys()} for dailyResult in self.dailyResultDict.values(): for k, v in dailyResult.__dict__.items(): resultDict[k].append(v) resultDf = pd.DataFrame.from_dict(resultDict) # 计算衍生数据 resultDf = resultDf.set_index('date') return resultDf #---------------------------------------------------------------------- def calculateDailyStatistics(self, df): """计算按日统计的结果""" df['balance'] = df['netPnl'].cumsum() + self.capital df['return'] = (np.log(df['balance']) - np.log(df['balance'].shift(1))).fillna(0) df['highlevel'] = df['balance'].rolling(min_periods=1,window=len(df),center=False).max() df['drawdown'] = df['balance'] - df['highlevel'] df['ddPercent'] = df['drawdown'] / df['highlevel'] * 100 # 计算统计结果 startDate = df.index[0] endDate = df.index[-1] totalDays = len(df) profitDays = len(df[df['netPnl']>0]) lossDays = len(df[df['netPnl']<0]) endBalance = df['balance'].iloc[-1] maxDrawdown = df['drawdown'].min() maxDdPercent = df['ddPercent'].min() totalNetPnl = df['netPnl'].sum() dailyNetPnl = totalNetPnl / totalDays totalCommission = df['commission'].sum() dailyCommission = totalCommission / totalDays totalSlippage = df['slippage'].sum() dailySlippage = totalSlippage / totalDays totalTurnover = df['turnover'].sum() dailyTurnover = totalTurnover / totalDays totalTradeCount = df['tradeCount'].sum() dailyTradeCount = totalTradeCount / totalDays totalReturn = (endBalance/self.capital - 1) * 100 annualizedReturn = totalReturn / totalDays * 240 dailyReturn = df['return'].mean() * 100 returnStd = 
df['return'].std() * 100 if returnStd: sharpeRatio = dailyReturn / returnStd * np.sqrt(240) else: sharpeRatio = 0 # 返回结果 result = { 'startDate': startDate, 'endDate': endDate, 'totalDays': totalDays, 'profitDays': profitDays, 'lossDays': lossDays, 'endBalance': endBalance, 'maxDrawdown': maxDrawdown, 'maxDdPercent': maxDdPercent, 'totalNetPnl': totalNetPnl, 'dailyNetPnl': dailyNetPnl, 'totalCommission': totalCommission, 'dailyCommission': dailyCommission, 'totalSlippage': totalSlippage, 'dailySlippage': dailySlippage, 'totalTurnover': totalTurnover, 'dailyTurnover': dailyTurnover, 'totalTradeCount': totalTradeCount, 'dailyTradeCount': dailyTradeCount, 'totalReturn': totalReturn, 'annualizedReturn': annualizedReturn, 'dailyReturn': dailyReturn, 'returnStd': returnStd, 'sharpeRatio': sharpeRatio } return df, result #---------------------------------------------------------------------- def showDailyResult(self, df=None, result=None): """显示按日统计的交易结果""" if df is None: df = self.calculateDailyResult() df, result = self.calculateDailyStatistics(df) # 输出统计结果 self.output('-' * 30) self.output(u'首个交易日:\t%s' % result['startDate']) self.output(u'最后交易日:\t%s' % result['endDate']) self.output(u'总交易日:\t%s' % result['totalDays']) self.output(u'盈利交易日\t%s' % result['profitDays']) self.output(u'亏损交易日:\t%s' % result['lossDays']) self.output(u'起始资金:\t%s' % self.capital) self.output(u'结束资金:\t%s' % formatNumber(result['endBalance'])) self.output(u'总收益率:\t%s%%' % formatNumber(result['totalReturn'])) self.output(u'年化收益:\t%s%%' % formatNumber(result['annualizedReturn'])) self.output(u'总盈亏:\t%s' % formatNumber(result['totalNetPnl'])) self.output(u'最大回撤: \t%s' % formatNumber(result['maxDrawdown'])) self.output(u'百分比最大回撤: %s%%' % formatNumber(result['maxDdPercent'])) self.output(u'总手续费:\t%s' % formatNumber(result['totalCommission'])) self.output(u'总滑点:\t%s' % formatNumber(result['totalSlippage'])) self.output(u'总成交金额:\t%s' % formatNumber(result['totalTurnover'])) self.output(u'总成交笔数:\t%s' % formatNumber(result['totalTradeCount'])) self.output(u'日均盈亏:\t%s' % formatNumber(result['dailyNetPnl'])) self.output(u'日均手续费:\t%s' % formatNumber(result['dailyCommission'])) self.output(u'日均滑点:\t%s' % formatNumber(result['dailySlippage'])) self.output(u'日均成交金额:\t%s' % formatNumber(result['dailyTurnover'])) self.output(u'日均成交笔数:\t%s' % formatNumber(result['dailyTradeCount'])) self.output(u'日均收益率:\t%s%%' % formatNumber(result['dailyReturn'])) self.output(u'收益标准差:\t%s%%' % formatNumber(result['returnStd'])) self.output(u'Sharpe Ratio:\t%s' % formatNumber(result['sharpeRatio'])) # 绘图 fig = plt.figure(figsize=(10, 16)) pBalance = plt.subplot(4, 1, 1) pBalance.set_title('Balance') df['balance'].plot(legend=True) pDrawdown = plt.subplot(4, 1, 2) pDrawdown.set_title('Drawdown') pDrawdown.fill_between(range(len(df)), df['drawdown'].values) pPnl = plt.subplot(4, 1, 3) pPnl.set_title('Daily Pnl') df['netPnl'].plot(kind='bar', legend=False, grid=False, xticks=[]) pKDE = plt.subplot(4, 1, 4) pKDE.set_title('Daily Pnl Distribution') df['netPnl'].hist(bins=50) plt.show() ######################################################################## class TradingResult(object): """每笔交易的结果""" #---------------------------------------------------------------------- def __init__(self, entryPrice, entryDt, exitPrice, exitDt, volume, rate, slippage, size): """Constructor""" self.entryPrice = entryPrice # 开仓价格 self.exitPrice = exitPrice # 平仓价格 self.entryDt = entryDt # 开仓时间datetime self.exitDt = exitDt # 平仓时间 self.volume = volume # 交易数量(+/-代表方向) self.turnover = 
(self.entryPrice+self.exitPrice)*size*abs(volume) # 成交金额 self.commission = self.turnover*rate # 手续费成本 self.slippage = slippage*2*size*abs(volume) # 滑点成本 self.pnl = ((self.exitPrice - self.entryPrice) * volume * size - self.commission - self.slippage) # 净盈亏 ######################################################################## class DailyResult(object): """每日交易的结果""" #---------------------------------------------------------------------- def __init__(self, date, closePrice): """Constructor""" self.date = date # 日期 self.closePrice = closePrice # 当日收盘价 self.previousClose = 0 # 昨日收盘价 self.tradeList = [] # 成交列表 self.tradeCount = 0 # 成交数量 self.openPosition = 0 # 开盘时的持仓 self.closePosition = 0 # 收盘时的持仓 self.tradingPnl = 0 # 交易盈亏 self.positionPnl = 0 # 持仓盈亏 self.totalPnl = 0 # 总盈亏 self.turnover = 0 # 成交量 self.commission = 0 # 手续费 self.slippage = 0 # 滑点 self.netPnl = 0 # 净盈亏 #---------------------------------------------------------------------- def addTrade(self, trade): """添加交易""" self.tradeList.append(trade) #---------------------------------------------------------------------- def calculatePnl(self, openPosition=0, size=1, rate=0, slippage=0): """ 计算盈亏 size: 合约乘数 rate:手续费率 slippage:滑点点数 """ # 持仓部分 self.openPosition = openPosition self.positionPnl = self.openPosition * (self.closePrice - self.previousClose) * size self.closePosition = self.openPosition # 交易部分 self.tradeCount = len(self.tradeList) for trade in self.tradeList: if trade.direction == DIRECTION_LONG: posChange = trade.volume else: posChange = -trade.volume self.tradingPnl += posChange * (self.closePrice - trade.price) * size self.closePosition += posChange self.turnover += trade.price * trade.volume * size self.commission += trade.price * trade.volume * size * rate self.slippage += trade.volume * size * slippage # 汇总 self.totalPnl = self.tradingPnl + self.positionPnl self.netPnl = self.totalPnl - self.commission - self.slippage ######################################################################## class OptimizationSetting(object): """优化设置""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.paramDict = OrderedDict() self.optimizeTarget = '' # 优化目标字段 #---------------------------------------------------------------------- def addParameter(self, name, start, end=None, step=None): """增加优化参数""" if end is None and step is None: self.paramDict[name] = [start] return if end < start: print(u'参数起始点必须不大于终止点') return if step <= 0: print(u'参数布进必须大于0') return l = [] param = start while param <= end: l.append(param) param += step self.paramDict[name] = l #---------------------------------------------------------------------- def generateSetting(self): """生成优化参数组合""" # 参数名的列表 nameList = self.paramDict.keys() paramList = self.paramDict.values() # 使用迭代工具生产参数对组合 productList = list(product(*paramList)) # 把参数对组合打包到一个个字典组成的列表中 settingList = [] for p in productList: d = dict(zip(nameList, p)) settingList.append(d) return settingList #---------------------------------------------------------------------- def setOptimizeTarget(self, target): """设置优化目标字段""" self.optimizeTarget = target ######################################################################## class HistoryDataServer(RpcServer): """历史数据缓存服务器""" #---------------------------------------------------------------------- def __init__(self, repAddress, pubAddress): """Constructor""" super(HistoryDataServer, self).__init__(repAddress, pubAddress) self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'], 
                                            globalSetting['mongoPort'])
        self.historyDict = {}

        self.register(self.loadHistoryData)

    #----------------------------------------------------------------------
    def loadHistoryData(self, dbName, symbol, start, end):
        """"""
        # First check the in-memory cache and return it directly if available
        history = self.historyDict.get((dbName, symbol, start, end), None)
        if history:
            print(u'Found in-memory cache: %s %s %s %s' %(dbName, symbol, start, end))
            return history

        # Otherwise load from the database
        collection = self.dbClient[dbName][symbol]
        if end:
            flt = {'datetime':{'$gte':start, '$lt':end}}
        else:
            flt = {'datetime':{'$gte':start}}
        cx = collection.find(flt).sort('datetime')
        history = [d for d in cx]

        self.historyDict[(dbName, symbol, start, end)] = history
        print(u'Loaded from database: %s %s %s %s' %(dbName, symbol, start, end))
        return history

#----------------------------------------------------------------------
def runHistoryDataServer():
    """"""
    repAddress = 'tcp://*:5555'
    pubAddress = 'tcp://*:7777'

    hds = HistoryDataServer(repAddress, pubAddress)
    hds.start()

    print(u'Press any key to exit')
    hds.stop()
    raw_input()

#----------------------------------------------------------------------
def formatNumber(n):
    """Format a number into a string"""
    rn = round(n, 2)        # keep two decimal places
    return format(rn, ',')  # add thousands separators

#----------------------------------------------------------------------
def optimize(strategyClass, setting, targetName, mode, startDate, initDays, endDate,
             slippage, rate, size, priceTick, dbName, symbol):
    """Function run inside each worker process during multiprocess optimization"""
    engine = BacktestingEngine()
    engine.setBacktestingMode(mode)
    engine.setStartDate(startDate, initDays)
    engine.setEndDate(endDate)
    engine.setSlippage(slippage)
    engine.setRate(rate)
    engine.setSize(size)
    engine.setPriceTick(priceTick)
    engine.setDatabase(dbName, symbol)

    engine.initStrategy(strategyClass, setting)
    engine.runBacktesting()
    df = engine.calculateDailyResult()
    df, d = engine.calculateDailyStatistics(df)
    try:
        targetValue = d[targetName]
    except KeyError:
        targetValue = 0
    return (str(setting), targetValue, d)
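The parameter grid that OptimizationSetting.addParameter and generateSetting build above is simply the Cartesian product of each parameter's value list. The standalone sketch below illustrates that expansion; the parameter names ('atrLength', 'atrMa') and their ranges are made up for illustration and are not taken from this file.

from collections import OrderedDict
from itertools import product

def build_param_grid(param_dict):
    """Expand {name: [values, ...]} into a list of {name: value} combinations."""
    names = list(param_dict.keys())
    value_lists = list(param_dict.values())
    return [dict(zip(names, combo)) for combo in product(*value_lists)]

# Roughly what addParameter('atrLength', 12, 16, 2) and addParameter('atrMa', 10, 20, 10) produce
params = OrderedDict()
params['atrLength'] = [12, 14, 16]
params['atrMa'] = [10, 20]

for setting in build_param_grid(params):
    print(setting)
# {'atrLength': 12, 'atrMa': 10}, {'atrLength': 12, 'atrMa': 20}, {'atrLength': 14, 'atrMa': 10}, ...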
mit
tomasreimers/tensorflow-emscripten
tensorflow/contrib/learn/python/learn/estimators/estimator.py
5
55320
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import copy import inspect import os import tempfile import numpy as np import six from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers from tensorflow.contrib import metrics as metrics_lib from tensorflow.contrib.framework import deprecated from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.framework import deprecated_args from tensorflow.contrib.framework import list_variables from tensorflow.contrib.framework import load_variable from tensorflow.contrib.framework.python.framework import experimental from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import evaluable from tensorflow.contrib.learn.python.learn import metric_spec from tensorflow.contrib.learn.python.learn import monitors as monitor_lib from tensorflow.contrib.learn.python.learn import trainable from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn from tensorflow.contrib.learn.python.learn.estimators import metric_key from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import tensor_signature from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.contrib.learn.python.learn.learn_io import data_feeder from tensorflow.contrib.learn.python.learn.utils import export from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils from tensorflow.contrib.training.python.training import evaluation from tensorflow.core.framework import summary_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import device_setter from tensorflow.python.training import monitored_session from tensorflow.python.training import saver from tensorflow.python.training import session_run_hook from tensorflow.python.training import summary_io from tensorflow.python.util import compat AS_ITERABLE_DATE = '2016-09-15' 
AS_ITERABLE_INSTRUCTIONS = ( 'The default behavior of predict() is changing. The default value for\n' 'as_iterable will change to True, and then the flag will be removed\n' 'altogether. The behavior of this flag is described below.') SCIKIT_DECOUPLE_DATE = '2016-12-01' SCIKIT_DECOUPLE_INSTRUCTIONS = ( 'Estimator is decoupled from Scikit Learn interface by moving into\n' 'separate class SKCompat. Arguments x, y and batch_size are only\n' 'available in the SKCompat class, Estimator will only accept input_fn.\n' 'Example conversion:\n' ' est = Estimator(...) -> est = SKCompat(Estimator(...))') def _verify_input_args(x, y, input_fn, feed_fn, batch_size): """Verifies validity of co-existance of input arguments.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') else: if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided. """ _verify_input_args(x, y, input_fn, feed_fn, batch_size) if input_fn is not None: return input_fn, feed_fn df = data_feeder.setup_train_data_feeder( x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() def infer_real_valued_columns_from_input_fn(input_fn): """Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Input function returning a tuple of: features - Dictionary of string feature name to `Tensor` or `Tensor`. labels - `Tensor` of label values. Returns: List of `FeatureColumn` objects. """ with ops.Graph().as_default(): features, _ = input_fn() return layers.infer_real_valued_columns(features) def infer_real_valued_columns_from_input(x): """Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects. """ input_fn, _ = _get_input_fn( x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn) def _get_arguments(func): """Returns list of arguments this function has.""" if hasattr(func, '__code__'): # Regular function. return inspect.getargspec(func).args elif hasattr(func, '__call__'): # Callable object. return _get_arguments(func.__call__) elif hasattr(func, 'func'): # Partial function. 
return _get_arguments(func.func) def _get_replica_device_setter(config): """Creates a replica device setter if required. Args: config: A RunConfig instance. Returns: A replica device setter, or None. """ ps_ops = [ 'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable', 'MutableHashTableOfTensors', 'MutableDenseHashTable' ] if config.task_type: worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id) else: worker_device = '/job:worker' if config.num_ps_replicas > 0: return device_setter.replica_device_setter( ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec) else: return None def _make_metrics_ops(metrics, features, labels, predictions): """Add metrics based on `features`, `labels`, and `predictions`. `metrics` contains a specification for how to run metrics. It is a dict mapping friendly names to either `MetricSpec` objects, or directly to a metric function (assuming that `predictions` and `labels` are single tensors), or to `(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and `labels` to `metric` (assuming `labels` is a single tensor). Users are encouraged to use `MetricSpec` objects, which are more flexible and cleaner. They also lead to clearer errors. Args: metrics: A dict mapping names to metrics specification, for example `MetricSpec` objects. features: A dict of tensors returned from an input_fn as features/inputs. labels: A single tensor or a dict of tensors returned from an input_fn as labels. predictions: A single tensor or a dict of tensors output from a model as predictions. Returns: A dict mapping the friendly given in `metrics` to the result of calling the given metric function. Raises: ValueError: If metrics specifications do not work with the type of `features`, `labels`, or `predictions` provided. Mostly, a dict is given but no pred_name specified. """ metrics = metrics or {} # If labels is a dict with a single key, unpack into a single tensor. labels_tensor_or_dict = labels if isinstance(labels, dict) and len(labels) == 1: labels_tensor_or_dict = labels[list(labels.keys())[0]] result = {} # Iterate in lexicographic order, so the graph is identical among runs. for name, metric in sorted(six.iteritems(metrics)): if isinstance(metric, metric_spec.MetricSpec): result[name] = metric.create_metric_ops(features, labels, predictions) continue # TODO(b/31229024): Remove the rest of this loop logging.warning('Please specify metrics using MetricSpec. Using bare ' 'functions or (key, fn) tuples is deprecated and support ' 'for it will be removed on Oct 1, 2016.') if isinstance(name, tuple): # Multi-head metrics. if len(name) != 2: raise ValueError('Invalid metric for {}. It returned a tuple with ' 'len {}, expected 2.'.format(name, len(name))) if not isinstance(predictions, dict): raise ValueError( 'Metrics passed provide (name, prediction), ' 'but predictions are not dict. ' 'Metrics: %s, Predictions: %s.' % (metrics, predictions)) # Here are two options: labels are single Tensor or a dict. if isinstance(labels, dict) and name[1] in labels: # If labels are dict and the prediction name is in it, apply metric. result[name[0]] = metric(predictions[name[1]], labels[name[1]]) else: # Otherwise pass the labels to the metric. result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict) else: # Single head metrics. if isinstance(predictions, dict): raise ValueError( 'Metrics passed provide only name, no prediction, ' 'but predictions are dict. 
' 'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict)) result[name] = metric(predictions, labels_tensor_or_dict) return result def _dict_to_str(dictionary): """Get a `str` representation of a `dict`. Args: dictionary: The `dict` to be represented as `str`. Returns: A `str` representing the `dictionary`. """ return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items())) def _write_dict_to_summary(output_dir, dictionary, current_global_step): """Writes a `dict` into summary file in given output directory. Args: output_dir: `str`, directory to write the summary file in. dictionary: the `dict` to be written to summary file. current_global_step: `int`, the current global step. """ logging.info('Saving dict for global step %d: %s', current_global_step, _dict_to_str(dictionary)) summary_writer = summary_io.SummaryWriterCache.get(output_dir) summary_proto = summary_pb2.Summary() for key in dictionary: if dictionary[key] is None: continue value = summary_proto.value.add() value.tag = key if (isinstance(dictionary[key], np.float32) or isinstance(dictionary[key], float)): value.simple_value = float(dictionary[key]) else: logging.warn('Skipping summary for %s, must be a float or np.float32.', key) summary_writer.add_summary(summary_proto, current_global_step) summary_writer.flush() class BaseEstimator( sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable): """Abstract BaseEstimator class to train and evaluate TensorFlow models. Concrete implementation of this class should provide the following functions: * _get_train_ops * _get_eval_ops * _get_predict_ops `Estimator` implemented below is a good example of how to use this class. """ __metaclass__ = abc.ABCMeta # Note that for Google users, this is overriden with # learn_runner.EstimatorConfig. # TODO(wicke): Remove this once launcher takes over config functionality _Config = run_config.RunConfig # pylint: disable=invalid-name def __init__(self, model_dir=None, config=None): """Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: A RunConfig instance. """ # Model directory. self._model_dir = model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) # Create a run configuration. if config is None: self._config = BaseEstimator._Config() logging.info('Using default config.') else: self._config = config logging.info('Using config: %s', str(vars(self._config))) # Set device function depending if there are replicas or not. self._device_fn = _get_replica_device_setter(self._config) # Features and labels TensorSignature objects. # TODO(wicke): Rename these to something more descriptive self._features_info = None self._labels_info = None self._graph = None @property def config(self): # TODO(wicke): make RunConfig immutable, and then return it without a copy. return copy.deepcopy(self._config) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Trainable`. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`. 
""" if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') _verify_input_args(x, y, input_fn, None, batch_size) if x is not None: SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors) return self if max_steps is not None: try: start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return self except: # pylint: disable=bare-except pass hooks = monitor_lib.replace_monitors_with_hooks(monitors, self) if steps is not None or max_steps is not None: hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps)) loss = self._train_model(input_fn=input_fn, hooks=hooks) logging.info('Loss for final step: %s.', loss) return self @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def partial_fit( self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of labels. The training label values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided. """ logging.warning('The current implementation of partial_fit is not optimized' ' for use in a loop. Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors) @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('y', None), ('batch_size', None) ) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None, checkpoint_path=None, hooks=None, log_progress=True): # pylint: disable=g-doc-args,g-doc-return-or-yield """See `Evaluable`. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`. """ _verify_input_args(x, y, input_fn, feed_fn, batch_size) if x is not None: return SKCompat(self).score(x, y, batch_size, steps, metrics) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' 
% metrics) eval_results, global_step = self._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name, checkpoint_path=checkpoint_path, hooks=hooks, log_progress=log_progress) if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results @deprecated_args( SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None), ('batch_size', None), ('as_iterable', True) ) def predict( self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=True): """Returns predictions for given features. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. as_iterable: If True, return an iterable which keeps yielding predictions for each example until inputs are exhausted. Note: The inputs must terminate if you want the iterable to terminate (e.g. be sure to pass num_epochs=1 if you are using something like read_batch_features). Returns: A numpy array of predicted classes or regression values if the constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict` of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of predictions if as_iterable is True. Raises: ValueError: If x and input_fn are both provided or both `None`. """ _verify_input_args(x, None, input_fn, None, batch_size) if x is not None and not as_iterable: return SKCompat(self).predict(x, batch_size) input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size) return self._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=as_iterable) def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor. """ return load_variable(self.model_dir, name) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. """ return [name for name, _ in list_variables(self.model_dir)] @property def model_dir(self): return self._model_dir @deprecated_arg_values( '2016-09-23', 'The signature of the input_fn accepted by export is changing to be ' 'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. ' 'input_fn (and in most cases, input_feature_key) will become required ' 'args, and use_deprecated_input_fn will default to False and be removed ' 'altogether.', use_deprecated_input_fn=True, input_fn=None) def export(self, export_dir, input_fn=export._default_input_fn, # pylint: disable=protected-access input_feature_key=None, use_deprecated_input_fn=True, signature_fn=None, prediction_key=None, default_batch_size=1, exports_to_keep=None): """Exports inference graph into given dir. Args: export_dir: A string containing a directory to write the exported graph and checkpoints. input_fn: If `use_deprecated_input_fn` is true, then a function that given `Tensor` of `Example` strings, parses it into features that are then passed to the model. Otherwise, a function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). 
input_feature_key: Only used if `use_deprecated_input_fn` is false. String key into the features dict returned by `input_fn` that corresponds to a the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). use_deprecated_input_fn: Determines the signature format of `input_fn`. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `Tensor` or `dict` of `Tensor`s for predictions. prediction_key: The key for a tensor in the `predictions` dict (output from the `model_fn`) to use as the `predictions` input to the `signature_fn`. Optional. If `None`, predictions will pass to `signature_fn` without filtering. default_batch_size: Default batch size of the `Example` placeholder. exports_to_keep: Number of exports to keep. Returns: The string path to the exported directory. NB: this functionality was added ca. 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because subclasses are not returning a value. """ # pylint: disable=protected-access return export._export_estimator( estimator=self, export_dir=export_dir, signature_fn=signature_fn, prediction_key=prediction_key, input_fn=input_fn, input_feature_key=input_feature_key, use_deprecated_input_fn=use_deprecated_input_fn, default_batch_size=default_batch_size, exports_to_keep=exports_to_keep) @abc.abstractproperty def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overridden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass @abc.abstractproperty def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: A `ModelFnOps` object. """ pass def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: A `ModelFnOps` object. """ raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator') @deprecated( '2016-09-23', 'The signature of the input_fn accepted by export is changing to be ' 'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, ' 'which makes this function useless. This will be removed after the ' 'deprecation date.') def _get_feature_ops_from_example(self, examples_batch): """Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. 
Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called). """ if self._features_info is None: raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures( self._features_info, examples_batch) def _check_inputs(self, features, labels): if self._features_info is not None: logging.debug('Given features: %s, required signatures: %s.', str(features), str(self._features_info)) if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) logging.debug('Setting feature info to %s.', str(self._features_info)) if labels is not None: if self._labels_info is not None: logging.debug('Given labels: %s, required signatures: %s.', str(labels), str(self._labels_info)) if not tensor_signature.tensors_compatible(labels, self._labels_info): raise ValueError('Labels are incompatible with given information. ' 'Given labels: %s, required signatures: %s.' % (str(labels), str(self._labels_info))) else: self._labels_info = tensor_signature.create_signatures(labels) logging.debug('Setting labels info to %s', str(self._labels_info)) def _extract_metric_update_ops(self, eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} for name, metric_ops in six.iteritems(eval_dict): if isinstance(metric_ops, (list, tuple)): if len(metric_ops) == 2: value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning( 'Ignoring metric {}. It returned a list|tuple with len {}, ' 'expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return update_ops, value_ops def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None, name='', checkpoint_path=None, hooks=None, log_progress=True): # TODO(wicke): Remove this once Model and associated code are gone. if (hasattr(self._config, 'execution_mode') and self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')): return None, None # Check that model has been trained (if nothing has been set explicitly). if not checkpoint_path: latest_path = saver.latest_checkpoint(self._model_dir) if not latest_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) checkpoint_path = latest_path # Setup output directory. eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) # The default return type of _get_eval_ops is ModelFnOps. But there are # some subclasses of tf.contrib.learn.Estimator which override this # method and use the legacy signature, namely _get_eval_ops returns an # `eval_dict` dictionary of Tensors. The following else-statement code # covers these cases, but will soon be deleted after the subclasses are # updated. # TODO(b/32664904): Update subclasses and delete the else-statement. 
eval_ops = self._get_eval_ops(features, labels, metrics) if isinstance(eval_ops, model_fn_lib.ModelFnOps): # Default signature eval_dict = eval_ops.eval_metric_ops else: # Legacy signature eval_dict = eval_ops update_op, eval_dict = self._extract_metric_update_ops(eval_dict) hooks = hooks or [] if feed_fn: hooks.append(_FeedFnHook(feed_fn)) if steps: hooks.append( evaluation.StopAfterNEvalsHook( steps, log_progress=log_progress)) global_step_key = 'global_step' while global_step_key in eval_dict: global_step_key = '_' + global_step_key eval_dict[global_step_key] = global_step eval_results = evaluation.evaluate_once( checkpoint_path=checkpoint_path, master=self._config.evaluation_master, eval_ops=update_op, final_ops=eval_dict, hooks=hooks) current_global_step = eval_results[global_step_key] _write_dict_to_summary(eval_dir, eval_results, current_global_step) return eval_results, current_global_step def _get_features_from_input_fn(self, input_fn): result = input_fn() if isinstance(result, (list, tuple)): return result[0] return result def _infer_model(self, input_fn, feed_fn=None, outputs=None, as_iterable=True, iterate_batches=False): # Check that model has been trained. checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features = self._get_features_from_input_fn(input_fn) infer_ops = self._call_legacy_get_predict_ops(features) predictions = self._filter_predictions(infer_ops.predictions, outputs) mon_sess = monitored_session.MonitoredSession( session_creator=monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path)) if not as_iterable: with mon_sess: if not mon_sess.should_stop(): return mon_sess.run(predictions, feed_fn() if feed_fn else None) else: return self._predict_generator(mon_sess, predictions, feed_fn, iterate_batches) def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches): with mon_sess: while not mon_sess.should_stop(): preds = mon_sess.run(predictions, feed_fn() if feed_fn else None) if iterate_batches: yield preds elif not isinstance(predictions, dict): for pred in preds: yield pred else: first_tensor = list(preds.values())[0] if isinstance(first_tensor, sparse_tensor.SparseTensorValue): batch_length = first_tensor.dense_shape[0] else: batch_length = first_tensor.shape[0] for i in range(batch_length): yield {key: value[i] for key, value in six.iteritems(preds)} if self._is_input_constant(feed_fn, mon_sess.graph): return def _is_input_constant(self, feed_fn, graph): # If there are no queue_runners, the input `predictions` is a # constant, and we should stop after the first epoch. If, # instead, there are queue_runners, eventually they should throw # an `OutOfRangeError`. if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS): return False # data_feeder uses feed_fn to generate `OutOfRangeError`. if feed_fn is not None: return False return True def _filter_predictions(self, predictions, outputs): if not outputs: return predictions if not isinstance(predictions, dict): raise ValueError( 'outputs argument is not valid in case of non-dict predictions.') existing_keys = predictions.keys() predictions = { key: value for key, value in six.iteritems(predictions) if key in outputs } if not predictions: raise ValueError('Expected to run at least one output from %s, ' 'provided %s.' 
% (existing_keys, outputs)) return predictions def _train_model(self, input_fn, hooks): all_hooks = [] self._graph = ops.Graph() with self._graph.as_default() as g, g.device(self._device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, labels = input_fn() self._check_inputs(features, labels) model_fn_ops = self._call_legacy_get_train_ops(features, labels) ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss) all_hooks.extend([ basic_session_run_hooks.NanTensorHook(model_fn_ops.loss), basic_session_run_hooks.LoggingTensorHook( { 'loss': model_fn_ops.loss, 'step': global_step }, every_n_iter=100) ]) all_hooks.extend(hooks) scaffold = model_fn_ops.training_scaffold or monitored_session.Scaffold() if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)): ops.add_to_collection( ops.GraphKeys.SAVERS, saver.Saver( sharded=True, max_to_keep=self._config.keep_checkpoint_max, defer_build=True)) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): saver_hook_exists = any([ isinstance(h, basic_session_run_hooks.CheckpointSaverHook) for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks + model_fn_ops.training_chief_hooks) ]) if not saver_hook_exists: chief_hooks = [ basic_session_run_hooks.CheckpointSaverHook( self._model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) ] with monitored_session.MonitoredTrainingSession( master=self._config.master, is_chief=self._config.is_chief, checkpoint_dir=self._model_dir, scaffold=scaffold, hooks=all_hooks + model_fn_ops.training_hooks, chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks, save_checkpoint_secs=0, # Saving is handled by a hook. save_summaries_steps=self._config.save_summary_steps, config=self.config.tf_config) as mon_sess: loss = None while not mon_sess.should_stop(): _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss]) summary_io.SummaryWriterCache.clear() return loss def _call_legacy_get_predict_ops(self, features): # The default return type of _get_predict_ops is ModelFnOps. But there are # some subclasses of tf.contrib.learn.Estimator which override this # method and use the legacy signature, namely _get_predict_ops returns a # `predictions` Tensor or dict or Tensors. The following else-statement # code covers these cases, but will soon be deleted after the subclasses # are updated. # TODO(b/32664904): Update subclasses and delete the else-statement. infer_ops = self._get_predict_ops(features) if isinstance(infer_ops, model_fn_lib.ModelFnOps): # Default signature return infer_ops return model_fn_lib.ModelFnOps( mode=model_fn_lib.ModeKeys.INFER, predictions=infer_ops) def _call_legacy_get_train_ops(self, features, labels): train_ops = self._get_train_ops(features, labels) if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature return train_ops return model_fn_lib.ModelFnOps( mode=model_fn_lib.ModeKeys.TRAIN, predictions=None, loss=train_ops[1], train_op=train_ops[0]) def _identity_feature_engineering_fn(features, labels): return features, labels class Estimator(BaseEstimator): """Estimator class is the basic TensorFlow model trainer/evaluator. """ def __init__(self, model_fn=None, model_dir=None, config=None, params=None, feature_engineering_fn=None): """Constructs an `Estimator` instance. Args: model_fn: Model function. 
Follows the signature: * Args: * `features`: single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head models). If mode is `ModeKeys.INFER`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode`: Optional. Specifies if this training, evaluation or prediction. See `ModeKeys`. * `params`: Optional `dict` of hyperparameters. Will receive what is passed to Estimator in `params` parameter. This allows to configure Estimators from hyper parameter tuning. * `config`: Optional configuration object. Will receive what is passed to Estimator in `config` parameter, or the default `config`. Allows updating things in your model_fn based on configuration such as `num_ps_replicas`. * `model_dir`: Optional directory where model parameters, graph etc are saved. Will receive what is passed to Estimator in `model_dir` parameter, or the default `model_dir`. Allows updating things in your model_fn that expect model_dir, such as training hooks. * Returns: `ModelFnOps` Also supports a legacy signature which returns tuple of: * predictions: `Tensor`, `SparseTensor` or dictionary of same. Can also be any type that is convertible to a `Tensor` or `SparseTensor`, or dictionary of same. * loss: Scalar loss `Tensor`. * train_op: Training update `Tensor` or `Operation`. Supports next three signatures for the function: * `(features, labels) -> (predictions, loss, train_op)` * `(features, labels, mode) -> (predictions, loss, train_op)` * `(features, labels, mode, params) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config) -> (predictions, loss, train_op)` * `(features, labels, mode, params, config, model_dir) -> (predictions, loss, train_op)` model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. config: Configuration object. params: `dict` of hyper parameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. feature_engineering_fn: Feature engineering function. Takes features and labels which are the output of `input_fn` and returns features and labels which will be fed into `model_fn`. Please check `model_fn` for a definition of features and labels. Raises: ValueError: parameters of `model_fn` don't match `params`. """ super(Estimator, self).__init__(model_dir=model_dir, config=config) if model_fn is not None: # Check number of arguments of the given function matches requirements. model_fn_args = _get_arguments(model_fn) if params is not None and 'params' not in model_fn_args: raise ValueError('Estimator\'s model_fn (%s) has less than 4 ' 'arguments, but not None params (%s) are passed.' % (model_fn, params)) if params is None and 'params' in model_fn_args: logging.warning('Estimator\'s model_fn (%s) includes params ' 'argument, but params are not passed to Estimator.', model_fn) self._model_fn = model_fn self.params = params self._feature_engineering_fn = ( feature_engineering_fn or _identity_feature_engineering_fn) def _call_model_fn(self, features, labels, mode): """Calls model function with support of 2, 3 or 4 arguments. Args: features: features dict. labels: labels dict. mode: ModeKeys Returns: A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a `ModelFnOps` object. 
Raises: ValueError: if model_fn returns invalid objects. """ features, labels = self._feature_engineering_fn(features, labels) model_fn_args = _get_arguments(self._model_fn) kwargs = {} if 'mode' in model_fn_args: kwargs['mode'] = mode if 'params' in model_fn_args: kwargs['params'] = self.params if 'config' in model_fn_args: kwargs['config'] = self.config if 'model_dir' in model_fn_args: kwargs['model_dir'] = self.model_dir model_fn_results = self._model_fn(features, labels, **kwargs) if isinstance(model_fn_results, model_fn_lib.ModelFnOps): return model_fn_results # Here model_fn_ops should be a tuple with 3 elements. if len(model_fn_results) != 3: raise ValueError('Unrecognized value returned by model_fn, ' 'please return ModelFnOps.') return model_fn_lib.ModelFnOps( mode=mode, predictions=model_fn_results[0], loss=model_fn_results[1], train_op=model_fn_results[2]) def _get_train_ops(self, features, labels): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. """ return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN) def _get_eval_ops(self, features, labels, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. labels: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metrics to run. If None, the default metric functions are used; if {}, no metrics are used. Otherwise, `metrics` should map friendly names for the metric to a `MetricSpec` object defining which model outputs to evaluate against which labels with which metric function. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in `../../../../metrics/python/metrics/ops/streaming_metrics.py` and `../metric_spec.py`. Returns: `ModelFnOps` object. Raises: ValueError: if `metrics` don't match `labels`. """ model_fn_ops = self._call_model_fn( features, labels, model_fn_lib.ModeKeys.EVAL) # Custom metrics should overwrite defaults. if metrics: model_fn_ops.eval_metric_ops.update(_make_metrics_ops( metrics, features, labels, model_fn_ops.predictions)) if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops: model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = ( metrics_lib.streaming_mean(model_fn_ops.loss)) return model_fn_ops def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: `ModelFnOps` object. """ labels = tensor_signature.create_placeholders_from_signatures( self._labels_info) return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER) @experimental def export_savedmodel( self, export_dir_base, input_fn, default_output_alternative_key=None, assets_extra=None, as_text=False, exports_to_keep=None): """Exports inference graph as a SavedModel into given dir. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. 
input_fn: A function that takes no argument and returns an `InputFnOps`. default_output_alternative_key: the name of the head to serve when none is specified. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel. Each key should give the destination path (including the filename) relative to the assets.extra directory. The corresponding value gives the full path of the source file to be copied. For example, the simple case of copying a single file without renaming it is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`. as_text: whether to write the SavedModel proto in text format. exports_to_keep: Number of exports to keep. Returns: The string path to the exported directory. Raises: ValueError: if an unrecognized export_type is requested. """ if input_fn is None: raise ValueError('input_fn must be defined.') with ops.Graph().as_default() as g: contrib_variables.create_global_step(g) # Call the input_fn and collect the input alternatives. input_ops = input_fn() input_alternatives, features = ( saved_model_export_utils.get_input_alternatives(input_ops)) # Call the model_fn and collect the output alternatives. model_fn_ops = self._call_model_fn(features, None, model_fn_lib.ModeKeys.INFER) output_alternatives, actual_default_output_alternative_key = ( saved_model_export_utils.get_output_alternatives( model_fn_ops, default_output_alternative_key)) # Build the SignatureDefs from all pairs of input and output signatures signature_def_map = saved_model_export_utils.build_all_signature_defs( input_alternatives, output_alternatives, actual_default_output_alternative_key) # Locate the latest checkpoint # TODO(soergel): does it help that we know we have one from this step? checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) export_dir = saved_model_export_utils.get_timestamped_export_dir( export_dir_base) with tf_session.Session('') as session: variables.initialize_local_variables() data_flow_ops.tables_initializer() saver_for_restore = saver.Saver( variables.global_variables(), sharded=True) saver_for_restore.restore(session, checkpoint_path) init_op = control_flow_ops.group( variables.local_variables_initializer(), data_flow_ops.tables_initializer()) # Perform the export builder = saved_model_builder.SavedModelBuilder(export_dir) builder.add_meta_graph_and_variables( session, [tag_constants.SERVING], signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), legacy_init_op=init_op) builder.save(as_text) # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) gfile.MakeDirs(dest_path) gfile.Copy(source, dest_absolute) return export_dir class _FeedFnHook(session_run_hook.SessionRunHook): """Runs feed_fn and sets the feed_dict accordingly.""" def __init__(self, feed_fn): self.feed_fn = feed_fn def before_run(self, run_context): # pylint: disable=unused-argument return session_run_hook.SessionRunArgs( fetches=None, feed_dict=self.feed_fn()) # For time of deprecation x,y from Estimator allow direct access. 
# pylint: disable=protected-access class SKCompat(sklearn.BaseEstimator): """Scikit learn wrapper for TensorFlow Learn Estimator.""" def __init__(self, estimator): self._estimator = estimator def fit(self, x, y, batch_size=128, steps=None, max_steps=None, monitors=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) all_monitors = [] if feed_fn: all_monitors = [_FeedFnHook(feed_fn)] if monitors: all_monitors.extend(monitors) self._estimator.fit(input_fn=input_fn, steps=steps, max_steps=max_steps, monitors=all_monitors) return self def score(self, x, y, batch_size=128, steps=None, metrics=None): input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._estimator._evaluate_model( input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name='score') if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results def predict(self, x, batch_size=128, outputs=None): input_fn, feed_fn = _get_input_fn( x, None, input_fn=None, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) results = list( self._estimator._infer_model( input_fn=input_fn, feed_fn=feed_fn, outputs=outputs, as_iterable=True, iterate_batches=True)) if not isinstance(results[0], dict): return np.concatenate([output for output in results], axis=0) return { key: np.concatenate( [output[key] for output in results], axis=0) for key in results[0] }
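To make the split between Estimator and SKCompat concrete: Estimator.fit itself only accepts an input_fn, while SKCompat wraps the estimator so numpy arrays can be passed directly as x and y, which is exactly what the deprecation notes above describe. The toy model_fn below uses the legacy (features, labels) -> (predictions, loss, train_op) signature documented in the Estimator docstring. It is a minimal sketch under the assumption of a TensorFlow 1.x environment where tf.contrib.learn is importable; the linear model and data are illustrative only.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn

def model_fn(features, labels):
    # Legacy tuple signature: returns (predictions, loss, train_op).
    w = tf.get_variable('w', shape=[1, 1], dtype=tf.float32)
    predictions = tf.matmul(tf.cast(features, tf.float32), w)
    labels = tf.reshape(tf.cast(labels, tf.float32), [-1, 1])
    loss = tf.reduce_mean(tf.square(predictions - labels))
    # Incrementing the global step lets StopAtStepHook end training after `steps`.
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
    return predictions, loss, train_op

x = np.random.rand(256, 1).astype(np.float32)   # [n_samples, n_features]
y = 2.0 * x[:, 0]                               # [n_samples]

estimator = learn.Estimator(model_fn=model_fn)
learn.SKCompat(estimator).fit(x=x, y=y, batch_size=32, steps=50)
predictions = learn.SKCompat(estimator).predict(x)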
apache-2.0
fegonda/icon_demo
code/model/unet/g.py
1
44565
import os import sys import skimage.transform import skimage.exposure import time import glob import numpy as np import mahotas import random import matplotlib import matplotlib.pyplot as plt import scipy import scipy.ndimage import json from scipy.ndimage.filters import maximum_filter base_path = os.path.dirname(__file__) sys.path.insert(1,os.path.join(base_path, '../../common')) sys.path.insert(2,os.path.join(base_path, '../../database')) from utility import Utility from settings import Paths from project import Project from paths import Paths from db import DB # the idea is to grow the labels to cover the whole membrane # image and label should be [0,1] def adjust_imprecise_boundaries(image, label, number_iterations=5): label = label.copy() label_orig = label.copy() for i in xrange(number_iterations): # grow labels by one pixel label = maximum_filter(label, 2) # only keep pixels that are on dark membrane non_valid_label = np.logical_and(label==1, image>0.7) label[non_valid_label] = 0 # make sure original labels are preserved label = np.logical_or(label==1, label_orig==1) return label def deform_image(image): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed displacement_x = np.random.normal(size=image.shape, scale=10) displacement_y = np.random.normal(size=image.shape, scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,image.shape[0]), np.arange(0,image.shape[1]), indexing='ij') displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) return apply_deformation(np.uint8(image*255), coordinates) def deform_images(image1, image2, image3=None): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed displacement_x = np.random.normal(size=image1.shape, scale=10) displacement_y = np.random.normal(size=image1.shape, scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,image1.shape[0]), np.arange(0,image1.shape[1]), indexing='ij') displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed1 = apply_deformation(np.uint8(image1*255), coordinates) deformed2 = apply_deformation(np.uint8(image2*255), coordinates) if not image3 is None: deformed3 = apply_deformation(image3, coordinates) return (deformed1, deformed2, deformed3) return (deformed1, deformed2) def deform_images_list(images): # assumes image is uint8 def apply_deformation(image, coordinates): # ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid. 
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect') deformed = np.reshape(deformed, image.shape) return deformed displacement_x = np.random.normal(size=images.shape[:2], scale=10) displacement_y = np.random.normal(size=images.shape[:2], scale=10) # smooth over image coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij') displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten() displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten() coordinates = np.vstack([displacement_x, displacement_y]) deformed = images.copy() for i in xrange(images.shape[2]): deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates) return deformed def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005 if not doClahe: sortedValues = np.sort( img.ravel()) minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))]) maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))]) normImg = np.float32(img - minVal) * (255 / (maxVal-minVal)) normImg[normImg<0] = 0 normImg[normImg>255] = 255 output = (np.float32(normImg) / 255.0) return output else: output = skimage.exposure.equalize_adapthist(img) return output def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random): start_time = time.time() data_path = '/n/home00/fgonda/icon/data/reference' #if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'): if os.path.exists( data_path ): pathPrefix = data_path #pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' else: pathPrefix = '/n/pfister_lab/vkaynig/' #img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif' #img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif' img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif' img_search_string_backgroundMaskImages = pathPrefix + 'labels/background/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_label = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) ) whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float) whole_set_labels = np.zeros(nsamples, dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' 
counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) for img_index in xrange(np.shape(img_files_gray)[0]): img = mahotas.imread(img_files_gray[img_index]) img = normalizeImage(img) grayImages[:,:,img_index] = img label_img = mahotas.imread(img_files_label[img_index]) labelImages[:,:,img_index] = label_img mask_img = mahotas.imread(img_files_backgroundMask[img_index]) maskImages[:,:,img_index] = mask_img for img_index in xrange(np.shape(img_files_gray)[0]): img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] mask_img = maskImages[:,:,img_index] #get rid of invalid image borders border_patch = np.int(np.ceil(patchSize/2.0)) border = np.int(np.ceil(np.sqrt(2*(border_patch**2)))) label_img[:border,:] = 0 #top label_img[-border:,:] = 0 #bottom label_img[:,:border] = 0 #left label_img[:,-border:] = 0 #right mask_img[:border,:] = 0 mask_img[-border:,:] = 0 mask_img[:,:border] = 0 mask_img[:,-border:] = 0 membrane_indices = np.nonzero(label_img) non_membrane_indices = np.nonzero(mask_img) positiveSample = True for i in xrange(nsamples_perImage): if counter >= nsamples: break if positiveSample: randmem = random.choice(xrange(len(membrane_indices[0]))) (row,col) = (membrane_indices[0][randmem], membrane_indices[1][randmem]) label = 1.0 positiveSample = False else: randmem = random.choice(xrange(len(non_membrane_indices[0]))) (row,col) = (non_membrane_indices[0][randmem], non_membrane_indices[1][randmem]) label = 0.0 positiveSample = True imgPatch = img[row-border+1:row+border, col-border+1:col+border] imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360))) imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) imgPatch = np.rot90(imgPatch, random.randint(0,3)) whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = label counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() #remove the sorting in image order shuffleIndex = rng.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i] = labels[shuffleIndex[i]] data_set = (whole_data, whole_set_labels) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ' + '%.2fm' % (total_time / 60.) 
rval = data_set return rval def generate_image_data(img, patchSize=29, rows=1): img = normalizeImage(img) # pad image borders border = np.int(np.ceil(patchSize/2.0)) img_padded = np.pad(img, border, mode='reflect') whole_set_patches = np.zeros((len(rows)*img.shape[1], patchSize**2)) counter = 0 for row in rows: for col in xrange(img.shape[1]): imgPatch = img_padded[row+1:row+2*border, col+1:col+2*border] whole_set_patches[counter,:] = imgPatch.flatten() counter += 1 #normalize data whole_set_patches = np.float32(whole_set_patches) whole_set_patches = whole_set_patches - 0.5 return whole_set_patches def stupid_map_wrapper(parameters): f = parameters[0] args = parameters[1:] return f(*args) def get_sample_sizes(annotations): samples_sizes = [] n_labels = len(annotations) for coordinates in annotations: n_label_samples_size = len(coordinates)/2 samples_sizes.append( n_label_samples_size ) return samples_sizes def gen_membrane_image(annotations, dim): m_image = np.zeros( (dim[0], dim[1]) ) label = 1 coordinates = annotations[ label ] n_coordinates = len(coordinates) i = 0 while i < n_coordinates: x = coordinates[i] y = coordinates[i+1] m_image[x][y] = 1.0 i = i+2 return m_image def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None): nr_layers=3 def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() if purpose == 'train': images = DB.getTrainingImages( project.id, new=False ) path = Paths.TrainGrayscale else: images = DB.getImages( project.id, purpose=1, new=False, annotated=True ) path = Paths.ValidGrayscale files_gray = [] data_labels = [] label_sample_sizes = np.array([ 0, 0]) #imgs = DB.getImages( project.id ) for image in images: d_path = '%s/%s.tif'%(path, image.id) l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id) if os.path.exists( d_path ) and os.path.exists( l_path ): # load the annotations with open( l_path ) as labels_f: annotations = json.load( labels_f ) # skip if not enough samples in the annotations sample_sizes = get_sample_sizes( annotations ) if np.sum( sample_sizes ) == 0: continue label_sample_sizes = label_sample_sizes + np.array(sample_sizes) files_gray.append( d_path ) data_labels.append( annotations ) if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0: return None whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' 
counter = 0 border_patch = np.ceil(patchSize/2.0) pad = patchSize read_order = np.random.permutation(np.shape(files_gray)[0]) for index in read_order: file_image = files_gray[index] labels = data_labels[index] sample_sizes = get_sample_sizes( labels ) img = mahotas.imread(files_gray[index]) img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric') # normalizes [0,1] img = normalizeImage(img, doClahe=True) membrane_img = gen_membrane_image( labels, img.shape ) #img_cs = int(np.floor(nr_layers/2)) #if purpose=='train': # # adjust according to middle image # membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0) #get rid of invalid image borders #membrane_img[:,-patchSize:] = 0 #membrane_img[-patchSize:,:] = 0 valid_indices = np.nonzero(membrane_img) print valid_indices if len(valid_indices[0]) == 0 or len(valid_indices[1]) == 0: continue for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize] memPatch = membrane_img[row:row+patchSize, col:col+patchSize] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) memPatch = np.fliplr(memPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) memPatch = np.rot90(memPatch, rotateInt) #imgPatch = deform_image(imgPatch) imgPatch, memPatch = deform_images( imgPatch, memPatch ) imgPatch = imgPatch / np.double(np.max(imgPatch)) memPatch = memPatch / np.double(np.max(memPatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) memPatch = memPatch[offset_small_patch:offset_small_patch+outPatchSize,offset_small_patch:offset_small_patch+outPatchSize] whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = memPatch.flatten() counter = counter + 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] data_set = (whole_data, whole_set_labels) print np.min(whole_data), np.max(whole_data) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. 
print 'finished sampling data' return data_set def agenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() if purpose == 'train': images = DB.getTrainingImages( project.id, new=False ) path = Paths.TrainGrayscale else: images = DB.getImages( project.id, purpose=1, new=False, annotated=True ) path = Paths.ValidGrayscale files_gray = [] data_labels = [] label_sample_sizes = np.array([ 0, 0]) #imgs = DB.getImages( project.id ) for image in images: d_path = '%s/%s.tif'%(path, image.id) l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id) if os.path.exists( d_path ) and os.path.exists( l_path ): # load the annotations with open( l_path ) as labels_f: annotations = json.load( labels_f ) # skip if not enough samples in the annotations sample_sizes = get_sample_sizes( annotations ) if np.sum( sample_sizes ) == 0: continue label_sample_sizes = label_sample_sizes + np.array(sample_sizes) files_gray.append( d_path ) data_labels.append( annotations ) print len(files_gray) print len(data_labels) print label_sample_sizes if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0: return None whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' counter = 0 border_patch = np.ceil(patchSize/2.0) pad = patchSize read_order = np.random.permutation(np.shape(files_gray)[0]) for index in read_order: file_image = files_gray[index] labels = data_labels[index] sample_sizes = get_sample_sizes( labels ) img = mahotas.imread(files_gray[index]) img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric') # normalizes [0,1] img = normalizeImage(img, doClahe=True) membrane_img = gen_membrane_image( labels, img.shape ) print membrane_img.shape print np.unique(membrane_img) for label, coordinates in enumerate( labels ): if counter >= nsamples: break ncoordinates = len(coordinates) if ncoordinates == 0: continue # randomly sample from the label indices = np.random.choice( ncoordinates, sample_sizes[label], replace=False) for i in indices: if i%2 == 1: i = i-1 if counter >= nsamples: break col = coordinates[i] row = coordinates[i+1] r1 = int(row+patchSize-border_patch) r2 = int(row+patchSize+border_patch) c1 = int(col+patchSize-border_patch) c2 = int(col+patchSize+border_patch) imgPatch = img[r1:r2,c1:c2] memPatch = membrane_img[r1:r2,c1:c2] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) memPatch = np.fliplr(memPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) memPatch = np.rot90(memPatch, rotateInt) #imgPatch = deform_image(imgPatch) imgPatch, memPatch = deform_images( imgPatch, memPatch ) imgPatch = imgPatch / np.double(np.max(imgPatch)) memPatch = memPatch / np.double(np.max(memPatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) memPatch = memPatch[offset_small_patch:offset_small_patch+outPatchSize,offset_small_patch:offset_small_patch+outPatchSize] whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = memPatch.flatten() counter = counter + 1 #normalize data whole_data = 
np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(labels)[0]) for i in xrange(np.shape(labels)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] data_set = (whole_data, whole_set_labels) print np.min(whole_data), np.max(whole_data) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. print 'finished sampling data' return data_set def gen_validation_data(project, nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() img_search_string_membraneImages = '%s/*.tif'%(Paths.ValidMembranes) img_search_string_labelImages = '%s/*.tif'%(Paths.ValidLabels) img_search_string_grayImages = '%s/*.tif'%(Paths.ValidGray) img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_labels = sorted( glob.glob( img_search_string_labelImages ) ) whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) # read the data # in random order read_order = np.random.permutation(np.shape(img_files_gray)[0]) for img_index in read_order: #print img_files_gray[img_index] img = mahotas.imread(img_files_gray[img_index]) # normalizes [0,1] img = normalizeImage(img, doClahe=True) grayImages[:,:,img_index] = img membrane_img = mahotas.imread(img_files_membrane[img_index])/255. 
membraneImages[:,:,img_index] = membrane_img maskImages[:,:,img_index] = 1.0 label_img = mahotas.imread(img_files_labels[img_index]) label_img = np.double(label_img) if label_img.ndim == 3: label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2] labelImages[:,:,img_index] = label_img for img_index in xrange(np.shape(img_files_gray)[0]): #print img_files_gray[read_order[img_index]] img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] membrane_img = membraneImages[:,:,img_index] mask_img = maskImages[:,:,img_index] #get rid of invalid image borders mask_img[:,-patchSize:] = 0 mask_img[-patchSize:,:] = 0 valid_indices = np.nonzero(mask_img) for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize] membranePatch = membrane_img[row:row+patchSize, col:col+patchSize] labelPatch = label_img[row:row+patchSize, col:col+patchSize] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) membranePatch = np.fliplr(membranePatch) labelPatch = np.fliplr(labelPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) membranePatch = np.rot90(membranePatch, rotateInt) labelPatch = np.rot90(labelPatch, rotateInt) labelPatch = relabel(labelPatch) imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch)) imgPatch = imgPatch / np.double(np.max(imgPatch)) membranePatch = membranePatch / np.double(np.max(membranePatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = labelPatch.flatten() whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0) counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() membranes = whole_set_membranes.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(membranes)[0]) for i in xrange(np.shape(membranes)[0]): whole_data[i,:] = data[shuffleIndex[i],:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] whole_set_membranes[i,:] = membranes[shuffleIndex[i],:] data_set = (whole_data, whole_set_membranes, whole_set_labels) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. 
print 'finished sampling data' return data_set def gen_training_data(project, purpose='train', nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image print 'gen_data' if project == None: return start_time = time.time() files_gray = [] files_membranes = [] if purpose == 'train': images = DB.getTrainingImages( project.id, new=False ) path = Paths.TrainGrayscale for image in images: d_path = '%s/%s.tif'%(path, image.id) m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id) if os.path.exists( d_path ) and os.path.exists( l_path ): ''' # load the annotations with open( l_path ) as labels_f: annotations = json.load( labels_f ) # skip if not enough samples in the annotations sample_sizes = get_sample_sizes( annotations ) if np.sum( sample_sizes ) == 0: continue label_sample_sizes = label_sample_sizes + np.array(sample_sizes) files_gray.append( d_path ) data_labels.append( annotations ) ''' files_gray.append( d_path ) files_membranes.append( m_path ) else: images = DB.getImages( project.id, purpose=1, new=False, annotated=True ) path = Paths.ValidLabels files_gray = [] data_labels = [] label_sample_sizes = np.array([ 0, 0]) if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0: return None whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) return data # changed the patch sampling to use upper left corner instead of middle pixel # for patch labels it doesn't matter and it makes sampling even and odd patches easier def oldgenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() # pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' # pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/' #pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/' pathPrefix = '/n/home00/fgonda/icon/data/reference/' if not os.path.exists(pathPrefix): pathPrefix = '/n/pfister_lab/vkaynig/' #img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif' img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif' img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_labels = sorted( glob.glob( img_search_string_labelImages ) ) print len(img_files_gray) print len(img_files_membrane) print len(img_files_labels) whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' 
counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) # read the data # in random order read_order = np.random.permutation(np.shape(img_files_gray)[0]) for img_index in read_order: #print img_files_gray[img_index] img = mahotas.imread(img_files_gray[img_index]) # normalizes [0,1] img = normalizeImage(img, doClahe=True) grayImages[:,:,img_index] = img membrane_img = mahotas.imread(img_files_membrane[img_index])/255. membraneImages[:,:,img_index] = membrane_img maskImages[:,:,img_index] = 1.0 if purpose == 'validate': label_img = mahotas.imread(img_files_labels[img_index]) label_img = np.double(label_img) if label_img.ndim == 3: label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2] labelImages[:,:,img_index] = label_img for img_index in xrange(np.shape(img_files_gray)[0]): #print img_files_gray[read_order[img_index]] img = grayImages[:,:,img_index] label_img = labelImages[:,:,img_index] membrane_img = membraneImages[:,:,img_index] mask_img = maskImages[:,:,img_index] if purpose=='train': membrane_img = adjust_imprecise_boundaries(img, membrane_img, 0) #get rid of invalid image borders mask_img[:,-patchSize:] = 0 mask_img[-patchSize:,:] = 0 valid_indices = np.nonzero(mask_img) for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize] membranePatch = membrane_img[row:row+patchSize, col:col+patchSize] labelPatch = label_img[row:row+patchSize, col:col+patchSize] if random.random() < 0.5: imgPatch = np.fliplr(imgPatch) membranePatch = np.fliplr(membranePatch) if purpose == 'validate': labelPatch = np.fliplr(labelPatch) rotateInt = random.randint(0,3) imgPatch = np.rot90(imgPatch, rotateInt) membranePatch = np.rot90(membranePatch, rotateInt) if purpose=='validate': labelPatch = np.rot90(labelPatch, rotateInt) if purpose=='validate': labelPatch = relabel(labelPatch) imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch)) else: imgPatch, membranePatch = deform_images(imgPatch, membranePatch) imgPatch = imgPatch / np.double(np.max(imgPatch)) membranePatch = membranePatch / np.double(np.max(membranePatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] whole_set_patches[counter,:] = imgPatch.flatten() whole_set_labels[counter] = labelPatch.flatten() whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0) counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() membranes = whole_set_membranes.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(membranes)[0]) for i in xrange(np.shape(membranes)[0]): whole_data[i,:] = data[shuffleIndex[i],:] 
whole_set_labels[i,:] = labels[shuffleIndex[i],:] whole_set_membranes[i,:] = membranes[shuffleIndex[i],:] if purpose == 'validate': data_set = (whole_data, whole_set_membranes, whole_set_labels) else: data_set = (whole_data, whole_set_membranes) end_time = time.time() total_time = (end_time - start_time) print 'Running time: ', total_time / 60. print 'finished sampling data' return data_set def generate_experiment_data_patch_prediction_layers(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, nr_layers=3): def relabel(image): id_list = np.unique(image) for index, id in enumerate(id_list): image[image==id] = index return image start_time = time.time() if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'): pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/' else: pathPrefix = '/n/pfister_lab/vkaynig/' img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif' img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif' img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif' img_files_gray = sorted( glob.glob( img_search_string_grayImages ) ) img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) ) img_files_labels = sorted( glob.glob( img_search_string_labelImages ) ) whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float) whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32) #how many samples per image? nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(img_files_gray)[0]) )) print 'using ' + np.str(nsamples_perImage) + ' samples per image.' counter = 0 img = mahotas.imread(img_files_gray[0]) grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0])) # read the data # in random order #read_order = np.random.permutation(np.shape(img_files_gray)[0]) for img_index in range(np.shape(img_files_gray)[0]): #print img_files_gray[img_index] img = mahotas.imread(img_files_gray[img_index]) # normalizes [0,1] img = normalizeImage(img) grayImages[:,:,img_index] = img membrane_img = mahotas.imread(img_files_membrane[img_index])/255. 
membraneImages[:,:,img_index] = membrane_img maskImages[:,:,img_index] = 1.0 if purpose == 'validate': label_img = mahotas.imread(img_files_labels[img_index]) label_img = np.double(label_img) labelImages[:,:,img_index] = label_img for img_index in xrange(np.shape(img_files_gray)[0]): img_cs = int(np.floor(nr_layers/2)) img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files_gray)[0]-1) img = grayImages[:,:,img_valid_range_indices] label_img = labelImages[:,:,img_index] membrane_img = membraneImages[:,:,img_index] mask_img = maskImages[:,:,img_index] if purpose=='train': # adjust according to middle image membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0) #get rid of invalid image borders mask_img[:,-patchSize:] = 0 mask_img[-patchSize:,:] = 0 valid_indices = np.nonzero(mask_img) for i in xrange(nsamples_perImage): if counter >= nsamples: break randmem = random.choice(xrange(len(valid_indices[0]))) (row,col) = (valid_indices[0][randmem], valid_indices[1][randmem]) imgPatch = img[row:row+patchSize, col:col+patchSize,:] membranePatch = membrane_img[row:row+patchSize, col:col+patchSize] labelPatch = label_img[row:row+patchSize, col:col+patchSize] if random.random() < 0.5: for flip_i in xrange(nr_layers): imgPatch[:,:,flip_i] = np.fliplr(imgPatch[:,:,flip_i]) membranePatch = np.fliplr(membranePatch) if purpose == 'validate': labelPatch = np.fliplr(labelPatch) rotateInt = random.randint(0,3) for rot_i in xrange(nr_layers): imgPatch[:,:,rot_i] = np.rot90(imgPatch[:,:,rot_i], rotateInt) membranePatch = np.rot90(membranePatch, rotateInt) if purpose=='validate': labelPatch = np.rot90(labelPatch, rotateInt) if purpose=='validate': labelPatch = relabel(labelPatch) deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch*255,(patchSize,patchSize,1)), np.uint8(np.reshape(labelPatch,(patchSize,patchSize,1)))])) imgPatch, membranePatch, labelPatch = np.split(deformed_images,[imgPatch.shape[2],imgPatch.shape[2]+1], axis=2) else: deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch,(patchSize,patchSize,1))*255])) imgPatch, membranePatch = np.split(deformed_images,[imgPatch.shape[2]], axis=2) imgPatch = imgPatch / np.double(np.max(imgPatch)) membranePatch = membranePatch / np.double(np.max(membranePatch)) # crop labelPatch to potentially smaller output size offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0)) membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize, offset_small_patch:offset_small_patch+outPatchSize] #whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float) for patch_i in xrange(nr_layers): whole_set_patches[counter,patch_i,:] = imgPatch[:,:,patch_i].flatten() whole_set_labels[counter] = labelPatch.flatten() whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0) counter += 1 #normalize data whole_data = np.float32(whole_set_patches) whole_data = whole_data - 0.5 data = whole_data.copy() labels = whole_set_labels.copy() membranes = whole_set_membranes.copy() #remove the sorting in image order shuffleIndex = np.random.permutation(np.shape(membranes)[0]) for i in xrange(np.shape(membranes)[0]): whole_data[i,:,:] = data[shuffleIndex[i],:,:] whole_set_labels[i,:] = labels[shuffleIndex[i],:] whole_set_membranes[i,:] = membranes[shuffleIndex[i],:] if purpose == 
'validate':
        data_set = (whole_data, whole_set_membranes, whole_set_labels)
    else:
        data_set = (whole_data, whole_set_membranes)

    end_time = time.time()
    total_time = (end_time - start_time)
    print 'Running time: ', total_time / 60.
    print 'finished sampling data'
    return data_set


if __name__=="__main__":
    import uuid
    test = generate_experiment_data_patch_prediction(purpose='train', nsamples=30, patchSize=572, outPatchSize=388)

    dir_path = './training_patches/'
    for i in xrange(30):
        unique_filename = str(uuid.uuid4())
        img = np.reshape(test[1][i],(388,388))
        img_gray = np.reshape(test[0][i],(572,572))
        mahotas.imsave(dir_path+unique_filename+'.tif', np.uint8(img*255))
        mahotas.imsave(dir_path+unique_filename+'_gray.tif', np.uint8((img_gray+0.5)*255))

    #data_val = generate_experiment_data_supervised(purpose='validate', nsamples=10000, patchSize=65, balanceRate=0.5)
    #data = generate_experiment_data_patch_prediction(purpose='validate', nsamples=2, patchSize=315, outPatchSize=215)

    # plt.imshow(np.reshape(data[0][0],(315,315))); plt.figure()
    # plt.imshow(np.reshape(data[1][0],(215,215))); plt.figure()
    # plt.imshow(np.reshape(data[2][0],(215,215))); plt.show()

    # image = mahotas.imread('ac3_input_0141.tif')
    # image = normalizeImage(image)
    # label = mahotas.imread('ac3_labels_0141.tif') / 255.
    # test = adjust_imprecise_boundaries(image, label, 10)
    # plt.imshow(label+image); plt.show()
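# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): a minimal smoke
# test of the augmentation helpers defined near the top of this module
# (adjust_imprecise_boundaries, deform_images). It is defined but never
# called, so importing or running the module is unaffected. The 64x64 shape,
# the label stripe, and the iteration count are arbitrary assumptions; only
# the function signatures come from this file.
# ---------------------------------------------------------------------------
def _augmentation_smoke_test():
    rng = np.random.RandomState(0)

    # synthetic grayscale image in [0,1] and a sparse binary label image
    image = rng.rand(64, 64)
    label = np.zeros((64, 64))
    label[30:34, :] = 1.0

    # grow the sparse labels, keeping only pixels that lie on dark membrane
    grown = adjust_imprecise_boundaries(image, label, number_iterations=3)

    # apply the same random deformation to the image and its grown labels
    deformed_img, deformed_label = deform_images(image, grown)

    print 'grown label pixels:', int(np.sum(grown))
    print 'deformed patch dtype:', deformed_img.dtype

# Usage (assumption: interactive session): import this module and call
# _augmentation_smoke_test() to sanity-check the augmentation helpers.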
mit
lhilt/scipy
scipy/interpolate/interpolate.py
4
97600
from __future__ import division, print_function, absolute_import __all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly', 'RegularGridInterpolator', 'interpn'] import itertools import warnings import functools import operator import numpy as np from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d, ravel, poly1d, asarray, intp) import scipy.special as spec from scipy.special import comb from scipy._lib.six import xrange, integer_types, string_types from . import fitpack from . import dfitpack from . import _fitpack from .polyint import _Interpolator1D from . import _ppoly from .fitpack2 import RectBivariateSpline from .interpnd import _ndim_coords_from_arrays from ._bsplines import make_interp_spline, BSpline def prod(x): """Product of a list of numbers; ~40x faster vs np.prod for Python tuples""" if len(x) == 0: return 1 return functools.reduce(operator.mul, x) def lagrange(x, w): r""" Return a Lagrange interpolating polynomial. Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating polynomial through the points ``(x, w)``. Warning: This implementation is numerically unstable. Do not expect to be able to use more than about 20 points even if they are chosen optimally. Parameters ---------- x : array_like `x` represents the x-coordinates of a set of datapoints. w : array_like `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`). Returns ------- lagrange : `numpy.poly1d` instance The Lagrange interpolating polynomial. Examples -------- Interpolate :math:`f(x) = x^3` by 3 points. >>> from scipy.interpolate import lagrange >>> x = np.array([0, 1, 2]) >>> y = x**3 >>> poly = lagrange(x, y) Since there are only 3 points, Lagrange polynomial has degree 2. Explicitly, it is given by .. math:: \begin{aligned} L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\ &= x (-2 + 3x) \end{aligned} >>> from numpy.polynomial.polynomial import Polynomial >>> Polynomial(poly).coef array([ 3., -2., 0.]) """ M = len(x) p = poly1d(0.0) for j in xrange(M): pt = poly1d(w[j]) for k in xrange(M): if k == j: continue fac = x[j]-x[k] pt *= poly1d([1.0, -x[k]])/fac p += pt return p # !! Need to find argument for keeping initialize. If it isn't # !! found, get rid of it! class interp2d(object): """ interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None) Interpolate over a 2-D grid. `x`, `y` and `z` are arrays of values used to approximate some function f: ``z = f(x, y)``. This class returns a function whose call method uses spline interpolation to find the value of new points. If `x` and `y` represent a regular grid, consider using RectBivariateSpline. Note that calling `interp2d` with NaNs present in input values results in undefined behaviour. Methods ------- __call__ Parameters ---------- x, y : array_like Arrays defining the data point coordinates. If the points lie on a regular grid, `x` can specify the column coordinates and `y` the row coordinates, for example:: >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]] Otherwise, `x` and `y` must specify the full coordinates for each point, for example:: >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6] If `x` and `y` are multi-dimensional, they are flattened before use. z : array_like The values of the function to interpolate at the data points. If `z` is a multi-dimensional array, it is flattened before use. 
The length of a flattened `z` array is either len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates for each point. kind : {'linear', 'cubic', 'quintic'}, optional The kind of spline interpolation to use. Default is 'linear'. copy : bool, optional If True, the class makes internal copies of x, y and z. If False, references may be used. The default is to copy. bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data (x,y), a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If omitted (None), values outside the domain are extrapolated via nearest-neighbor extrapolation. See Also -------- RectBivariateSpline : Much faster 2D interpolation if your input data is on a grid bisplrep, bisplev : Spline interpolation based on FITPACK BivariateSpline : a more recent wrapper of the FITPACK routines interp1d : one dimension version of this function Notes ----- The minimum number of data points required along the interpolation axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for quintic interpolation. The interpolator is constructed by `bisplrep`, with a smoothing factor of 0. If more control over smoothing is needed, `bisplrep` should be used directly. Examples -------- Construct a 2-D grid and interpolate on it: >>> from scipy import interpolate >>> x = np.arange(-5.01, 5.01, 0.25) >>> y = np.arange(-5.01, 5.01, 0.25) >>> xx, yy = np.meshgrid(x, y) >>> z = np.sin(xx**2+yy**2) >>> f = interpolate.interp2d(x, y, z, kind='cubic') Now use the obtained interpolation function and plot the result: >>> import matplotlib.pyplot as plt >>> xnew = np.arange(-5.01, 5.01, 1e-2) >>> ynew = np.arange(-5.01, 5.01, 1e-2) >>> znew = f(xnew, ynew) >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-') >>> plt.show() """ def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None): x = ravel(x) y = ravel(y) z = asarray(z) rectangular_grid = (z.size == len(x) * len(y)) if rectangular_grid: if z.ndim == 2: if z.shape != (len(y), len(x)): raise ValueError("When on a regular grid with x.size = m " "and y.size = n, if z.ndim == 2, then z " "must have shape (n, m)") if not np.all(x[1:] >= x[:-1]): j = np.argsort(x) x = x[j] z = z[:, j] if not np.all(y[1:] >= y[:-1]): j = np.argsort(y) y = y[j] z = z[j, :] z = ravel(z.T) else: z = ravel(z) if len(x) != len(y): raise ValueError( "x and y must have equal lengths for non rectangular grid") if len(z) != len(x): raise ValueError( "Invalid length for input z for non rectangular grid") try: kx = ky = {'linear': 1, 'cubic': 3, 'quintic': 5}[kind] except KeyError: raise ValueError("Unsupported interpolation type.") if not rectangular_grid: # TODO: surfit is really not meant for interpolation! self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0) else: nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth( x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0) self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky) self.bounds_error = bounds_error self.fill_value = fill_value self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)] self.x_min, self.x_max = np.amin(x), np.amax(x) self.y_min, self.y_max = np.amin(y), np.amax(y) def __call__(self, x, y, dx=0, dy=0, assume_sorted=False): """Interpolate the function. 
Parameters ---------- x : 1D array x-coordinates of the mesh on which to interpolate. y : 1D array y-coordinates of the mesh on which to interpolate. dx : int >= 0, < kx Order of partial derivatives in x. dy : int >= 0, < ky Order of partial derivatives in y. assume_sorted : bool, optional If False, values of `x` and `y` can be in any order and they are sorted first. If True, `x` and `y` have to be arrays of monotonically increasing values. Returns ------- z : 2D array with shape (len(y), len(x)) The interpolated values. """ x = atleast_1d(x) y = atleast_1d(y) if x.ndim != 1 or y.ndim != 1: raise ValueError("x and y should both be 1-D arrays") if not assume_sorted: x = np.sort(x) y = np.sort(y) if self.bounds_error or self.fill_value is not None: out_of_bounds_x = (x < self.x_min) | (x > self.x_max) out_of_bounds_y = (y < self.y_min) | (y > self.y_max) any_out_of_bounds_x = np.any(out_of_bounds_x) any_out_of_bounds_y = np.any(out_of_bounds_y) if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y): raise ValueError("Values out of range; x must be in %r, y in %r" % ((self.x_min, self.x_max), (self.y_min, self.y_max))) z = fitpack.bisplev(x, y, self.tck, dx, dy) z = atleast_2d(z) z = transpose(z) if self.fill_value is not None: if any_out_of_bounds_x: z[:, out_of_bounds_x] = self.fill_value if any_out_of_bounds_y: z[out_of_bounds_y, :] = self.fill_value if len(z) == 1: z = z[0] return array(z) def _check_broadcast_up_to(arr_from, shape_to, name): """Helper to check that arr_from broadcasts up to shape_to""" shape_from = arr_from.shape if len(shape_to) >= len(shape_from): for t, f in zip(shape_to[::-1], shape_from[::-1]): if f != 1 and f != t: break else: # all checks pass, do the upcasting that we need later if arr_from.size != 1 and arr_from.shape != shape_to: arr_from = np.ones(shape_to, arr_from.dtype) * arr_from return arr_from.ravel() # at least one check failed raise ValueError('%s argument must be able to broadcast up ' 'to shape %s but had shape %s' % (name, shape_to, shape_from)) def _do_extrapolate(fill_value): """Helper to check if fill_value == "extrapolate" without warnings""" return (isinstance(fill_value, string_types) and fill_value == 'extrapolate') class interp1d(_Interpolator1D): """ Interpolate a 1-D function. `x` and `y` are arrays of values used to approximate some function f: ``y = f(x)``. This class returns a function whose call method uses interpolation to find the value of new points. Note that calling `interp1d` with NaNs present in input values results in undefined behaviour. Parameters ---------- x : (N,) array_like A 1-D array of real values. y : (...,N,...) array_like A N-D array of real values. The length of `y` along the interpolation axis must be equal to the length of `x`. kind : str or int, optional Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of zeroth, first, second or third order; 'previous' and 'next' simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. axis : int, optional Specifies the axis of `y` along which to interpolate. Interpolation defaults to the last axis of `y`. copy : bool, optional If True, the class makes internal copies of x and y. If False, references to `x` and `y` are used. The default is to copy. 
bounds_error : bool, optional If True, a ValueError is raised any time interpolation is attempted on a value outside of the range of x (where extrapolation is necessary). If False, out of bounds values are assigned `fill_value`. By default, an error is raised unless ``fill_value="extrapolate"``. fill_value : array-like or (array-like, array_like) or "extrapolate", optional - if a ndarray (or float), this value will be used to fill in for requested points outside of the data range. If not provided, then the default is NaN. The array-like must broadcast properly to the dimensions of the non-interpolation axes. - If a two-element tuple, then the first element is used as a fill value for ``x_new < x[0]`` and the second element is used for ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g., list or ndarray, regardless of shape) is taken to be a single array-like argument meant to be used for both bounds as ``below, above = fill_value, fill_value``. .. versionadded:: 0.17.0 - If "extrapolate", then points outside the data range will be extrapolated. .. versionadded:: 0.17.0 assume_sorted : bool, optional If False, values of `x` can be in any order and they are sorted first. If True, `x` has to be an array of monotonically increasing values. Attributes ---------- fill_value Methods ------- __call__ See Also -------- splrep, splev Spline interpolation/smoothing based on FITPACK. UnivariateSpline : An object-oriented wrapper of the FITPACK routines. interp2d : 2-D interpolation Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import interpolate >>> x = np.arange(0, 10) >>> y = np.exp(-x/3.0) >>> f = interpolate.interp1d(x, y) >>> xnew = np.arange(0, 9, 0.1) >>> ynew = f(xnew) # use interpolation function returned by `interp1d` >>> plt.plot(x, y, 'o', xnew, ynew, '-') >>> plt.show() """ def __init__(self, x, y, kind='linear', axis=-1, copy=True, bounds_error=None, fill_value=np.nan, assume_sorted=False): """ Initialize a 1D linear interpolation class.""" _Interpolator1D.__init__(self, x, y, axis=axis) self.bounds_error = bounds_error # used by fill_value setter self.copy = copy if kind in ['zero', 'slinear', 'quadratic', 'cubic']: order = {'zero': 0, 'slinear': 1, 'quadratic': 2, 'cubic': 3}[kind] kind = 'spline' elif isinstance(kind, int): order = kind kind = 'spline' elif kind not in ('linear', 'nearest', 'previous', 'next'): raise NotImplementedError("%s is unsupported: Use fitpack " "routines for other types." % kind) x = array(x, copy=self.copy) y = array(y, copy=self.copy) if not assume_sorted: ind = np.argsort(x) x = x[ind] y = np.take(y, ind, axis=axis) if x.ndim != 1: raise ValueError("the x array must have exactly one dimension.") if y.ndim == 0: raise ValueError("the y array must have at least one dimension.") # Force-cast y to a floating-point type, if it's not yet one if not issubclass(y.dtype.type, np.inexact): y = y.astype(np.float_) # Backward compatibility self.axis = axis % y.ndim # Interpolation goes internally along the first axis self.y = y self._y = self._reshape_yi(self.y) self.x = x del y, x # clean up namespace to prevent misuse; use attributes self._kind = kind self.fill_value = fill_value # calls the setter, can modify bounds_err # Adjust to interpolation kind; store reference to *unbound* # interpolation methods, in order to avoid circular references to self # stored in the bound instance methods, and therefore delayed garbage # collection. 
See: https://docs.python.org/reference/datamodel.html if kind in ('linear', 'nearest', 'previous', 'next'): # Make a "view" of the y array that is rotated to the interpolation # axis. minval = 2 if kind == 'nearest': # Do division before addition to prevent possible integer # overflow self.x_bds = self.x / 2.0 self.x_bds = self.x_bds[1:] + self.x_bds[:-1] self._call = self.__class__._call_nearest elif kind == 'previous': # Side for np.searchsorted and index for clipping self._side = 'left' self._ind = 0 # Move x by one floating point value to the left self._x_shift = np.nextafter(self.x, -np.inf) self._call = self.__class__._call_previousnext elif kind == 'next': self._side = 'right' self._ind = 1 # Move x by one floating point value to the right self._x_shift = np.nextafter(self.x, np.inf) self._call = self.__class__._call_previousnext else: # Check if we can delegate to numpy.interp (2x-10x faster). cond = self.x.dtype == np.float_ and self.y.dtype == np.float_ cond = cond and self.y.ndim == 1 cond = cond and not _do_extrapolate(fill_value) if cond: self._call = self.__class__._call_linear_np else: self._call = self.__class__._call_linear else: minval = order + 1 rewrite_nan = False xx, yy = self.x, self._y if order > 1: # Quadratic or cubic spline. If input contains even a single # nan, then the output is all nans. We cannot just feed data # with nans to make_interp_spline because it calls LAPACK. # So, we make up a bogus x and y with no nans and use it # to get the correct shape of the output, which we then fill # with nans. # For slinear or zero order spline, we just pass nans through. if np.isnan(self.x).any(): xx = np.linspace(min(self.x), max(self.x), len(self.x)) rewrite_nan = True if np.isnan(self._y).any(): yy = np.ones_like(self._y) rewrite_nan = True self._spline = make_interp_spline(xx, yy, k=order, check_finite=False) if rewrite_nan: self._call = self.__class__._call_nan_spline else: self._call = self.__class__._call_spline if len(self.x) < minval: raise ValueError("x and y arrays must have at " "least %d entries" % minval) @property def fill_value(self): """The fill value.""" # backwards compat: mimic a public attribute return self._fill_value_orig @fill_value.setter def fill_value(self, fill_value): # extrapolation only works for nearest neighbor and linear methods if _do_extrapolate(fill_value): if self.bounds_error: raise ValueError("Cannot extrapolate and raise " "at the same time.") self.bounds_error = False self._extrapolate = True else: broadcast_shape = (self.y.shape[:self.axis] + self.y.shape[self.axis + 1:]) if len(broadcast_shape) == 0: broadcast_shape = (1,) # it's either a pair (_below_range, _above_range) or a single value # for both above and below range if isinstance(fill_value, tuple) and len(fill_value) == 2: below_above = [np.asarray(fill_value[0]), np.asarray(fill_value[1])] names = ('fill_value (below)', 'fill_value (above)') for ii in range(2): below_above[ii] = _check_broadcast_up_to( below_above[ii], broadcast_shape, names[ii]) else: fill_value = np.asarray(fill_value) below_above = [_check_broadcast_up_to( fill_value, broadcast_shape, 'fill_value')] * 2 self._fill_value_below, self._fill_value_above = below_above self._extrapolate = False if self.bounds_error is None: self.bounds_error = True # backwards compat: fill_value was a public attr; make it writeable self._fill_value_orig = fill_value def _call_linear_np(self, x_new): # Note that out-of-bounds values are taken care of in self._evaluate return np.interp(x_new, self.x, self.y) def 
_call_linear(self, x_new): # 2. Find where in the original data, the values to interpolate # would be inserted. # Note: If x_new[n] == x[m], then m is returned by searchsorted. x_new_indices = searchsorted(self.x, x_new) # 3. Clip x_new_indices so that they are within the range of # self.x indices and at least 1. Removes mis-interpolation # of x_new[n] = x[0] x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int) # 4. Calculate the slope of regions that each x_new value falls in. lo = x_new_indices - 1 hi = x_new_indices x_lo = self.x[lo] x_hi = self.x[hi] y_lo = self._y[lo] y_hi = self._y[hi] # Note that the following two expressions rely on the specifics of the # broadcasting semantics. slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None] # 5. Calculate the actual value for each entry in x_new. y_new = slope*(x_new - x_lo)[:, None] + y_lo return y_new def _call_nearest(self, x_new): """ Find nearest neighbour interpolated y_new = f(x_new).""" # 2. Find where in the averaged data the values to interpolate # would be inserted. # Note: use side='left' (right) to searchsorted() to define the # halfway point to be nearest to the left (right) neighbour x_new_indices = searchsorted(self.x_bds, x_new, side='left') # 3. Clip x_new_indices so that they are within the range of x indices. x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp) # 4. Calculate the actual value for each entry in x_new. y_new = self._y[x_new_indices] return y_new def _call_previousnext(self, x_new): """Use previous/next neighbour of x_new, y_new = f(x_new).""" # 1. Get index of left/right value x_new_indices = searchsorted(self._x_shift, x_new, side=self._side) # 2. Clip x_new_indices so that they are within the range of x indices. x_new_indices = x_new_indices.clip(1-self._ind, len(self.x)-self._ind).astype(intp) # 3. Calculate the actual value for each entry in x_new. y_new = self._y[x_new_indices+self._ind-1] return y_new def _call_spline(self, x_new): return self._spline(x_new) def _call_nan_spline(self, x_new): out = self._spline(x_new) out[...] = np.nan return out def _evaluate(self, x_new): # 1. Handle values in x_new that are outside of x. Throw error, # or return a list of mask array indicating the outofbounds values. # The behavior is set by the bounds_error variable. x_new = asarray(x_new) y_new = self._call(self, x_new) if not self._extrapolate: below_bounds, above_bounds = self._check_bounds(x_new) if len(y_new) > 0: # Note fill_value must be broadcast up to the proper size # and flattened to work here y_new[below_bounds] = self._fill_value_below y_new[above_bounds] = self._fill_value_above return y_new def _check_bounds(self, x_new): """Check the inputs for being in the bounds of the interpolated data. Parameters ---------- x_new : array Returns ------- out_of_bounds : bool array The mask on x_new of values that are out of the bounds. """ # If self.bounds_error is True, we raise an error if any x_new values # fall outside the range of x. Otherwise, we return an array indicating # which values are outside the boundary region. below_bounds = x_new < self.x[0] above_bounds = x_new > self.x[-1] # !! Could provide more information about which values are out of bounds if self.bounds_error and below_bounds.any(): raise ValueError("A value in x_new is below the interpolation " "range.") if self.bounds_error and above_bounds.any(): raise ValueError("A value in x_new is above the interpolation " "range.") # !! Should we emit a warning if some values are out of bounds? # !! matlab does not. 
return below_bounds, above_bounds class _PPolyBase(object): """Base class for piecewise polynomials.""" __slots__ = ('c', 'x', 'extrapolate', 'axis') def __init__(self, c, x, extrapolate=None, axis=0): self.c = np.asarray(c) self.x = np.ascontiguousarray(x, dtype=np.float64) if extrapolate is None: extrapolate = True elif extrapolate != 'periodic': extrapolate = bool(extrapolate) self.extrapolate = extrapolate if self.c.ndim < 2: raise ValueError("Coefficients array must be at least " "2-dimensional.") if not (0 <= axis < self.c.ndim - 1): raise ValueError("axis=%s must be between 0 and %s" % (axis, self.c.ndim-1)) self.axis = axis if axis != 0: # roll the interpolation axis to be the first one in self.c # More specifically, the target shape for self.c is (k, m, ...), # and axis !=0 means that we have c.shape (..., k, m, ...) # ^ # axis # So we roll two of them. self.c = np.rollaxis(self.c, axis+1) self.c = np.rollaxis(self.c, axis+1) if self.x.ndim != 1: raise ValueError("x must be 1-dimensional") if self.x.size < 2: raise ValueError("at least 2 breakpoints are needed") if self.c.ndim < 2: raise ValueError("c must have at least 2 dimensions") if self.c.shape[0] == 0: raise ValueError("polynomial must be at least of order 0") if self.c.shape[1] != self.x.size-1: raise ValueError("number of coefficients != len(x)-1") dx = np.diff(self.x) if not (np.all(dx >= 0) or np.all(dx <= 0)): raise ValueError("`x` must be strictly increasing or decreasing.") dtype = self._get_dtype(self.c.dtype) self.c = np.ascontiguousarray(self.c, dtype=dtype) def _get_dtype(self, dtype): if np.issubdtype(dtype, np.complexfloating) \ or np.issubdtype(self.c.dtype, np.complexfloating): return np.complex_ else: return np.float_ @classmethod def construct_fast(cls, c, x, extrapolate=None, axis=0): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments ``c`` and ``x`` must be arrays of the correct shape and type. The ``c`` array can only be of dtypes float and complex, and ``x`` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x self.axis = axis if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self def _ensure_c_contiguous(self): """ c and x may be modified by the user. The Cython code expects that they are C contiguous. """ if not self.x.flags.c_contiguous: self.x = self.x.copy() if not self.c.flags.c_contiguous: self.c = self.c.copy() def extend(self, c, x, right=None): """ Add additional breakpoints and coefficients to the polynomial. Parameters ---------- c : ndarray, size (k, m, ...) Additional coefficients for polynomials in intervals. Note that the first additional interval will be formed using one of the ``self.x`` end points. x : ndarray, size (m,) Additional breakpoints. Must be sorted in the same order as ``self.x`` and either to the right or to the left of the current breakpoints. right Deprecated argument. Has no effect. .. 
deprecated:: 0.19 """ if right is not None: warnings.warn("`right` is deprecated and will be removed.") c = np.asarray(c) x = np.asarray(x) if c.ndim < 2: raise ValueError("invalid dimensions for c") if x.ndim != 1: raise ValueError("invalid dimensions for x") if x.shape[0] != c.shape[1]: raise ValueError("x and c have incompatible sizes") if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim: raise ValueError("c and self.c have incompatible shapes") if c.size == 0: return dx = np.diff(x) if not (np.all(dx >= 0) or np.all(dx <= 0)): raise ValueError("`x` is not sorted.") if self.x[-1] >= self.x[0]: if not x[-1] >= x[0]: raise ValueError("`x` is in the different order " "than `self.x`.") if x[0] >= self.x[-1]: action = 'append' elif x[-1] <= self.x[0]: action = 'prepend' else: raise ValueError("`x` is neither on the left or on the right " "from `self.x`.") else: if not x[-1] <= x[0]: raise ValueError("`x` is in the different order " "than `self.x`.") if x[0] <= self.x[-1]: action = 'append' elif x[-1] >= self.x[0]: action = 'prepend' else: raise ValueError("`x` is neither on the left or on the right " "from `self.x`.") dtype = self._get_dtype(c.dtype) k2 = max(c.shape[0], self.c.shape[0]) c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:], dtype=dtype) if action == 'append': c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c c2[k2-c.shape[0]:, self.c.shape[1]:] = c self.x = np.r_[self.x, x] elif action == 'prepend': c2[k2-self.c.shape[0]:, :c.shape[1]] = c c2[k2-c.shape[0]:, c.shape[1]:] = self.c self.x = np.r_[x, self.x] self.c = c2 def __call__(self, x, nu=0, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative. Parameters ---------- x : array_like Points to evaluate the interpolant at. nu : int, optional Order of derivative to evaluate. Must be non-negative. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate x = np.asarray(x) x_shape, x_ndim = x.shape, x.ndim x = np.ascontiguousarray(x.ravel(), dtype=np.float_) # With periodic extrapolation we map x to the segment # [self.x[0], self.x[-1]]. if extrapolate == 'periodic': x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0]) extrapolate = False out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype) self._ensure_c_contiguous() self._evaluate(x, nu, extrapolate, out) out = out.reshape(x_shape + self.c.shape[2:]) if self.axis != 0: # transpose to move the calculated values to the interpolation axis l = list(range(out.ndim)) l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:] out = out.transpose(l) return out class PPoly(_PPolyBase): """ Piecewise polynomial in terms of coefficients and breakpoints The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the local power basis:: S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1)) where ``k`` is the degree of the polynomial. 
Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ derivative antiderivative integrate solve roots extend from_spline from_bernstein_basis construct_fast See also -------- BPoly : piecewise polynomials in the Bernstein basis Notes ----- High-order polynomials in the power basis can be numerically unstable. Precision problems can start to appear for orders larger than 20-30. """ def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k - n representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if nu < 0: return self.antiderivative(-nu) # reduce order if nu == 0: c2 = self.c.copy() else: c2 = self.c[:-nu, :].copy() if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu) c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)] # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. 
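A quick check of the inverse relationship between `antiderivative` and `derivative` on a toy quadratic (a minimal sketch; coefficients chosen arbitrarily):

>>> import numpy as np
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> bool(np.isclose(pp.antiderivative().derivative()(0.5), pp(0.5)))
True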
""" if nu <= 0: return self.derivative(-nu) c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], dtype=self.c.dtype) c[:-nu] = self.c # divide by the correct rising factorials factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu) c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] # fix continuity of added degrees of freedom self._ensure_c_contiguous() _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), self.x, nu - 1) if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate # construct a compatible polynomial return self.construct_fast(c, self.x, extrapolate, self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a, b] """ if extrapolate is None: extrapolate = self.extrapolate # Swap integration bounds if needed sign = 1 if b < a: a, b = b, a sign = -1 range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype) self._ensure_c_contiguous() # Compute the integral. if extrapolate == 'periodic': # Split the integral into the part over period (can be several # of them) and the remaining part. xs, xe = self.x[0], self.x[-1] period = xe - xs interval = b - a n_periods, left = divmod(interval, period) if n_periods > 0: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xe, False, out=range_int) range_int *= n_periods else: range_int.fill(0) # Map a to [xs, xe], b is always a + left. a = xs + (a - xs) % period b = a + left # If b <= xe then we need to integrate over [a, b], otherwise # over [a, xe] and from xs to what is remained. remainder_int = np.empty_like(range_int) if b <= xe: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, xe, False, out=remainder_int) range_int += remainder_int _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xs + left + a - xe, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, bool(extrapolate), out=range_int) # Return range_int *= sign return range_int.reshape(self.c.shape[2:]) def solve(self, y=0., discontinuity=True, extrapolate=None): """ Find real solutions of the the equation ``pp(x) == y``. Parameters ---------- y : float, optional Right-hand side. Default is zero. discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use `self.extrapolate`. Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. 
Notes ----- This routine works only on real-valued polynomials. If the piecewise polynomial contains sections that are identically zero, the root list will contain the start point of the corresponding interval, followed by a ``nan`` value. If the polynomial is discontinuous across a breakpoint, and there is a sign change across the breakpoint, this is reported if the `discont` parameter is True. Examples -------- Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals ``[-2, 1], [1, 2]``: >>> from scipy.interpolate import PPoly >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2]) >>> pp.solve() array([-1., 1.]) """ if extrapolate is None: extrapolate = self.extrapolate self._ensure_c_contiguous() if np.issubdtype(self.c.dtype, np.complexfloating): raise ValueError("Root finding is only for " "real-valued polynomials") y = float(y) r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, y, bool(discontinuity), bool(extrapolate)) if self.c.ndim == 2: return r[0] else: r2 = np.empty(prod(self.c.shape[2:]), dtype=object) # this for-loop is equivalent to ``r2[...] = r``, but that's broken # in numpy 1.6.0 for ii, root in enumerate(r): r2[ii] = root return r2.reshape(self.c.shape[2:]) def roots(self, discontinuity=True, extrapolate=None): """ Find real roots of the the piecewise polynomial. Parameters ---------- discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use `self.extrapolate`. Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. See Also -------- PPoly.solve """ return self.solve(0, discontinuity, extrapolate) @classmethod def from_spline(cls, tck, extrapolate=None): """ Construct a piecewise polynomial from a spline Parameters ---------- tck A spline, as returned by `splrep` or a BSpline object. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ if isinstance(tck, BSpline): t, c, k = tck.tck if extrapolate is None: extrapolate = tck.extrapolate else: t, c, k = tck cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype) for m in xrange(k, -1, -1): y = fitpack.splev(t[:-1], tck, der=m) cvals[k - m, :] = y/spec.gamma(m+1) return cls.construct_fast(cvals, t, extrapolate) @classmethod def from_bernstein_basis(cls, bp, extrapolate=None): """ Construct a piecewise polynomial in the power basis from a polynomial in Bernstein basis. Parameters ---------- bp : BPoly A Bernstein basis polynomial, as created by BPoly extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ if not isinstance(bp, BPoly): raise TypeError(".from_bernstein_basis only accepts BPoly instances. " "Got %s instead." 
% type(bp)) dx = np.diff(bp.x) k = bp.c.shape[0] - 1 # polynomial order rest = (None,)*(bp.c.ndim-2) c = np.zeros_like(bp.c) for a in range(k+1): factor = (-1)**a * comb(k, a) * bp.c[a] for s in range(a, k+1): val = comb(k-a, s-a) * (-1)**s c[k-s] += factor * val / dx[(slice(None),)+rest]**s if extrapolate is None: extrapolate = bp.extrapolate return cls.construct_fast(c, bp.x, extrapolate, bp.axis) class BPoly(_PPolyBase): """Piecewise polynomial in terms of coefficients and breakpoints. The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the Bernstein polynomial basis:: S = sum(c[a, i] * b(a, k; x) for a in range(k+1)), where ``k`` is the degree of the polynomial, and:: b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a), with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial coefficient. Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. extrapolate : bool, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ extend derivative antiderivative integrate construct_fast from_power_basis from_derivatives See also -------- PPoly : piecewise polynomials in the power basis Notes ----- Properties of Bernstein polynomials are well documented in the literature, see for example [1]_ [2]_ [3]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial .. [2] Kenneth I. Joy, Bernstein polynomials, http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems, vol 2011, article ID 829546, :doi:`10.1155/2011/829543`. Examples -------- >>> from scipy.interpolate import BPoly >>> x = [0, 1] >>> c = [[1], [2], [3]] >>> bp = BPoly(c, x) This creates a 2nd order polynomial .. math:: B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2 """ def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate_bernstein( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k - nu representing the derivative of this polynomial. 
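For the quadratic used in the class docstring, ``B(x) = 1 + 2*x``, so its derivative is the constant 2 (a small sanity sketch):

>>> import numpy as np
>>> from scipy.interpolate import BPoly
>>> bp = BPoly([[1], [2], [3]], [0, 1])
>>> bool(np.isclose(bp.derivative()(0.3), 2.0))
True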
""" if nu < 0: return self.antiderivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.derivative() return bp # reduce order if nu == 0: c2 = self.c.copy() else: # For a polynomial # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x), # we use the fact that # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ), # which leads to # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1} # # finally, for an interval [y, y + dy] with dy != 1, # we need to correct for an extra power of dy rest = (None,)*(self.c.ndim-2) k = self.c.shape[0] - 1 dx = np.diff(self.x)[(None, slice(None))+rest] c2 = k * np.diff(self.c, axis=0) / dx if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k + nu representing the antiderivative of this polynomial. Notes ----- If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. """ if nu <= 0: return self.derivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.antiderivative() return bp # Construct the indefinite integrals on individual intervals c, x = self.c, self.x k = c.shape[0] c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype) c2[1:, ...] = np.cumsum(c, axis=0) / k delta = x[1:] - x[:-1] c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)] # Now fix continuity: on the very first interval, take the integration # constant to be zero; on an interval [x_j, x_{j+1}) with j>0, # the integration constant is then equal to the jump of the `bp` at x_j. # The latter is given by the coefficient of B_{n+1, n+1} # *on the previous interval* (other B. polynomials are zero at the # breakpoint). Finally, use the fact that BPs form a partition of unity. c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1] if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate return self.construct_fast(c2, x, extrapolate, axis=self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- array_like Definite integral of the piecewise polynomial over [a, b] """ # XXX: can probably use instead the fact that # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1) ib = self.antiderivative() if extrapolate is None: extrapolate = self.extrapolate # ib.extrapolate shouldn't be 'periodic', it is converted to # False for 'periodic. in antiderivative() call. if extrapolate != 'periodic': ib.extrapolate = extrapolate if extrapolate == 'periodic': # Split the integral into the part over period (can be several # of them) and the remaining part. # For simplicity and clarity convert to a <= b case. 
if a <= b: sign = 1 else: a, b = b, a sign = -1 xs, xe = self.x[0], self.x[-1] period = xe - xs interval = b - a n_periods, left = divmod(interval, period) res = n_periods * (ib(xe) - ib(xs)) # Map a and b to [xs, xe]. a = xs + (a - xs) % period b = a + left # If b <= xe then we need to integrate over [a, b], otherwise # over [a, xe] and from xs to what is remained. if b <= xe: res += ib(b) - ib(a) else: res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs) return sign * res else: return ib(b) - ib(a) def extend(self, c, x, right=None): k = max(self.c.shape[0], c.shape[0]) self.c = self._raise_degree(self.c, k - self.c.shape[0]) c = self._raise_degree(c, k - c.shape[0]) return _PPolyBase.extend(self, c, x, right) extend.__doc__ = _PPolyBase.extend.__doc__ @classmethod def from_power_basis(cls, pp, extrapolate=None): """ Construct a piecewise polynomial in Bernstein basis from a power basis polynomial. Parameters ---------- pp : PPoly A piecewise polynomial in the power basis extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ if not isinstance(pp, PPoly): raise TypeError(".from_power_basis only accepts PPoly instances. " "Got %s instead." % type(pp)) dx = np.diff(pp.x) k = pp.c.shape[0] - 1 # polynomial order rest = (None,)*(pp.c.ndim-2) c = np.zeros_like(pp.c) for a in range(k+1): factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a) for j in range(k-a, k+1): c[j] += factor * comb(j, k-a) if extrapolate is None: extrapolate = pp.extrapolate return cls.construct_fast(c, pp.x, extrapolate, pp.axis) @classmethod def from_derivatives(cls, xi, yi, orders=None, extrapolate=None): """Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array_likes ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]`` orders : None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. Notes ----- If ``k`` derivatives are specified at a breakpoint ``x``, the constructed polynomial is exactly ``k`` times continuously differentiable at ``x``, unless the ``order`` is provided explicitly. In the latter case, the smoothness of the polynomial at the breakpoint is controlled by the ``order``. Deduces the number of derivatives to match at each end from ``order`` and the number of derivatives available. If possible it uses the same number of derivatives from each end; if the number is odd it tries to take the extra one from y2. In any case if not enough derivatives are available at one end or another it draws enough to make up the total from the other end. If the order is too high and not enough derivatives are available, an exception is raised. 
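The endpoint value and derivative conditions can be verified numerically (a minimal sketch with arbitrary values; see also the examples below):

>>> import numpy as np
>>> from scipy.interpolate import BPoly
>>> bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
>>> bool(np.isclose(bp(0.0), 1.0)), bool(np.isclose(bp(1.0), 3.0))
(True, True)
>>> db = bp.derivative()
>>> bool(np.isclose(db(0.0), 2.0)), bool(np.isclose(db(1.0), 4.0))
(True, True)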
Examples -------- >>> from scipy.interpolate import BPoly >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]]) Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]` such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4` >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]]) Creates a piecewise polynomial `f(x)`, such that `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`. Based on the number of derivatives provided, the order of the local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`. Notice that no restriction is imposed on the derivatives at ``x = 1`` and ``x = 2``. Indeed, the explicit form of the polynomial is:: f(x) = | x * (1 - x), 0 <= x < 1 | 2 * (x - 1), 1 <= x <= 2 So that f'(1-0) = -1 and f'(1+0) = 2 """ xi = np.asarray(xi) if len(xi) != len(yi): raise ValueError("xi and yi need to have the same length") if np.any(xi[1:] - xi[:1] <= 0): raise ValueError("x coordinates are not in increasing order") # number of intervals m = len(xi) - 1 # global poly order is k-1, local orders are <=k and can vary try: k = max(len(yi[i]) + len(yi[i+1]) for i in range(m)) except TypeError: raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).") if orders is None: orders = [None] * m else: if isinstance(orders, (integer_types, np.integer)): orders = [orders] * m k = max(k, max(orders)) if any(o <= 0 for o in orders): raise ValueError("Orders must be positive.") c = [] for i in range(m): y1, y2 = yi[i], yi[i+1] if orders[i] is None: n1, n2 = len(y1), len(y2) else: n = orders[i]+1 n1 = min(n//2, len(y1)) n2 = min(n - n1, len(y2)) n1 = min(n - n2, len(y2)) if n1+n2 != n: mesg = ("Point %g has %d derivatives, point %g" " has %d derivatives, but order %d requested" % ( xi[i], len(y1), xi[i+1], len(y2), orders[i])) raise ValueError(mesg) if not (n1 <= len(y1) and n2 <= len(y2)): raise ValueError("`order` input incompatible with" " length y1 or y2.") b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2]) if len(b) < k: b = BPoly._raise_degree(b, k - len(b)) c.append(b) c = np.asarray(c) return cls(c.swapaxes(0, 1), xi, extrapolate) @staticmethod def _construct_from_derivatives(xa, xb, ya, yb): r"""Compute the coefficients of a polynomial in the Bernstein basis given the values and derivatives at the edges. Return the coefficients of a polynomial in the Bernstein basis defined on ``[xa, xb]`` and having the values and derivatives at the endpoints `xa` and `xb` as specified by `ya`` and `yb`. The polynomial constructed is of the minimal possible degree, i.e., if the lengths of `ya` and `yb` are `na` and `nb`, the degree of the polynomial is ``na + nb - 1``. Parameters ---------- xa : float Left-hand end point of the interval xb : float Right-hand end point of the interval ya : array_like Derivatives at `xa`. `ya[0]` is the value of the function, and `ya[i]` for ``i > 0`` is the value of the ``i``-th derivative. yb : array_like Derivatives at `xb`. Returns ------- array coefficient array of a polynomial having specified derivatives Notes ----- This uses several facts from life of Bernstein basis functions. First of all, .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1}) If B(x) is a linear combination of the form .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n}, then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}. Iterating the latter one, one finds for the q-th derivative .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q}, with .. 
math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a} This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and `c_q` are found one by one by iterating `q = 0, ..., na`. At ``x = xb`` it's the same with ``a = n - q``. """ ya, yb = np.asarray(ya), np.asarray(yb) if ya.shape[1:] != yb.shape[1:]: raise ValueError('ya and yb have incompatible dimensions.') dta, dtb = ya.dtype, yb.dtype if (np.issubdtype(dta, np.complexfloating) or np.issubdtype(dtb, np.complexfloating)): dt = np.complex_ else: dt = np.float_ na, nb = len(ya), len(yb) n = na + nb c = np.empty((na+nb,) + ya.shape[1:], dtype=dt) # compute coefficients of a polynomial degree na+nb-1 # walk left-to-right for q in range(0, na): c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q for j in range(0, q): c[q] -= (-1)**(j+q) * comb(q, j) * c[j] # now walk right-to-left for q in range(0, nb): c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q for j in range(0, q): c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j] return c @staticmethod def _raise_degree(c, d): r"""Raise a degree of a polynomial in the Bernstein basis. Given the coefficients of a polynomial degree `k`, return (the coefficients of) the equivalent polynomial of degree `k+d`. Parameters ---------- c : array_like coefficient array, 1D d : integer Returns ------- array coefficient array, 1D array of length `c.shape[0] + d` Notes ----- This uses the fact that a Bernstein polynomial `b_{a, k}` can be identically represented as a linear combination of polynomials of a higher degree `k+d`: .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \ comb(d, j) / comb(k+d, a+j) """ if d == 0: return c k = c.shape[0] - 1 out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype) for a in range(c.shape[0]): f = c[a] * comb(k, a) for j in range(d+1): out[a+j] += f * comb(d, j) / comb(k+d, a+j) return out class NdPPoly(object): """ Piecewise tensor product polynomial The value at point ``xp = (x', y', z', ...)`` is evaluated by first computing the interval indices `i` such that:: x[0][i[0]] <= x' < x[0][i[0]+1] x[1][i[1]] <= y' < x[1][i[1]+1] ... and then computing:: S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]] * (xp[0] - x[0][i[0]])**m0 * ... * (xp[n] - x[n][i[n]])**mn for m0 in range(k[0]+1) ... for mn in range(k[n]+1)) where ``k[j]`` is the degree of the polynomial in dimension j. This representation is the piecewise multivariate power basis. Parameters ---------- c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...) Polynomial coefficients, with polynomial order `kj` and `mj+1` intervals for each dimension `j`. x : ndim-tuple of ndarrays, shapes (mj+1,) Polynomial breakpoints for each dimension. These must be sorted in increasing order. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: True. Attributes ---------- x : tuple of ndarrays Breakpoints. c : ndarray Coefficients of the polynomials. Methods ------- __call__ construct_fast See also -------- PPoly : piecewise polynomials in 1D Notes ----- High-order polynomials in the power basis can be numerically unstable. 
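Examples
--------
A degree-zero sketch of the shape convention (coefficient axes first, then one interval-count axis per dimension; values are illustrative only):

>>> import numpy as np
>>> from scipy.interpolate import NdPPoly
>>> x = (np.array([0., 1., 2.]), np.array([0., 1.]))
>>> c = np.ones((1, 1, 2, 1))  # degree 0 in both dimensions, 2 x 1 intervals
>>> p = NdPPoly(c, x)
>>> float(p([0.5, 0.5])), float(p([1.5, 0.25]))
(1.0, 1.0)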
""" def __init__(self, c, x, extrapolate=None): self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x) self.c = np.asarray(c) if extrapolate is None: extrapolate = True self.extrapolate = bool(extrapolate) ndim = len(self.x) if any(v.ndim != 1 for v in self.x): raise ValueError("x arrays must all be 1-dimensional") if any(v.size < 2 for v in self.x): raise ValueError("x arrays must all contain at least 2 points") if c.ndim < 2*ndim: raise ValueError("c must have at least 2*len(x) dimensions") if any(np.any(v[1:] - v[:-1] < 0) for v in self.x): raise ValueError("x-coordinates are not in increasing order") if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)): raise ValueError("x and c do not agree on the number of intervals") dtype = self._get_dtype(self.c.dtype) self.c = np.ascontiguousarray(self.c, dtype=dtype) @classmethod def construct_fast(cls, c, x, extrapolate=None): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments ``c`` and ``x`` must be arrays of the correct shape and type. The ``c`` array can only be of dtypes float and complex, and ``x`` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self def _get_dtype(self, dtype): if np.issubdtype(dtype, np.complexfloating) \ or np.issubdtype(self.c.dtype, np.complexfloating): return np.complex_ else: return np.float_ def _ensure_c_contiguous(self): if not self.c.flags.c_contiguous: self.c = self.c.copy() if not isinstance(self.x, tuple): self.x = tuple(self.x) def __call__(self, x, nu=None, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative Parameters ---------- x : array-like Points to evaluate the interpolant at. nu : tuple, optional Orders of derivatives to evaluate. Each must be non-negative. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- y : array-like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) ndim = len(self.x) x = _ndim_coords_from_arrays(x) x_shape = x.shape x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_) if nu is None: nu = np.zeros((ndim,), dtype=np.intc) else: nu = np.asarray(nu, dtype=np.intc) if nu.ndim != 1 or nu.shape[0] != ndim: raise ValueError("invalid number of derivative orders nu") dim1 = prod(self.c.shape[:ndim]) dim2 = prod(self.c.shape[ndim:2*ndim]) dim3 = prod(self.c.shape[2*ndim:]) ks = np.array(self.c.shape[:ndim], dtype=np.intc) out = np.empty((x.shape[0], dim3), dtype=self.c.dtype) self._ensure_c_contiguous() _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3), self.x, ks, x, nu, bool(extrapolate), out) return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:]) def _derivative_inplace(self, nu, axis): """ Compute 1D derivative along a selected dimension in-place May result to non-contiguous c array. 
""" if nu < 0: return self._antiderivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim # reduce order if nu == 0: # noop return else: sl = [slice(None)]*ndim sl[axis] = slice(None, -nu, None) c2 = self.c[tuple(sl)] if c2.shape[axis] == 0: # derivative of order 0 is zero shp = list(c2.shape) shp[axis] = 1 c2 = np.zeros(shp, dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu) sl = [None]*c2.ndim sl[axis] = slice(None) c2 *= factor[tuple(sl)] self.c = c2 def _antiderivative_inplace(self, nu, axis): """ Compute 1D antiderivative along a selected dimension May result to non-contiguous c array. """ if nu <= 0: return self._derivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim perm = list(range(ndim)) perm[0], perm[axis] = perm[axis], perm[0] perm = perm + list(range(ndim, self.c.ndim)) c = self.c.transpose(perm) c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:], dtype=c.dtype) c2[:-nu] = c # divide by the correct rising factorials factor = spec.poch(np.arange(c.shape[0], 0, -1), nu) c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] # fix continuity of added degrees of freedom perm2 = list(range(c2.ndim)) perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1] c2 = c2.transpose(perm2) c2 = c2.copy() _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1), self.x[axis], nu-1) c2 = c2.transpose(perm2) c2 = c2.transpose(perm) # Done self.c = c2 def derivative(self, nu): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the antiderivative is returned. Returns ------- pp : NdPPoly Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals in each dimension are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) for axis, n in enumerate(nu): p._derivative_inplace(n, axis) p._ensure_c_contiguous() return p def antiderivative(self, nu): """ Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. """ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) for axis, n in enumerate(nu): p._antiderivative_inplace(n, axis) p._ensure_c_contiguous() return p def integrate_1d(self, a, b, axis, extrapolate=None): r""" Compute NdPPoly representation for one dimensional definite integral The result is a piecewise polynomial representing the integral: .. math:: p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...) where the dimension integrated over is specified with the `axis` parameter. Parameters ---------- a, b : float Lower and upper bound for integration. 
axis : int Dimension over which to compute the 1D integrals extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : NdPPoly or array-like Definite integral of the piecewise polynomial over [a, b]. If the polynomial was 1-dimensional, an array is returned, otherwise, an NdPPoly object. """ if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) ndim = len(self.x) axis = int(axis) % ndim # reuse 1D integration routines c = self.c swap = list(range(c.ndim)) swap.insert(0, swap[axis]) del swap[axis + 1] swap.insert(1, swap[ndim + axis]) del swap[ndim + axis + 1] c = c.transpose(swap) p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1), self.x[axis], extrapolate=extrapolate) out = p.integrate(a, b, extrapolate=extrapolate) # Construct result if ndim == 1: return out.reshape(c.shape[2:]) else: c = out.reshape(c.shape[2:]) x = self.x[:axis] + self.x[axis+1:] return self.construct_fast(c, x, extrapolate=extrapolate) def integrate(self, ranges, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- ranges : ndim-tuple of 2-tuples float Sequence of lower and upper bounds for each dimension, ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]`` extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]] """ ndim = len(self.x) if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) if not hasattr(ranges, '__len__') or len(ranges) != ndim: raise ValueError("Range not a sequence of correct length") self._ensure_c_contiguous() # Reuse 1D integration routine c = self.c for n, (a, b) in enumerate(ranges): swap = list(range(c.ndim)) swap.insert(1, swap[ndim - n]) del swap[ndim - n + 1] c = c.transpose(swap) p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate) out = p.integrate(a, b, extrapolate=extrapolate) c = out.reshape(c.shape[2:]) return c class RegularGridInterpolator(object): """ Interpolation on a regular grid in arbitrary dimensions The data must be defined on a regular grid; the grid spacing however may be uneven. Linear and nearest-neighbour interpolation are supported. After setting up the interpolator object, the interpolation method (*linear* or *nearest*) may be chosen at each evaluation. Parameters ---------- points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) The points defining the regular grid in n dimensions. values : array_like, shape (m1, ..., mn, ...) The data on the regular grid in n dimensions. method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest". This parameter will become the default for the object's ``__call__`` method. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Methods ------- __call__ Notes ----- Contrary to LinearNDInterpolator and NearestNDInterpolator, this class avoids expensive triangulation of the input data by taking advantage of the regular grid structure. 
If any of `points` have a dimension of size 1, linear interpolation will return an array of `nan` values. Nearest-neighbor interpolation will work as usual in this case. .. versionadded:: 0.14 Examples -------- Evaluate a simple example function on the points of a 3D grid: >>> from scipy.interpolate import RegularGridInterpolator >>> def f(x, y, z): ... return 2 * x**3 + 3 * y**2 - z >>> x = np.linspace(1, 4, 11) >>> y = np.linspace(4, 7, 22) >>> z = np.linspace(7, 9, 33) >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True)) ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``. Next, define an interpolating function from this data: >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data) Evaluate the interpolating function at the two points ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``: >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]]) >>> my_interpolating_function(pts) array([ 125.80469388, 146.30069388]) which is indeed a close approximation to ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``. See also -------- NearestNDInterpolator : Nearest neighbour interpolation on unstructured data in N dimensions LinearNDInterpolator : Piecewise linear interpolant on unstructured data in N dimensions References ---------- .. [1] Python package *regulargrid* by Johannes Buchner, see https://pypi.python.org/pypi/regulargrid/ .. [2] Wikipedia, "Trilinear interpolation", https://en.wikipedia.org/wiki/Trilinear_interpolation .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear and multilinear table interpolation in many dimensions." MATH. COMPUT. 50.181 (1988): 189-196. https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf """ # this class is based on code originally programmed by Johannes Buchner, # see https://github.com/JohannesBuchner/regulargrid def __init__(self, points, values, method="linear", bounds_error=True, fill_value=np.nan): if method not in ["linear", "nearest"]: raise ValueError("Method '%s' is not defined" % method) self.method = method self.bounds_error = bounds_error if not hasattr(values, 'ndim'): # allow reasonable duck-typed values values = np.asarray(values) if len(points) > values.ndim: raise ValueError("There are %d point arrays, but values has %d " "dimensions" % (len(points), values.ndim)) if hasattr(values, 'dtype') and hasattr(values, 'astype'): if not np.issubdtype(values.dtype, np.inexact): values = values.astype(float) self.fill_value = fill_value if fill_value is not None: fill_value_dtype = np.asarray(fill_value).dtype if (hasattr(values, 'dtype') and not np.can_cast(fill_value_dtype, values.dtype, casting='same_kind')): raise ValueError("fill_value must be either 'None' or " "of a type compatible with values") for i, p in enumerate(points): if not np.all(np.diff(p) > 0.): raise ValueError("The points in dimension %d must be strictly " "ascending" % i) if not np.asarray(p).ndim == 1: raise ValueError("The points in dimension %d must be " "1-dimensional" % i) if not values.shape[i] == len(p): raise ValueError("There are %d points and %d values in " "dimension %d" % (len(p), values.shape[i], i)) self.grid = tuple([np.asarray(p) for p in points]) self.values = values def __call__(self, xi, method=None): """ Interpolation at coordinates Parameters ---------- xi : ndarray of shape (..., ndim) The coordinates to sample the gridded data at method : str The method of interpolation to perform. Supported are "linear" and "nearest". 
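A hedged sketch of per-call method selection on a 2-by-2 grid (values chosen for illustration only):

>>> import numpy as np
>>> from scipy.interpolate import RegularGridInterpolator
>>> rgi = RegularGridInterpolator(([0., 1.], [0., 1.]),
...                               np.array([[0., 1.], [2., 3.]]))
>>> float(rgi([0.5, 0.5]))  # default method is 'linear'
1.5
>>> float(rgi([0.5, 0.5], method='nearest'))
0.0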
""" method = self.method if method is None else method if method not in ["linear", "nearest"]: raise ValueError("Method '%s' is not defined" % method) ndim = len(self.grid) xi = _ndim_coords_from_arrays(xi, ndim=ndim) if xi.shape[-1] != len(self.grid): raise ValueError("The requested sample points xi have dimension " "%d, but this RegularGridInterpolator has " "dimension %d" % (xi.shape[1], ndim)) xi_shape = xi.shape xi = xi.reshape(-1, xi_shape[-1]) if self.bounds_error: for i, p in enumerate(xi.T): if not np.logical_and(np.all(self.grid[i][0] <= p), np.all(p <= self.grid[i][-1])): raise ValueError("One of the requested xi is out of bounds " "in dimension %d" % i) indices, norm_distances, out_of_bounds = self._find_indices(xi.T) if method == "linear": result = self._evaluate_linear(indices, norm_distances, out_of_bounds) elif method == "nearest": result = self._evaluate_nearest(indices, norm_distances, out_of_bounds) if not self.bounds_error and self.fill_value is not None: result[out_of_bounds] = self.fill_value return result.reshape(xi_shape[:-1] + self.values.shape[ndim:]) def _evaluate_linear(self, indices, norm_distances, out_of_bounds): # slice for broadcasting over trailing dimensions in self.values vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices)) # find relevant values # each i and i+1 represents a edge edges = itertools.product(*[[i, i + 1] for i in indices]) values = 0. for edge_indices in edges: weight = 1. for ei, i, yi in zip(edge_indices, indices, norm_distances): weight *= np.where(ei == i, 1 - yi, yi) values += np.asarray(self.values[edge_indices]) * weight[vslice] return values def _evaluate_nearest(self, indices, norm_distances, out_of_bounds): idx_res = [np.where(yi <= .5, i, i + 1) for i, yi in zip(indices, norm_distances)] return self.values[tuple(idx_res)] def _find_indices(self, xi): # find relevant edges between which xi are situated indices = [] # compute distance to lower edge in unity units norm_distances = [] # check for out of bounds xi out_of_bounds = np.zeros((xi.shape[1]), dtype=bool) # iterate through dimensions for x, grid in zip(xi, self.grid): i = np.searchsorted(grid, x) - 1 i[i < 0] = 0 i[i > grid.size - 2] = grid.size - 2 indices.append(i) norm_distances.append((x - grid[i]) / (grid[i + 1] - grid[i])) if not self.bounds_error: out_of_bounds += x < grid[0] out_of_bounds += x > grid[-1] return indices, norm_distances, out_of_bounds def interpn(points, values, xi, method="linear", bounds_error=True, fill_value=np.nan): """ Multidimensional interpolation on regular grids. Parameters ---------- points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) The points defining the regular grid in n dimensions. values : array_like, shape (m1, ..., mn, ...) The data on the regular grid in n dimensions. xi : ndarray of shape (..., ndim) The coordinates to sample the gridded data at method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Extrapolation is not supported by method "splinef2d". 
Returns ------- values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:] Interpolated values at input coordinates. Notes ----- .. versionadded:: 0.14 See also -------- NearestNDInterpolator : Nearest neighbour interpolation on unstructured data in N dimensions LinearNDInterpolator : Piecewise linear interpolant on unstructured data in N dimensions RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a regular grid in arbitrary dimensions RectBivariateSpline : Bivariate spline approximation over a rectangular mesh """ # sanity check 'method' kwarg if method not in ["linear", "nearest", "splinef2d"]: raise ValueError("interpn only understands the methods 'linear', " "'nearest', and 'splinef2d'. You provided %s." % method) if not hasattr(values, 'ndim'): values = np.asarray(values) ndim = values.ndim if ndim > 2 and method == "splinef2d": raise ValueError("The method spline2fd can only be used for " "2-dimensional input data") if not bounds_error and fill_value is None and method == "splinef2d": raise ValueError("The method spline2fd does not support extrapolation.") # sanity check consistency of input dimensions if len(points) > ndim: raise ValueError("There are %d point arrays, but values has %d " "dimensions" % (len(points), ndim)) if len(points) != ndim and method == 'splinef2d': raise ValueError("The method spline2fd can only be used for " "scalar data with one point per coordinate") # sanity check input grid for i, p in enumerate(points): if not np.all(np.diff(p) > 0.): raise ValueError("The points in dimension %d must be strictly " "ascending" % i) if not np.asarray(p).ndim == 1: raise ValueError("The points in dimension %d must be " "1-dimensional" % i) if not values.shape[i] == len(p): raise ValueError("There are %d points and %d values in " "dimension %d" % (len(p), values.shape[i], i)) grid = tuple([np.asarray(p) for p in points]) # sanity check requested xi xi = _ndim_coords_from_arrays(xi, ndim=len(grid)) if xi.shape[-1] != len(grid): raise ValueError("The requested sample points xi have dimension " "%d, but this RegularGridInterpolator has " "dimension %d" % (xi.shape[1], len(grid))) for i, p in enumerate(xi.T): if bounds_error and not np.logical_and(np.all(grid[i][0] <= p), np.all(p <= grid[i][-1])): raise ValueError("One of the requested xi is out of bounds " "in dimension %d" % i) # perform interpolation if method == "linear": interp = RegularGridInterpolator(points, values, method="linear", bounds_error=bounds_error, fill_value=fill_value) return interp(xi) elif method == "nearest": interp = RegularGridInterpolator(points, values, method="nearest", bounds_error=bounds_error, fill_value=fill_value) return interp(xi) elif method == "splinef2d": xi_shape = xi.shape xi = xi.reshape(-1, xi.shape[-1]) # RectBivariateSpline doesn't support fill_value; we need to wrap here idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1], grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]), axis=0) result = np.empty_like(xi[:, 0]) # make a copy of values for RectBivariateSpline interp = RectBivariateSpline(points[0], points[1], values[:]) result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1]) result[np.logical_not(idx_valid)] = fill_value return result.reshape(xi_shape[:-1]) # backward compatibility wrapper class _ppform(PPoly): """ Deprecated piecewise polynomial class. New code should use the `PPoly` class instead. 
""" def __init__(self, coeffs, breaks, fill=0.0, sort=False): warnings.warn("_ppform is deprecated -- use PPoly instead", category=DeprecationWarning) if sort: breaks = np.sort(breaks) else: breaks = np.asarray(breaks) PPoly.__init__(self, coeffs, breaks) self.coeffs = self.c self.breaks = self.x self.K = self.coeffs.shape[0] self.fill = fill self.a = self.breaks[0] self.b = self.breaks[-1] def __call__(self, x): return PPoly.__call__(self, x, 0, False) def _evaluate(self, x, nu, extrapolate, out): PPoly._evaluate(self, x, nu, extrapolate, out) out[~((x >= self.a) & (x <= self.b))] = self.fill return out @classmethod def fromspline(cls, xk, cvals, order, fill=0.0): # Note: this spline representation is incompatible with FITPACK N = len(xk)-1 sivals = np.empty((order+1, N), dtype=float) for m in xrange(order, -1, -1): fact = spec.gamma(m+1) res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m) res /= fact sivals[order-m, :] = res return cls(sivals, xk, fill=fill)
bsd-3-clause
aminert/scikit-learn
sklearn/preprocessing/tests/test_data.py
113
38432
import warnings import numpy as np import numpy.linalg as la from scipy import sparse from distutils.version import LooseVersion from sklearn.utils.testing import assert_almost_equal, clean_warning_registry from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_no_warnings from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.preprocessing.data import _transform_selected from sklearn.preprocessing.data import Binarizer from sklearn.preprocessing.data import KernelCenterer from sklearn.preprocessing.data import Normalizer from sklearn.preprocessing.data import normalize from sklearn.preprocessing.data import OneHotEncoder from sklearn.preprocessing.data import StandardScaler from sklearn.preprocessing.data import scale from sklearn.preprocessing.data import MinMaxScaler from sklearn.preprocessing.data import minmax_scale from sklearn.preprocessing.data import MaxAbsScaler from sklearn.preprocessing.data import maxabs_scale from sklearn.preprocessing.data import RobustScaler from sklearn.preprocessing.data import robust_scale from sklearn.preprocessing.data import add_dummy_feature from sklearn.preprocessing.data import PolynomialFeatures from sklearn.utils.validation import DataConversionWarning from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_polynomial_features(): # Test Polynomial Features X1 = np.arange(6)[:, np.newaxis] P1 = np.hstack([np.ones_like(X1), X1, X1 ** 2, X1 ** 3]) deg1 = 3 X2 = np.arange(6).reshape((3, 2)) x1 = X2[:, :1] x2 = X2[:, 1:] P2 = np.hstack([x1 ** 0 * x2 ** 0, x1 ** 1 * x2 ** 0, x1 ** 0 * x2 ** 1, x1 ** 2 * x2 ** 0, x1 ** 1 * x2 ** 1, x1 ** 0 * x2 ** 2]) deg2 = 2 for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]: P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X) assert_array_almost_equal(P_test, P) P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X) assert_array_almost_equal(P_test, P[:, 1:]) interact = PolynomialFeatures(2, interaction_only=True, include_bias=True) X_poly = interact.fit_transform(X) assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]]) def test_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] 
scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X = np.ones(5) assert_array_equal(scale(X, with_mean=False), X) def test_standard_scaler_numerical_stability(): """Test numerical stability of scaling""" # np.log(1e-5) is taken because of its floating point representation # was empirically found to cause numerical problems with np.mean & np.std. x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64) if LooseVersion(np.__version__) >= LooseVersion('1.9'): # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy x_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(scale(x), np.zeros(8)) else: w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(8)) # with 2 more samples, the std computation run into numerical issues: x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64) w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.ones(10, dtype=np.float64) * 1e-100 x_small_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.ones(10, dtype=np.float64) * 1e100 w = "Dataset may contain too large values" x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) x_big_centered = assert_warns_message(UserWarning, w, scale, x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0]) # Check that the data hasn't been modified assert_true(X_scaled is not X) X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is X) X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non 
zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-.5, .6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -.5) assert_array_almost_equal(X_trans.max(axis=0), .6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) assert_raises(ValueError, scaler.fit, X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0., 0., 0.5], [0., 0., 0.0], [0., 0., 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0., 1., 0.500], [-1., 0., 0.083], [+0., 0., 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1., 1., 1.5], [1., 1., 1.0], [1., 1., 2.0]] assert_array_almost_equal(X_trans, X_expected_1_2) # function interface X_trans = minmax_scale(X) assert_array_almost_equal(X_trans, X_expected_0_1) X_trans = minmax_scale(X, feature_range=(1, 2)) assert_array_almost_equal(X_trans, X_expected_1_2) def test_minmax_scale_axis1(): X = iris.data X_trans = minmax_scale(X, axis=1) assert_array_almost_equal(np.min(X_trans, axis=1), 0) assert_array_almost_equal(np.max(X_trans, axis=1), 1) def test_min_max_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # Constant feature. X = np.zeros(5) scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_greater_equal(X_scaled.min(), 0.) assert_less_equal(X_scaled.max(), 1.) 
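# A hypothetical extra check (illustration only): with the default
# feature_range=(0, 1), MinMaxScaler should reduce to the per-column formula
# (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)).
def test_min_max_scaler_matches_manual_formula_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 3)
    X_manual = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    assert_array_almost_equal(MinMaxScaler().fit_transform(X), X_manual)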
def test_scaler_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) assert_raises(ValueError, StandardScaler().fit, X_csr) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_int(): # test that scaler converts integer input to floating # for both sparse and dense matrices rng = np.random.RandomState(42) X = rng.randint(20, size=(4, 5)) X[:, 0] = 0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) clean_warning_registry() with warnings.catch_warnings(record=True): X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) clean_warning_registry() with warnings.catch_warnings(record=True): scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, 
scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., 1.109, 1.856, 21., 1.559], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis( X_csr_scaled.astype(np.float), 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_without_copy(): # Check that StandardScaler.fit does not change input rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_copy = X.copy() StandardScaler(copy=False).fit(X) assert_array_equal(X, X_copy) X_csr_copy = X_csr.copy() StandardScaler(with_mean=False, copy=False).fit(X_csr) assert_array_equal(X_csr.toarray(), X_csr_copy.toarray()) def test_scale_sparse_with_mean_raise_exception(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X_csr = sparse.csr_matrix(X) # check scaling and fit with direct calls on sparse data assert_raises(ValueError, scale, X_csr, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr) # check transform and inverse_transform after a fit on a dense array scaler = StandardScaler(with_mean=True).fit(X) assert_raises(ValueError, scaler.transform, X_csr) X_transformed_csr = sparse.csr_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr) def test_scale_input_finiteness_validation(): # Check if non finite inputs raise ValueError X = [np.nan, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) X = [np.inf, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) def test_scale_function_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_scaled = scale(X, with_mean=False) assert_false(np.any(np.isnan(X_scaled))) X_csr_scaled = scale(X_csr, with_mean=False) assert_false(np.any(np.isnan(X_csr_scaled.data))) # test csc has same outcome X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) # raises value error on axis != 0 assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1) assert_array_almost_equal(X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) 
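# Editor's note (added comment): mean_variance_axis computes per-feature means and
# variances of the sparse matrix without densifying it. Its second return value is
# a variance, so the comparison against X_scaled.std(axis=0) below only holds
# because every scaled feature has variance 0 or 1, where variance and standard
# deviation coincide.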
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) def test_robust_scaler_2d_arrays(): """Test robust scaling of 2d array along first axis""" rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = RobustScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) def test_robust_scaler_iris(): X = iris.data scaler = RobustScaler() X_trans = scaler.fit_transform(X) assert_array_almost_equal(np.median(X_trans, axis=0), 0) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) q = np.percentile(X_trans, q=(25, 75), axis=0) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scale_axis1(): X = iris.data X_trans = robust_scale(X, axis=1) assert_array_almost_equal(np.median(X_trans, axis=1), 0) q = np.percentile(X_trans, q=(25, 75), axis=1) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scaler_zero_variance_features(): """Check RobustScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] scaler = RobustScaler() X_trans = scaler.fit_transform(X) # NOTE: for such a small sample size, what we expect in the third column # depends HEAVILY on the method used to calculate quantiles. The values # here were calculated to fit the quantiles produces by np.percentile # using numpy 1.9 Calculating quantiles with # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles # would yield very different results! X_expected = [[0., 0., +0.0], [0., 0., -1.0], [0., 0., +1.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 1., +0.], [-1., 0., -0.83333], [+0., 0., +1.66667]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) def test_maxabs_scaler_zero_variance_features(): """Check MaxAbsScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.3], [0., 1., +1.5], [0., 0., +0.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 2.0, 1.0 / 3.0], [-1., 1.0, 0.0], [+0., 1.0, 1.0]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) # sparse data X_csr = sparse.csr_matrix(X) X_trans = scaler.fit_transform(X_csr) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans.A, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv.A) def test_maxabs_scaler_large_negative_value(): """Check MaxAbsScaler on toy data with a large negative value""" X = [[0., 1., +0.5, -1.0], [0., 1., -0.3, -0.5], [0., 1., -100.0, 0.0], [0., 0., +0.0, -2.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 0.005, -0.5], [0., 1., -0.003, -0.25], [0., 1., -1.0, 
0.0], [0., 0., 0.0, -1.0]] assert_array_almost_equal(X_trans, X_expected) def test_warning_scaling_integers(): # Check warning when scaling integer data X = np.array([[1, 2, 0], [0, 0, 0]], dtype=np.uint8) w = "Data with input dtype uint8 was converted to float64" clean_warning_registry() assert_warns_message(DataConversionWarning, w, scale, X) assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X) assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X) def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert_true(X_norm is not X) X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert_true(X_norm is X) X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_l2(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_max(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 
3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='max', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='max', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): row_maxs = X_norm.max(axis=1) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(row_maxs[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalize(): # Test normalize function # Only tests functionality not used by the tests for Normalizer. X = np.random.RandomState(37).randn(3, 2) assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) assert_raises(ValueError, normalize, [[0]], axis=2) assert_raises(ValueError, normalize, [[0]], norm='l3') def test_binarizer(): X_ = np.array([[1, 0, 5], [2, 3, -1]]) for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix): X = init(X_.copy()) binarizer = Binarizer(threshold=2.0, copy=True) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 4) assert_equal(np.sum(X_bin == 1), 2) X_bin = binarizer.transform(X) assert_equal(sparse.issparse(X), sparse.issparse(X_bin)) binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) assert_true(X_bin is not X) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) assert_true(X_bin is not X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: assert_true(X_bin is X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(threshold=-0.5, copy=True) for init in (np.array, list): X = init(X_.copy()) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 1) assert_equal(np.sum(X_bin == 1), 5) X_bin = binarizer.transform(X) # Cannot use threshold < 0 for sparse assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X)) def test_center_kernel(): # Test that KernelCenterer is equivalent to StandardScaler # in feature space rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) scaler = StandardScaler(with_std=False) scaler.fit(X_fit) X_fit_centered = scaler.transform(X_fit) K_fit = np.dot(X_fit, X_fit.T) # center fit time matrix centerer = KernelCenterer() K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) K_fit_centered2 = centerer.fit_transform(K_fit) assert_array_almost_equal(K_fit_centered, K_fit_centered2) # center predict time matrix X_pred = rng.random_sample((2, 4)) K_pred = np.dot(X_pred, X_fit.T) X_pred_centered = scaler.transform(X_pred) 
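# Editor's note (added comment): the equivalence being checked is that centering
# the kernel matrix is the same as centering in feature space, i.e.
# K_pred_centered[i, j] = <x_pred_i - mean(X_fit), x_fit_j - mean(X_fit)>, which is
# exactly the dot product formed explicitly from the mean-subtracted data below.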
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) K_pred_centered2 = centerer.transform(K_pred) assert_array_almost_equal(K_pred_centered, K_pred_centered2) def test_fit_transform(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for obj in ((StandardScaler(), Normalizer(), Binarizer())): X_transformed = obj.fit(X).transform(X) X_transformed2 = obj.fit_transform(X) assert_array_equal(X_transformed, X_transformed2) def test_add_dummy_feature(): X = [[1, 0], [0, 1], [0, 1]] X = add_dummy_feature(X) assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_coo(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csc(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csr(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_one_hot_encoder_sparse(): # Test OneHotEncoder's fit and transform. X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder() # discover max values automatically X_trans = enc.fit_transform(X).toarray() assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, [[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]]) # max value given as 3 enc = OneHotEncoder(n_values=4) X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 4 * 3)) assert_array_equal(enc.feature_indices_, [0, 4, 8, 12]) # max value given per feature enc = OneHotEncoder(n_values=[3, 2, 2]) X = [[1, 0, 1], [0, 1, 1]] X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 3 + 2 + 2)) assert_array_equal(enc.n_values_, [3, 2, 2]) # check that testing with larger feature works: X = np.array([[2, 0, 1], [0, 1, 1]]) enc.transform(X) # test that an error is raised when out of bounds: X_too_large = [[0, 2, 1], [0, 1, 1]] assert_raises(ValueError, enc.transform, X_too_large) assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X) # test that error is raised when wrong number of features assert_raises(ValueError, enc.transform, X[:, :-1]) # test that error is raised when wrong number of features in fit # with prespecified n_values assert_raises(ValueError, enc.fit, X[:, :-1]) # test exception on wrong init param assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X) enc = OneHotEncoder() # test negative input to fit assert_raises(ValueError, enc.fit, [[0], [-1]]) # test negative input to transform enc.fit([[0], [1]]) assert_raises(ValueError, enc.transform, [[0], [-1]]) def test_one_hot_encoder_dense(): # check for sparse=False X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder(sparse=False) # discover max values automatically X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, np.array([[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]])) def _check_transform_selected(X, X_expected, sel): for M in (X, sparse.csr_matrix(X)): Xtr = _transform_selected(M, 
Binarizer().transform, sel) assert_array_equal(toarray(Xtr), X_expected) def test_transform_selected(): X = [[3, 2, 1], [0, 1, 1]] X_expected = [[1, 2, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0]) _check_transform_selected(X, X_expected, [True, False, False]) X_expected = [[1, 1, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0, 1, 2]) _check_transform_selected(X, X_expected, [True, True, True]) _check_transform_selected(X, X_expected, "all") _check_transform_selected(X, X, []) _check_transform_selected(X, X, [False, False, False]) def _run_one_hot(X, X2, cat): enc = OneHotEncoder(categorical_features=cat) Xtr = enc.fit_transform(X) X2tr = enc.transform(X2) return Xtr, X2tr def _check_one_hot(X, X2, cat, n_features): ind = np.where(cat)[0] # With mask A, B = _run_one_hot(X, X2, cat) # With indices C, D = _run_one_hot(X, X2, ind) # Check shape assert_equal(A.shape, (2, n_features)) assert_equal(B.shape, (1, n_features)) assert_equal(C.shape, (2, n_features)) assert_equal(D.shape, (1, n_features)) # Check that mask and indices give the same results assert_array_equal(toarray(A), toarray(C)) assert_array_equal(toarray(B), toarray(D)) def test_one_hot_encoder_categorical_features(): X = np.array([[3, 2, 1], [0, 1, 1]]) X2 = np.array([[1, 1, 1]]) cat = [True, False, False] _check_one_hot(X, X2, cat, 4) # Edge case: all non-categorical cat = [False, False, False] _check_one_hot(X, X2, cat, 3) # Edge case: all categorical cat = [True, True, True] _check_one_hot(X, X2, cat, 5) def test_one_hot_encoder_unknown_transform(): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) y = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. oh = OneHotEncoder(handle_unknown='error') oh.fit(X) assert_raises(ValueError, oh.transform, y) # Test the ignore option, ignores unknown features. oh = OneHotEncoder(handle_unknown='ignore') oh.fit(X) assert_array_equal( oh.transform(y).toarray(), np.array([[0., 0., 0., 0., 1., 0., 0.]]) ) # Raise error if handle_unknown is neither ignore or error. oh = OneHotEncoder(handle_unknown='42') oh.fit(X) assert_raises(ValueError, oh.transform, y)
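# Editor's note (worked example, not original test code): the layout exercised by
# test_one_hot_encoder_sparse above. For X = [[3, 2, 1], [0, 1, 1]] the value
# ranges discovered at fit time are 4, 3 and 2 per input column, giving
# feature_indices_ = [0, 4, 7, 9] (cumulative offsets). A value v in input column c
# maps to output column feature_indices_[c] + v, so the values actually observed
# occupy columns {0, 3, 5, 6, 8}; these become active_features_ and only they are
# kept, which is why the transformed matrix has shape (2, 5).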
bsd-3-clause
chengsoonong/acton
acton/database.py
1
72157
"""Wrapper class for databases.""" from abc import ABC, abstractmethod from inspect import Traceback import json import logging import os.path import tempfile from typing import Iterable, List, Sequence import warnings import time from acton.proto.acton_pb2 import Database as DatabasePB import astropy.io.ascii as io_ascii import astropy.io.fits as io_fits import astropy.table import h5py import numpy import pandas import sklearn.preprocessing from numpy.random import multivariate_normal LabelEncoderPB = DatabasePB.LabelEncoder def product(seq: Iterable[int]): """Finds the product of a list of ints. Arguments --------- seq List of ints. Returns ------- int Product. """ prod = 1 for i in seq: prod *= i return prod def serialise_encoder( encoder: sklearn.preprocessing.LabelEncoder) -> LabelEncoderPB: """Serialises a LabelEncoder as a protobuf. Parameters ---------- encoder LabelEncoder. Returns ------- LabelEncoderPB Protobuf representing the LabelEncoder. """ proto = LabelEncoderPB() if not hasattr(encoder, 'classes_'): return proto for i, class_label in enumerate(encoder.classes_): encoding = proto.encoding.add() encoding.class_label = str(class_label) encoding.class_int = i return proto class Database(ABC): """Base class for database wrappers.""" @abstractmethod def __enter__(self): return self @abstractmethod def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): pass @abstractmethod def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ @abstractmethod def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.ndarray T x N x F array of label vectors. """ @abstractmethod def write_features(self, ids: Sequence[int], features: numpy.ndarray): """Writes feature vectors to the database. Parameters ---------- ids Iterable of IDs. features N x D array of feature vectors. The ith row corresponds to the ith ID in `ids`. """ @abstractmethod def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): """Writes label vectors to the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. labels T x N x D array of label vectors. The ith row corresponds to the ith labeller ID in `labeller_ids` and the jth column corresponds to the jth instance ID in `instance_ids`. """ @abstractmethod def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ @abstractmethod def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ @abstractmethod def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ class HDF5Database(Database): """Database wrapping an HDF5 file as a context manager. Attributes ---------- path : str Path to HDF5 file. _h5_file : h5py.File HDF5 file object. 
""" def __init__(self, path: str): self.path = path def __enter__(self): self._open_hdf5() return self def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): self._h5_file.close() delattr(self, '_h5_file') def _assert_open(self): """Asserts that the HDF5 file is ready to be read to/written from. Raises ------ AssertionError """ assert hasattr(self, '_h5_file'), ('HDF5 database must be used as a ' 'context manager.') def _open_hdf5(self): """Opens the HDF5 file and creates it if it doesn't exist. Notes ----- The HDF5 file will be stored in self._h5_file. """ try: self._h5_file = h5py.File(self.path, 'r+') except OSError: with h5py.File(self.path, 'w') as h5_file: self._setup_hdf5(h5_file) self._h5_file = h5py.File(self.path, 'r+') class ManagedHDF5Database(HDF5Database): """Database using an HDF5 file. Notes ----- This database uses an internal schema. For reading files from disk, use another Database. Attributes ---------- path : str Path to HDF5 file. label_dtype : str Data type of labels. feature_dtype : str Data type of features. _h5_file : h5py.File Opened HDF5 file. _sync_attrs : List[str] List of instance attributes to sync with the HDF5 file's attributes. """ def __init__(self, path: str, label_dtype: str=None, feature_dtype: str=None): """ Parameters ---------- path Path to HDF5 file. label_dtype Data type of labels. If not provided then it will be read from the database file; if the database file does not exist then the default type of 'float32' will be used. feature_dtype Data type of features. If not provided then it will be read from the database file; if the database file does not exist then the default type of 'float32' will be used. """ super().__init__(path) self.label_dtype = label_dtype self._default_label_dtype = 'float32' self.feature_dtype = feature_dtype self._default_feature_dtype = 'float32' # List of attributes to keep in sync with the HDF5 file. self._sync_attrs = ['label_dtype', 'feature_dtype'] def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'ManagedHDF5Database' db_kwargs = { 'label_dtype': self.label_dtype, 'feature_dtype': self.feature_dtype} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) # No encoder for a managed DB - assume that labels are encoded already. # proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def _open_hdf5(self): """Opens the HDF5 file and creates it if it doesn't exist. Notes ----- The HDF5 file will be stored in self._h5_file. """ super()._open_hdf5() # Load attrs from HDF5 file if we haven't specified them. for attr in self._sync_attrs: if getattr(self, attr) is None: setattr(self, attr, self._h5_file.attrs[attr]) self._validate_hdf5() def write_features(self, ids: Sequence[int], features: numpy.ndarray): """Writes feature vectors to the database. Parameters ---------- ids Iterable of IDs. features: N x D array of feature vectors. The ith row corresponds to the ith ID in `ids`. Returns ------- numpy.ndarray N x D array of feature vectors. """ self._assert_open() # Input validation. if len(ids) != len(features): raise ValueError('Must have same number of IDs and features.') if self._h5_file.attrs['n_features'] == -1: # This is the first time we've stored features, so make a record of # the dimensionality. 
self._h5_file.attrs['n_features'] = features.shape[1] elif self._h5_file.attrs['n_features'] != features.shape[1]: raise ValueError( 'Expected features to have dimensionality {}, got {}'.format( self._h5_file.attrs['n_features'], features.shape[1])) # Early termination. if not ids: return # Cast the features to the right type. if features.dtype != self.feature_dtype: warnings.warn('Casting features from type {} to type {}.'.format( features.dtype, self.feature_dtype)) features = features.astype(self.feature_dtype) # Resize the feature array if we need to store more IDs than before. max_id = max(ids) + 1 if max_id > self._h5_file['features'].shape[0]: self._h5_file['features'].resize( (max_id, self._h5_file.attrs['n_features'])) # Store the feature vectors. # TODO(MatthewJA): Vectorise this. This could be tricky as HDF5 doesn't # fully support NumPy's fancy indexing. for id_, feature in zip(ids, features): self._h5_file['features'][id_, :] = feature # Add the IDs to the database. known_ids = set(self.get_known_instance_ids()) new_ids = [i for i in ids if i not in known_ids] n_new_ids = len(new_ids) n_old_ids = self._h5_file['instance_ids'].shape[0] self._h5_file['instance_ids'].resize((n_old_ids + n_new_ids,)) self._h5_file['instance_ids'][-n_new_ids:] = numpy.array( new_ids, dtype=int) def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ self._assert_open() if self._h5_file.attrs['n_features'] == -1 and ids: raise KeyError('No features stored in database.') # Allocate the features array. features = numpy.zeros((len(ids), self._h5_file.attrs['n_features']), dtype=self._h5_file.attrs['feature_dtype']) # Loop through each ID we want to query and put the associated feature # into the features array. features = self._h5_file['features'].value[ids, :] features = numpy.asarray( features, dtype=self._h5_file.attrs['feature_dtype']) return features def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): """Writes label vectors to the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. labels T x N x D array of label vectors. The ith row corresponds to the ith labeller ID in `labeller_ids` and the jth column corresponds to the jth instance ID in `instance_ids`. """ self._assert_open() # Input validation. if len(labeller_ids) != labels.shape[0]: raise ValueError( 'labels array has incorrect number of labellers:' ' expected {}, got {}.'.format(len(labeller_ids), labels.shape[0])) if len(instance_ids) != labels.shape[1]: raise ValueError( 'labels array has incorrect number of instances:' ' expected {}, got {}.'.format(len(instance_ids), labels.shape[1])) if self._h5_file.attrs['label_dim'] == -1: # This is the first time we've stored labels, so make a record of # the dimensionality. self._h5_file.attrs['label_dim'] = labels.shape[2] elif self._h5_file.attrs['label_dim'] != labels.shape[2]: raise ValueError( 'Expected labels to have dimensionality {}, got {}'.format( self._h5_file.attrs['label_dim'], labels.shape[2])) # Early termination. if not labeller_ids or not instance_ids: return # Cast the labels to the right type. if labels.dtype != self.label_dtype: warnings.warn('Casting labels from type {} to type {}.'.format( labels.dtype, self.label_dtype)) labels = labels.astype(self.label_dtype) # Resize the label array if necessary. 
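# Editor's note (added comment): labeller and instance IDs are used directly as
# indices into the 'labels' dataset, so it is grown to
# (max labeller ID + 1, max instance ID + 1, label_dim); IDs that were never
# written simply remain as zero-filled slices rather than being compacted.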
max_labeller_id = max(labeller_ids) + 1 max_instance_id = max(instance_ids) + 1 if (max_labeller_id > self._h5_file['labels'].shape[0] or max_instance_id > self._h5_file['labels'].shape[1]): self._h5_file['labels'].resize( (max_labeller_id, max_instance_id, self._h5_file.attrs['label_dim'])) # Store the labels. # TODO(MatthewJA): Vectorise this. for labeller_idx, labeller_id in enumerate(labeller_ids): for instance_idx, instance_id in enumerate(instance_ids): label = labels[labeller_idx, instance_idx] self._h5_file['labels'][ labeller_id, instance_id, :] = label logging.debug( 'New label array size: {}'.format(self._h5_file['labels'].shape)) # Add the instance IDs to the database. known_instance_ids = set(self.get_known_instance_ids()) new_instance_ids = [i for i in instance_ids if i not in known_instance_ids] n_new_instance_ids = len(new_instance_ids) n_old_instance_ids = self._h5_file['instance_ids'].shape[0] if n_new_instance_ids: self._h5_file['instance_ids'].resize( (n_old_instance_ids + n_new_instance_ids,)) self._h5_file['instance_ids'][-n_new_instance_ids:] = numpy.array( new_instance_ids, dtype=int) # Add the labeller IDs to the database. known_labeller_ids = set(self.get_known_labeller_ids()) new_labeller_ids = [i for i in labeller_ids if i not in known_labeller_ids] n_new_labeller_ids = len(new_labeller_ids) n_old_labeller_ids = self._h5_file['labeller_ids'].shape[0] if n_new_labeller_ids: self._h5_file['labeller_ids'].resize( (n_old_labeller_ids + n_new_labeller_ids,)) self._h5_file['labeller_ids'][-n_new_labeller_ids:] = numpy.array( new_labeller_ids, dtype=int) def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.ndarray T x N x F array of label vectors. """ self._assert_open() if self._h5_file.attrs['label_dim'] == -1 and ( labeller_ids or instance_ids): raise KeyError('No labels stored in database.') labels = self._h5_file['labels'].value[labeller_ids][:, instance_ids, :] labels = numpy.asarray(labels, dtype=self._h5_file.attrs['label_dtype']) return labels def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ self._assert_open() return [id_ for id_ in self._h5_file['instance_ids']] def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ self._assert_open() return [id_ for id_ in self._h5_file['labeller_ids']] def _setup_hdf5(self, h5_file: h5py.File): """Sets up an HDF5 file to work as a database. Parameters ---------- h5_file HDF5 file to set up. Must be opened in write mode. 
""" if self.label_dtype is None: self.label_dtype = self._default_label_dtype if self.feature_dtype is None: self.feature_dtype = self._default_feature_dtype h5_file.create_dataset('features', shape=(0, 0), dtype=self.feature_dtype, maxshape=(None, None)) h5_file.create_dataset('labels', shape=(0, 0, 0), dtype=self.label_dtype, maxshape=(None, None, None)) h5_file.create_dataset('instance_ids', shape=(0,), dtype=int, maxshape=(None,)) h5_file.create_dataset('labeller_ids', shape=(0,), dtype=int, maxshape=(None,)) h5_file.attrs['label_dtype'] = self.label_dtype h5_file.attrs['feature_dtype'] = self.feature_dtype h5_file.attrs['n_features'] = -1 h5_file.attrs['label_dim'] = -1 def _validate_hdf5(self): """Checks that self._h5_file has the correct schema. Raises ------ ValueError """ try: assert 'features' in self._h5_file assert 'labels' in self._h5_file assert 'instance_ids' in self._h5_file assert 'labeller_ids' in self._h5_file assert len(self._h5_file['features'].shape) == 2 assert len(self._h5_file['labels'].shape) == 3 assert len(self._h5_file['instance_ids'].shape) == 1 assert len(self._h5_file['labeller_ids'].shape) == 1 except AssertionError: raise ValueError( 'File {} is not a valid database.'.format(self.path)) for attr in self._sync_attrs: assert getattr(self, attr) is not None if self._h5_file.attrs[attr] != getattr(self, attr): raise ValueError('Incompatible {}: expected {}, got {}'.format( attr, getattr(self, attr), self._h5_file.attrs[attr])) class GraphDatabase(HDF5Database): """Manage database handling knowledge graph factorization, Attributes ----------- path: str Path to HDF5 file. """ def __init__(self, path: str): """ Parameters ---------- path Path to HDF5 file. """ self.path = path def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'ManagedHDF5Database' db_kwargs = { 'label_dtype': self.label_dtype, 'feature_dtype': self.feature_dtype} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) # No encoder for a managed DB - assume that labels are encoded already. # proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def _open_hdf5(self): """Opens the HDF5 file and creates it if it doesn't exist. Notes ----- The HDF5 file will be stored in self._h5_file. """ try: self._h5_file = h5py.File(self.path, 'r+') except OSError: with h5py.File(self.path, 'w') as h5_file: self._setup_hdf5(h5_file) self._h5_file = h5py.File(self.path, 'r+') def _setup_hdf5(self, h5_file: h5py.File): """Sets up an HDF5 file to work as a database. Parameters ---------- h5_file HDF5 file to set up. Must be opened in write mode. """ h5_file.create_dataset('features_E', shape=(0, 0, 0), maxshape=(None, None, None)) h5_file.create_dataset('features_R', shape=(0, 0, 0, 0), maxshape=(None, None, None, None)) h5_file.create_dataset('labels', shape=(0, 0, 0), maxshape=(None, None, None)) h5_file.attrs['n_entities'] = -1 h5_file.attrs['n_relations'] = -1 h5_file.attrs['n_dim'] = -1 h5_file.attrs['n_particles'] = -1 def _validate_hdf5(self): """Checks that self._h5_file has the correct schema. 
Raises ------ ValueError """ try: assert 'features_E' in self._h5_file assert 'features_R' in self._h5_file assert 'labels' in self._h5_file assert len(self._h5_file['features_E'].shape) == 3 assert len(self._h5_file['features_R'].shape) == 4 assert len(self._h5_file['labels'].shape) == 3 except AssertionError: raise ValueError( 'File {} is not a valid database.'.format(self.path)) def write_labels(self, labels: numpy.ndarray): """Writes label vectors to the database. Parameters ---------- labels K x N x N array of label vectors. K is the number of relations, N is the number of entities. """ self._assert_open() # Input validation. if self._h5_file.attrs['n_relations'] == -1: # This is the first time we've stored labels, so make a record of # the dimensionality. self._h5_file.attrs['n_relations'] = labels.shape[0] elif self._h5_file.attrs['n_relations'] != labels.shape[0]: raise ValueError( 'Expected number of relations {}, got {}'.format( self._h5_file.attrs['n_relations'], labels.shape[0])) if self._h5_file.attrs['n_entities'] == -1: # This is the first time we've stored labels, so make a record of # the dimensionality. self._h5_file.attrs['n_entities'] = labels.shape[1] elif self._h5_file.attrs['n_entities'] != labels.shape[1]: raise ValueError( 'Expected number of entities {}, got {}'.format( self._h5_file.attrs['n_entities'], labels.shape[1])) # Resize the label array if necessary. if (labels.shape[0] > self._h5_file['labels'].shape[0] or labels.shape[1] > self._h5_file['labels'].shape[1] or labels.shape[2] > self._h5_file['labels'].shape[2]): self._h5_file['labels'].resize(labels.shape) # Store the labels. # TODO(MatthewJA): Vectorise this. for i in range(labels.shape[0]): self._h5_file['labels'][i, :] = labels[i, :] logging.debug( 'New label array size: {}'.format(self._h5_file['labels'].shape)) def write_features(self, features_E: numpy.ndarray, features_R: numpy.ndarray): """Writes feature vectors to the database. Parameters ---------- features_E: P x N x D array of entity feature vectors. P is the number of particles. N is the number of entities. D is the number of latent variable dimensions. features_R: P x K x D x D array of relation feature vectors. P is the number of particles. K is the number of relations. D is the number of latent variable dimensions. """ self._assert_open() n_particles = features_E.shape[0] assert features_E.shape[0] == features_R.shape[0] n_entities = features_E.shape[1] n_relations = features_R.shape[1] n_dim = features_E.shape[2] assert features_E.shape[2] == features_R.shape[2] == features_R.shape[3] # Input validation. if self._h5_file.attrs['n_relations'] == -1: # This is the first time we've stored features, so make a record of # the dimensionality. self._h5_file.attrs['n_relations'] = n_relations elif self._h5_file.attrs['n_relations'] != n_relations: raise ValueError( 'Expected number of relations {}, got {}'.format( self._h5_file.attrs['n_relations'], n_relations)) if self._h5_file.attrs['n_entities'] == -1: # This is the first time we've stored features, so make a record of # the dimensionality. self._h5_file.attrs['n_entities'] = n_entities elif self._h5_file.attrs['n_entities'] != n_entities: raise ValueError( 'Expected number of entities {}, got {}'.format( self._h5_file.attrs['n_entities'], n_entities)) if self._h5_file.attrs['n_dim'] == -1: # This is the first time we've stored features, so make a record of # the dimensionality.
self._h5_file.attrs['n_dim'] = n_dim elif self._h5_file.attrs['n_dim'] != n_dim: raise ValueError( 'Expected number of latent dimensions {}, got {}'.format( self._h5_file.attrs['n_dim'], n_dim)) if self._h5_file.attrs['n_particles'] == -1: # This is the first time we've stored features, so make a record of # the dimensionality. self._h5_file.attrs['n_particles'] = n_particles elif self._h5_file.attrs['n_particles'] != n_particles: raise ValueError( 'Expected number of particles {}, got {}'.format( self._h5_file.attrs['n_particles'], n_particles)) # Resize the feature array if we need to store more IDs than before. if (features_E.shape[0] > self._h5_file['features_E'].shape[0] or features_E.shape[1] > self._h5_file['features_E'].shape[1] or features_E.shape[2] > self._h5_file['features_E'].shape[2]): self._h5_file['features_E'].resize(features_E.shape) if (features_R.shape[0] > self._h5_file['features_R'].shape[0] or features_R.shape[1] > self._h5_file['features_R'].shape[1] or features_R.shape[2] > self._h5_file['features_R'].shape[2]): self._h5_file['features_R'].resize(features_R.shape) # Store the feature vectors. # TODO(MatthewJA): Vectorise this. This could be tricky as HDF5 doesn't # fully support NumPy's fancy indexing. for id_, feature in enumerate(features_E): self._h5_file['features_E'][id_, :] = feature for id_, feature in enumerate(features_R): self._h5_file['features_R'][id_, :, :] = feature logging.debug( 'New feature E array size: {}'.format( self._h5_file['features_E'].shape)) logging.debug( 'New feature R array size: {}'.format( self._h5_file['features_R'].shape)) def read_labels(self, instance_ids: Sequence[tuple]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- instance_ids sequence of ids of labels to be read; an empty list indicates reading all labels at once Returns ------- numpy.ndarray array of label vectors. """ self._assert_open() n_entities = self._h5_file.attrs['n_entities'] n_relations = self._h5_file.attrs['n_relations'] if (n_entities == -1 or n_relations == -1): raise KeyError('No labels stored in database.') if len(instance_ids) == 0: return numpy.asarray(self._h5_file['labels'].value) else: labels = [] for tuple_ in instance_ids: r_k, e_i, e_j = tuple_ labels.append(self._h5_file['labels'].value[r_k, e_i, e_j]) return numpy.asarray(labels) def read_features(self) -> numpy.ndarray: """Reads feature vectors from the database. Returns ------- E numpy.ndarray P x N x D array of feature vectors. R numpy.ndarray P x K x D x D array of feature vectors. """ self._assert_open() if self._h5_file.attrs['n_particles'] == -1: raise KeyError('No features stored in database.') # Allocate the features arrays. features_E = numpy.zeros((self._h5_file.attrs['n_particles'], self._h5_file.attrs['n_entities'], self._h5_file.attrs['n_dim'])) features_R = numpy.zeros((self._h5_file.attrs['n_particles'], self._h5_file.attrs['n_relations'], self._h5_file.attrs['n_dim'], self._h5_file.attrs['n_dim'])) # Loop through each ID we want to query and put the associated feature # into the features array. features_E = self._h5_file['features_E'].value features_R = self._h5_file['features_R'].value features_E = numpy.asarray(features_E) features_R = numpy.asarray(features_R) return features_E, features_R def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs.
""" self._assert_open() return [id_ for id_ in self._h5_file['instance_ids']] def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ self._assert_open() return [id_ for id_ in self._h5_file['labeller_ids']] class HDF5Reader(HDF5Database): """Reads HDF5 databases. Attributes ---------- feature_cols : List[str] List of feature datasets. label_col : str Name of label dataset. n_features : int Number of features. n_instances : int Number of instances. n_labels : int Number of labels per instance. path : str Path to HDF5 file. encode_labels : bool Whether to encode labels as integers. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. _h5_file : h5py.File HDF5 file object. _is_multidimensional : bool Whether the features are in a multidimensional dataset. """ def __init__(self, path: str, feature_cols: List[str], label_col: str, encode_labels: bool=True, label_encoder: sklearn.preprocessing.LabelEncoder=None): """ Parameters ---------- path Path to HDF5 file. feature_cols List of feature datasets. If only one feature dataset is specified, this dataset is allowed to be a multidimensional dataset and contain multiple features. label_col Name of label dataset. encode_labels Whether to encode labels as integers. label_encoder Encodes labels as integers. If not specified, the label column will be read and a label encoding generated. """ super().__init__(path) if not feature_cols: raise ValueError('Must specify feature columns for HDF5.') self.feature_cols = feature_cols self.label_col = label_col self.encode_labels = encode_labels self.label_encoder = label_encoder if self.label_encoder and not self.encode_labels: raise ValueError('label_encoder specified but encode_labels is ' 'False') if self.label_encoder is None: self.label_encoder = sklearn.preprocessing.LabelEncoder() with h5py.File(self.path, 'r') as data: is_multidimensional = any(len(data[f_col].shape) > 1 or not product(data[f_col].shape[1:]) == 1 for f_col in feature_cols) if is_multidimensional and len(feature_cols) != 1: raise ValueError( 'Feature arrays and feature columns cannot be mixed. ' 'To read in features from a multidimensional dataset, ' 'only specify one feature column name.') self._is_multidimensional = is_multidimensional self.n_instances = data[label_col].shape[0] if len(data[label_col].shape) == 1: self.n_labels = 1 else: assert len(data[label_col].shape) == 2 self.n_labels = data[label_col].shape[1] if is_multidimensional: self.n_features = data[feature_cols[0]].shape[1] else: self.n_features = len(feature_cols) def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'HDF5Reader' db_kwargs = { 'feature_cols': self.feature_cols, 'label_col': self.label_col, 'encode_labels': self.encode_labels} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ # TODO(MatthewJA): Optimise this. self._assert_open() # For each ID, get the corresponding features. 
if self._is_multidimensional: # If there are duplicates in ids, then this will crash with an # OSError! (and a very cryptic error message...) To get around this, # we'll first get all the unique IDs. unique_ids = [] unique_ids_set = set() # For lookups. id_to_index = {} # For reconstructing the features. for id_ in ids: if id_ not in unique_ids_set: unique_ids.append(id_) unique_ids_set.add(id_) id_to_index[id_] = len(unique_ids) - 1 # Then index with just the unique IDs. features_ = self._h5_file[self.feature_cols[0]][unique_ids] # Finally, reconstruct the features array. features = numpy.zeros((len(ids), features_.shape[1])) for index, id_ in enumerate(ids): index_ = id_to_index[id_] features[index, :] = features_[index_, :] return features else: # Allocate output array. features = numpy.zeros((len(ids), len(self.feature_cols))) # Read each feature. features_h5 = self._h5_file[self.feature_cols[0]] for feature_idx, feature_name in enumerate(self.feature_cols): features[ids, feature_idx] = features_h5[feature_name][ids] return numpy.nan_to_num(features) def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.ndarray T x N x F array of label vectors. """ self._assert_open() if len(labeller_ids) > 1: raise NotImplementedError('Multiple labellers not yet supported.') # TODO(MatthewJA): Optimise this. # For each ID, get the corresponding labels. # If there are duplicates in ids, then this will crash with an # OSError! (and a very cryptic error message...) To get around this, # we'll first get all the unique IDs. unique_ids = [] unique_ids_set = set() # For lookups. id_to_index = {} # For reconstructing the labels. for id_ in instance_ids: if id_ not in unique_ids_set: unique_ids.append(id_) unique_ids_set.add(id_) id_to_index[id_] = len(unique_ids) - 1 # Then index with just the unique IDs. labels_ = self._h5_file[self.label_col][unique_ids].reshape( (1, len(unique_ids), -1)) # Finally, reconstruct the labels array. labels = numpy.zeros( (1, len(instance_ids), labels_.shape[2]), dtype=labels_.dtype) for index, id_ in enumerate(instance_ids): index_ = id_to_index[id_] labels[0, index, :] = labels_[0, index_, :] if labels.shape[2] != 1: raise NotImplementedError('Multidimensional labels not currently ' 'supported.') # Encode labels. if self.encode_labels: labels = numpy.apply_along_axis( self.label_encoder.fit_transform, axis=1, arr=labels.reshape(labels.shape[:2]) ).reshape(labels.shape) return labels def write_features(self, ids: Sequence[int], features: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ self._assert_open() return [i for i in range(self.n_instances)] def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ raise NotImplementedError() class ASCIIReader(Database): """Reads ASCII databases. Attributes ---------- feature_cols : List[str] List of feature columns. label_col : str Name of label column. max_id_length : int Maximum length of IDs. 
n_features : int Number of features. n_instances : int Number of instances. n_labels : int Number of labels per instance. path : str Path to ASCII file. encode_labels : bool Whether to encode labels as integers. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. _db : Database Underlying ManagedHDF5Database. _db_filepath : str Path of underlying HDF5 database. _tempdir : str Temporary directory where the underlying HDF5 database is stored. """ def __init__(self, path: str, feature_cols: List[str], label_col: str, encode_labels: bool=True, label_encoder: sklearn.preprocessing.LabelEncoder=None): """ Parameters ---------- path Path to ASCII file. feature_cols List of feature columns. label_col Name of label column. encode_labels Whether to encode labels as integers. label_encoder Encodes labels as integers. If not specified, the label column will be read and a label encoding generated. """ self.path = path self.feature_cols = feature_cols self.label_col = label_col self.encode_labels = encode_labels self.label_encoder = label_encoder if self.label_encoder and not self.encode_labels: raise ValueError('label_encoder specified but encode_labels is ' 'False') if self.label_encoder is None: self.label_encoder = sklearn.preprocessing.LabelEncoder() def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'ASCIIReader' db_kwargs = { 'feature_cols': self.feature_cols, 'label_col': self.label_col, 'encode_labels': self.encode_labels} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def _db_from_ascii(self, db: Database, data: astropy.table.Table, feature_cols: Sequence[str], label_col: str, ids: Sequence[int]): """Reads an ASCII table into a database. Notes ----- The entire file is copied into memory. Arguments --------- db Database. data ASCII table. feature_cols List of column names of the features. If empty, all non-label and non-ID columns will be used. label_col Column name of the labels. ids List of instance IDs. """ # Read in features. columns = data.keys() if not feature_cols: # If there are no features given, use all columns. feature_cols = [c for c in columns if c != label_col] # This converts the features from a table to an array. features = data[feature_cols].as_array() features = features.view(numpy.float64).reshape(features.shape + (-1,)) # Read in labels. labels = numpy.array( data[label_col]).reshape((1, -1, 1)) # We want to support multiple labellers in the future, but currently # don't. So every labeller is the same, ID = 0. labeller_ids = [0] # Encode labels. if self.encode_labels: labels = numpy.apply_along_axis( self.label_encoder.fit_transform, axis=1, arr=labels.reshape(labels.shape[:2]) ).reshape(labels.shape) # Write to database. db.write_features(ids, features) db.write_labels(labeller_ids, ids, labels) def __enter__(self): self._tempdir = tempfile.TemporaryDirectory(prefix='acton') # Read the whole file into a DB. 
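# Editor's note (added comment): the ASCII table is staged into a temporary
# ManagedHDF5Database so reads go through the same code path as a native HDF5
# database; labels are stored as fixed-width byte strings sized to the longest
# label value in the file (the '<S{}' dtype built below).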
self._db_filepath = os.path.join(self._tempdir.name, 'db.h5') data = io_ascii.read(self.path) ids = list(range(len(data[self.label_col]))) max_label_len = max(len(str(i)) for i in data[self.label_col]) label_dtype = '<S{}'.format(max_label_len) self._db = ManagedHDF5Database( self._db_filepath, label_dtype=label_dtype, feature_dtype='float64') self._db.__enter__() try: # We want to handle the encoding ourselves. self._db_from_ascii(self._db, data, self.feature_cols, self.label_col, ids, encode_labels=False) except TypeError: # Encoding isn't supported in the underlying database. self._db_from_ascii(self._db, data, self.feature_cols, self.label_col, ids) return self def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): self._db.__exit__(exc_type, exc_val, exc_tb) self._tempdir.cleanup() delattr(self, '_db') def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ return self._db.read_features(ids) def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.ndarray T x N x F array of label vectors. """ # N.B. Labels are encoded in _db_from_ascii. return self._db.read_labels(labeller_ids, instance_ids) def write_features(self, ids: Sequence[int], features: numpy.ndarray): raise NotImplementedError('Cannot write to read-only database.') def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): raise NotImplementedError('Cannot write to read-only database.') def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ return self._db.get_known_instance_ids() def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ return self._db.get_known_labeller_ids() class GraphReader(Database): """Reads ASCII databases for graph based structure Input file: List of known facts, formatted as relation_id \tab entity_id1 \tab entity_id2, means entity_id1 has relation_id relation with entity_id2, Both entity-id and relation-id start from 0. Output labels: K x N x N ndarrays, where K is the number of relations, N is the number of entities. 0 represents invalid facts, 1 represents valid facts. Output features: E is N x D latent features of the entities. R is K x D x D latent features of the relations. Features are initially random/gibbs sampled, will be sequentially updated after getting labels Attributes ---------- path : str Path to ASCII file. _db : Database Underlying ManagedHDF5Database. _db_filepath : str Path of underlying HDF5 database. _tempdir : str Temporary directory where the underlying HDF5 database is stored. n_dim Number of latent features (size of latent dimension). n_particles: Number of particles for Thompson sampling. gibbs_init Indicates how to sample features (gibbs/random). var_r variance of prior of R var_e variance of prior of E var_x variance of X obs_mask Mask tensor of observed triples. 
given_r whether there is any R given for initialization """ def __init__(self, path: str, n_dim: int, n_particles: int = 5, gibbs_init: bool = True, var_r: int = 1, var_e: int = 1, var_x: float = 0.01, obs_mask: numpy.ndarray= None, given_r: numpy.ndarray = None): """ Parameters ---------- path Path to ASCII file. n_dim Number of latent features (size of latent dimension). n_particles: Number of particles for Thompson sampling. gibbs_init Indicates how to sample features (gibbs/random). var_r variance of prior of R var_e variance of prior of E var_x variance of X obs_mask Mask tensor of observed triples. given_r Given features R if any """ self.path = path self.n_dim = n_dim self.n_particles = n_particles self.gibbs_init = gibbs_init self.var_r = var_r self.var_e = var_e self.var_x = var_x self.obs_mask = obs_mask self.given_r = given_r def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'LabelOnlyASCIIReader' db_kwargs = { 'n_dim': self.n_dim, 'n_particles': self.n_particles, } for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) return proto def _db_from_ascii(self, db: Database, data: astropy.table.Table, ): """Reads an ASCII table into a database. Notes ----- The entire file is copied into memory. Arguments --------- db Database. data ASCII table. """ # triples: relation_id entity_id1 entity_id2 # e.g. (0,2,4) represents entity 2 and 4 have relation 0 triples = data.as_array() triples = triples.view(numpy.int).reshape((triples.shape[0], 3)) self.n_relations = max(triples[:, 0]) + 1 self.n_entities = max(triples[:, 1]) + 1 assert self.n_entities == max(triples[:, -1]) + 1 # only support one labeller # construct label tensor X = {0,1}^{K x N x N} X = numpy.zeros((self.n_relations, self.n_entities, self.n_entities)) for i in triples: X[i[0], i[1], i[2]] = 1 # Initailize features E,R self.E = list() self.R = list() self.RE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim]) self.RTE = numpy.zeros([self.n_relations, self.n_entities, self.n_dim]) if isinstance(self.obs_mask, type(None)): self.obs_mask = numpy.zeros_like(X) else: logging.info( "Initial Total, Positive, Negative Obs : %d / %d / %d", numpy.sum(self.obs_mask), numpy.sum(X[self.obs_mask == 1]), numpy.sum(self.obs_mask) - numpy.sum(X[self.obs_mask == 1])) cur_obs = numpy.zeros_like(X) for k in range(self.n_relations): cur_obs[k][self.obs_mask[k] == 1] = X[k][self.obs_mask[k] == 1] self.obs_sum = numpy.sum(numpy.sum(self.obs_mask, 1), 1) self.valid_relations = numpy.nonzero(numpy.sum(numpy.sum(X, 1), 1))[0] self.features = numpy.zeros( [2 * self.n_entities * self.n_relations, self.n_dim]) self.xi = numpy.zeros([2 * self.n_entities * self.n_relations]) # cur_obs[cur_obs.nonzero()] = 1 if self.gibbs_init and numpy.sum(self.obs_sum) != 0: # initialize latent variables with gibbs sampling E = numpy.random.random([self.n_entities, self.n_dim]) R = numpy.random.random([self.n_relations, self.n_dim, self.n_dim]) for gi in range(20): tic = time.time() if isinstance(self.given_r, type(None)): self._sample_relations( cur_obs, self.obs_mask, E, R, self.var_r) self._sample_entities( cur_obs, self.obs_mask, E, R, self.var_e) else: self._sample_entities( cur_obs, self.obs_mask, E, R, self.var_e) logging.info("Gibbs Init %d: %f", gi, time.time() - tic) for p in range(self.n_particles): self.E.append(E.copy()) 
self.R.append(R.copy()) else: # random initialization for p in range(self.n_particles): self.E.append(numpy.random.random( [self.n_entities, self.n_dim])) self.R.append(numpy.random.random( [self.n_relations, self.n_dim, self.n_dim])) self.E = numpy.asarray(self.E) self.R = numpy.asarray(self.R) # Write to database. db.write_features(self.E, self.R) db.write_labels(X) def __enter__(self): self._tempdir = tempfile.TemporaryDirectory(prefix='acton') # Read the whole file into a DB. self._db_filepath = os.path.join(self._tempdir.name, 'db.h5') data = io_ascii.read(self.path) self._db = GraphDatabase(self._db_filepath) self._db.__enter__() self._db_from_ascii(self._db, data) return self def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): self._db.__exit__(exc_type, exc_val, exc_tb) self._tempdir.cleanup() delattr(self, '_db') def read_features(self) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- Returns ------- E numpy.ndarray P x N x D array of feature vectors. R list each element is numpy.ndarray P x K x D x D array of feature vectors. N x D array of feature vectors. """ return self._db.read_features() def read_labels(self, instance_ids: Sequence[tuple]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- Returns ------- numpy.ndarray array of label vectors. """ # N.B. Labels are encoded in _db_from_ascii. return self._db.read_labels(instance_ids) def write_features(self, ids: Sequence[int], features: numpy.ndarray): raise NotImplementedError('Cannot write to read-only database.') def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): raise NotImplementedError('Cannot write to read-only database.') def _sample_entities(self, X, mask, E, R, var_e, sample_idx=None): RE = self.RE RTE = self.RTE for k in range(self.n_relations): RE[k] = numpy.dot(R[k], E.T).T RTE[k] = numpy.dot(R[k].T, E.T).T if isinstance(sample_idx, type(None)): sample_idx = range(self.n_entities) for i in sample_idx: self._sample_entity(X, mask, E, R, i, var_e, RE, RTE) for k in range(self.n_relations): RE[k][i] = numpy.dot(R[k], E[i]) RTE[k][i] = numpy.dot(R[k].T, E[i]) def _sample_entity(self, X, mask, E, R, i, var_e, RE=None, RTE=None): nz_r = mask[:, i, :].nonzero() nz_c = mask[:, :, i].nonzero() nnz_r = nz_r[0].size nnz_c = nz_c[0].size nnz_all = nnz_r + nnz_c self.features[:nnz_r] = RE[nz_r] self.features[nnz_r:nnz_all] = RTE[nz_c] self.xi[:nnz_r] = X[:, i, :][nz_r] self.xi[nnz_r:nnz_all] = X[:, :, i][nz_c] _xi = self.xi[:nnz_all] * self.features[:nnz_all].T xi = numpy.sum(_xi, 1) / self.var_x _lambda = numpy.identity(self.n_dim) / var_e _lambda += numpy.dot( self.features[:nnz_all].T, self.features[:nnz_all]) / self.var_x inv_lambda = numpy.linalg.inv(_lambda) mu = numpy.dot(inv_lambda, xi) E[i] = multivariate_normal(mu, inv_lambda) numpy.mean(numpy.diag(inv_lambda)) # logging.debug('Mean variance E, %d, %f', i, mean_var) def _sample_relations(self, X, mask, E, R, var_r): EXE = numpy.kron(E, E) for k in self.valid_relations: if self.obs_sum[k] != 0: self._sample_relation(X, mask, E, R, k, EXE, var_r) else: R[k] = numpy.random.normal( 0, var_r, size=[self.n_dim, self.n_dim]) def _sample_relation(self, X, mask, E, R, k, EXE, var_r): _lambda = numpy.identity(self.n_dim ** 2) / var_r xi = numpy.zeros(self.n_dim ** 2) kron = EXE[mask[k].flatten() == 1] if kron.shape[0] != 0: _lambda += numpy.dot(kron.T, kron) xi += numpy.sum(X[k, mask[k] == 1].flatten() * kron.T, 1) _lambda /= 
self.var_x # mu = numpy.linalg.solve(_lambda, xi) / self.var_x inv_lambda = numpy.linalg.inv(_lambda) mu = numpy.dot(inv_lambda, xi) / self.var_x # R[k] = normal(mu, _lambda).reshape([self.n_dim, self.n_dim]) R[k] = multivariate_normal( mu, inv_lambda).reshape([self.n_dim, self.n_dim]) numpy.mean(numpy.diag(inv_lambda)) # logging.info('Mean variance R, %d, %f', k, mean_var) def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ return self._db.get_known_instance_ids() def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ return self._db.get_known_labeller_ids() class PandasReader(Database): """Reads HDF5 databases. Attributes ---------- feature_cols : List[str] List of feature datasets. label_col : str Name of label dataset. n_features : int Number of features. n_instances : int Number of instances. n_labels : int Number of labels per instance. path : str Path to HDF5 file. encode_labels : bool Whether to encode labels as integers. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. _df : pandas.DataFrame Pandas dataframe. """ def __init__(self, path: str, feature_cols: List[str], label_col: str, key: str, encode_labels: bool=True, label_encoder: sklearn.preprocessing.LabelEncoder=None): """ Parameters ---------- path Path to HDF5 file. feature_cols List of feature columns. If none are specified, then all non-label, non-ID columns will be used. label_col Name of label dataset. key Pandas key. encode_labels Whether to encode labels as integers. label_encoder Encodes labels as integers. If not specified, the label column will be read and a label encoding generated. """ self.path = path self.feature_cols = feature_cols self.label_col = label_col self.key = key self._df = pandas.read_hdf(self.path, self.key) self.encode_labels = encode_labels self.label_encoder = label_encoder if self.label_encoder and not self.encode_labels: raise ValueError('label_encoder specified but encode_labels is ' 'False') if self.label_encoder is None: self.label_encoder = sklearn.preprocessing.LabelEncoder() if not self.feature_cols: self.feature_cols = [k for k in self._df.keys() if k != self.label_col] self.n_instances = len(self._df[self.label_col]) self.n_features = len(self.feature_cols) def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'PandasReader' db_kwargs = { 'feature_cols': self.feature_cols, 'label_col': self.label_col, 'key': self.key, 'encode_labels': self.encode_labels} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def __enter__(self): return self def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): delattr(self, '_df') def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ # TODO(MatthewJA): Optimise this. # Allocate output features array. features = numpy.zeros((len(ids), self.n_features)) # For each ID, get the corresponding features. 
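        # Descriptive note (added): each requested ID is looked up
        # positionally with DataFrame.iloc and its feature columns are copied
        # across one at a time (hence the TODO above about optimising this).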
for out_index, id_ in enumerate(ids): sel = self._df.iloc[id_] for feature_index, feature in enumerate(self.feature_cols): features[out_index, feature_index] = sel[feature] return features def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.ndarray T x N x 1 array of label vectors. """ # Draw a label to get the dtype. dtype = type(self._df.iloc[0][self.label_col]) # Allocate output labels array. labels = numpy.zeros( (len(labeller_ids), len(instance_ids), 1), dtype=dtype) if len(labeller_ids) > 1: raise NotImplementedError('Multiple labellers not yet supported.') # For each ID, get the corresponding labels. for out_index, id_ in enumerate(instance_ids): sel = self._df.iloc[int(id_)] labels[0, out_index, 0] = sel[self.label_col] if labels.shape[2] != 1: raise NotImplementedError('Multidimensional labels not currently ' 'supported.') # Encode labels. if self.encode_labels: labels = numpy.apply_along_axis( self.label_encoder.fit_transform, axis=1, arr=labels.reshape(labels.shape[:2]) ).reshape(labels.shape) return labels def write_features(self, ids: Sequence[int], features: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ return [i for i in range(self.n_instances)] def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ raise NotImplementedError() class FITSReader(Database): """Reads FITS databases. Attributes ---------- hdu_index : int Index of HDU in the FITS file. feature_cols : List[str] List of feature columns. label_col : str Name of label column. n_features : int Number of features. n_instances : int Number of instances. n_labels : int Number of labels per instance. path : str Path to FITS file. encode_labels : bool Whether to encode labels as integers. label_encoder : sklearn.preprocessing.LabelEncoder Encodes labels as integers. _hdulist : astropy.io.fits.HDUList FITS HDUList. """ def __init__(self, path: str, feature_cols: List[str], label_col: str, hdu_index: int=1, encode_labels: bool=True, label_encoder: sklearn.preprocessing.LabelEncoder=None): """ Parameters ---------- path Path to FITS file. feature_cols List of feature columns. If none are specified, then all non-label, non-ID columns will be used. label_col Name of label dataset. hdu_index Index of HDU in the FITS file. Default is 1, i.e., the first extension in the FITS file. encode_labels Whether to encode labels as integers. label_encoder Encodes labels as integers. If not specified, the label column will be read and a label encoding generated. """ self.path = path self.feature_cols = feature_cols self.label_col = label_col self.hdu_index = hdu_index self.encode_labels = encode_labels self.label_encoder = label_encoder if self.label_encoder and not self.encode_labels: raise ValueError('label_encoder specified but encode_labels is ' 'False') if self.label_encoder is None: self.label_encoder = sklearn.preprocessing.LabelEncoder() # These will be set when the FITS file is opened. 
self.n_instances = None self.n_features = None def to_proto(self) -> DatabasePB: """Serialises this database as a protobuf. Returns ------- DatabasePB Protobuf representing this database. """ proto = DatabasePB() proto.path = self.path proto.class_name = 'FITSReader' db_kwargs = { 'feature_cols': self.feature_cols, 'label_col': self.label_col, 'hdu_index': self.hdu_index, 'encode_labels': self.encode_labels} for key, value in db_kwargs.items(): kwarg = proto.kwarg.add() kwarg.key = key kwarg.value = json.dumps(value) proto.label_encoder.CopyFrom(serialise_encoder(self.label_encoder)) return proto def __enter__(self): self._hdulist = io_fits.open(self.path) # If we haven't specified columns, use all except the label column. cols = self._hdulist[self.hdu_index].columns.names if not self.feature_cols: self.feature_cols = [k for k in cols if k != self.label_col] self.n_features = len(self.feature_cols) self.n_instances = \ self._hdulist[self.hdu_index].data[self.label_col].ravel().shape[0] return self def __exit__(self, exc_type: Exception, exc_val: object, exc_tb: Traceback): self._hdulist.close() delattr(self, '_hdulist') def read_features(self, ids: Sequence[int]) -> numpy.ndarray: """Reads feature vectors from the database. Parameters ---------- ids Iterable of IDs. Returns ------- numpy.ndarray N x D array of feature vectors. """ # TODO(MatthewJA): Optimise this. # Allocate output features array. features = numpy.zeros((len(ids), self.n_features)) for f_index, col in enumerate(self.feature_cols): col = self._hdulist[self.hdu_index].data[col] features[:, f_index] = col[ids] return features def read_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int]) -> numpy.ndarray: """Reads label vectors from the database. Parameters ---------- labeller_ids Iterable of labeller IDs. instance_ids Iterable of instance IDs. Returns ------- numpy.p T x N x 1 array of label vectors. """ label_col = self._hdulist[self.hdu_index].data[self.label_col] labels = label_col[instance_ids].reshape((1, -1, 1)) # Encode labels. if self.encode_labels: labels = numpy.apply_along_axis( self.label_encoder.fit_transform, axis=1, arr=labels.reshape(labels.shape[:2]) ).reshape(labels.shape) return labels def write_features(self, ids: Sequence[int], features: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def write_labels(self, labeller_ids: Sequence[int], instance_ids: Sequence[int], labels: numpy.ndarray): raise PermissionError('Cannot write to read-only database.') def get_known_instance_ids(self) -> List[int]: """Returns a list of known instance IDs. Returns ------- List[str] A list of known instance IDs. """ return [i for i in range(self.n_instances)] def get_known_labeller_ids(self) -> List[int]: """Returns a list of known labeller IDs. Returns ------- List[str] A list of known labeller IDs. """ raise NotImplementedError() # For safe string-based access to database classes. DATABASES = { 'ASCIIReader': ASCIIReader, 'GraphReader': GraphReader, 'HDF5Reader': HDF5Reader, 'FITSReader': FITSReader, 'ManagedHDF5Database': ManagedHDF5Database, 'GraphDatabase': GraphDatabase, 'PandasReader': PandasReader, }
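
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): shows how a
# reader looked up through the DATABASES registry might be driven. The file
# name 'table.csv' and the 'label' column name are hypothetical assumptions,
# so treat this as a sketch rather than a tested entry point.
if __name__ == '__main__':  # pragma: no cover
    reader_class = DATABASES['ASCIIReader']
    with reader_class('table.csv', feature_cols=[], label_col='label') as db:
        ids = db.get_known_instance_ids()
        features = db.read_features(ids)   # N x D feature matrix
        labels = db.read_labels([0], ids)  # 1 x N x 1 label tensor
        print(features.shape, labels.shape)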
bsd-3-clause
pv/scikit-learn
sklearn/tests/test_cross_validation.py
70
41943
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy import stats from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer, LabelBinarizer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], P_sparse.shape[1])) return self def predict(self, T): if self.allow_nd: T = T.reshape(len(T), -1) return T[:, 0] def score(self, X=None, Y=None): return 1. 
/ (1 + np.abs(self.a)) def get_params(self, deep=False): return {'a': self.a, 'allow_nd': self.allow_nd} X = np.ones((10, 2)) X_sparse = coo_matrix(X) W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))), shape=(10, 1)) P_sparse = coo_matrix(np.eye(5)) y = np.arange(10) // 2 ############################################################################## # Tests def check_valid_split(train, test, n_samples=None): # Use python sets to get more informative assertion failure messages train, test = set(train), set(test) # Train and test split should not overlap assert_equal(train.intersection(test), set()) if n_samples is not None: # Check that the union of train an test split cover all the indices assert_equal(train.union(test), set(range(n_samples))) def check_cv_coverage(cv, expected_n_iter=None, n_samples=None): # Check that a all the samples appear at least once in a test fold if expected_n_iter is not None: assert_equal(len(cv), expected_n_iter) else: expected_n_iter = len(cv) collected_test_samples = set() iterations = 0 for train, test in cv: check_valid_split(train, test, n_samples=n_samples) iterations += 1 collected_test_samples.update(test) # Check that the accumulated test samples cover the whole dataset assert_equal(iterations, expected_n_iter) if n_samples is not None: assert_equal(collected_test_samples, set(range(n_samples))) def test_kfold_valueerrors(): # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.KFold, 3, 4) # Check that a warning is raised if the least populated class has too few # members. y = [3, 3, -1, -1, 2] cv = assert_warns_message(Warning, "The least populated class", cval.StratifiedKFold, y, 3) # Check that despite the warning the folds are still computed even # though all the classes are not necessarily represented at on each # side of the split at each split check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y)) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) assert_raises(ValueError, cval.StratifiedKFold, y, 0) assert_raises(ValueError, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) 
assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle) for i in range(11, 17)]: sizes = [] for _, test in skf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), skf.n) def test_shuffle_kfold(): # Check the indices are shuffled properly, and that all indices are # returned in the different test folds kf = cval.KFold(300, 3, shuffle=True, random_state=0) ind = np.arange(300) all_folds = None for train, test in kf: sorted_array = np.arange(100) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(101, 200) assert_true(np.any(sorted_array != ind[train])) sorted_array = np.arange(201, 300) assert_true(np.any(sorted_array != ind[train])) if all_folds is None: all_folds = ind[test].copy() else: all_folds = np.concatenate((all_folds, ind[test])) all_folds.sort() assert_array_equal(all_folds, ind) def test_shuffle_stratifiedkfold(): # Check that shuffling is happening when requested, and for proper # sample coverage labels = [0] * 20 + [1] * 20 kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0)) kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1)) for (_, test0), (_, test1) in zip(kf0, kf1): assert_true(set(test0) != set(test1)) check_cv_coverage(kf0, expected_n_iter=5, n_samples=40) def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372 # The digits samples are dependent: they are apparently grouped by authors # although we don't have any information on the groups segment locations # for this data. 
We can highlight this fact be computing k-fold cross- # validation with and without shuffling: we observe that the shuffling case # wrongly makes the IID assumption and is therefore too optimistic: it # estimates a much higher accuracy (around 0.96) than than the non # shuffling variant (around 0.86). digits = load_digits() X, y = digits.data[:800], digits.target[:800] model = SVC(C=10, gamma=0.005) n = len(y) cv = cval.KFold(n, 5, shuffle=False) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) # Shuffling the data artificially breaks the dependency and hides the # overfitting of the model with regards to the writing style of the authors # by yielding a seriously overestimated score: cv = cval.KFold(n, 5, shuffle=True, random_state=0) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) cv = cval.KFold(n, 5, shuffle=True, random_state=1) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(mean_score, 0.95) # Similarly, StratifiedKFold should try to shuffle the data as little # as possible (while respecting the balanced class constraints) # and thus be able to detect the dependency by not overestimating # the CV score either. As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2) y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2]) # Check that errors are raised if there is not enough samples assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8) # Train size or test size too small assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2) assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2) def test_stratified_shuffle_split_iter(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), np.array([-1] * 800 + [1] * 50) ] for y in ys: sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33, random_state=0) for train, test in sss: 
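            # Every class present in the train split should also appear in the
            # test split, and the class proportions should agree to roughly
            # one decimal place.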
assert_array_equal(np.unique(y[train]), np.unique(y[test])) # Checks if folds keep classes proportions p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(len(y[train]))) p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(len(y[test]))) assert_array_almost_equal(p_train, p_test, 1) assert_equal(y[train].size + y[test].size, y.size) assert_array_equal(np.lib.arraysetops.intersect1d(train, test), []) def test_stratified_shuffle_split_even(): # Test the StratifiedShuffleSplit, indices are drawn with a # equal chance n_folds = 5 n_iter = 1000 def assert_counts_are_ok(idx_counts, p): # Here we test that the distribution of the counts # per index is close enough to a binomial threshold = 0.05 / n_splits bf = stats.binom(n_splits, p) for count in idx_counts: p = bf.pmf(count) assert_true(p > threshold, "An index is not drawn with chance corresponding " "to even draws") for n_samples in (6, 22): labels = np.array((n_samples // 2) * [0, 1]) splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter, test_size=1. / n_folds, random_state=0) train_counts = [0] * n_samples test_counts = [0] * n_samples n_splits = 0 for train, test in splits: n_splits += 1 for counter, ids in [(train_counts, train), (test_counts, test)]: for id in ids: counter[id] += 1 assert_equal(n_splits, n_iter) assert_equal(len(train), splits.n_train) assert_equal(len(test), splits.n_test) assert_equal(len(set(train).intersection(test)), 0) label_counts = np.unique(labels) assert_equal(splits.test_size, 1.0 / n_folds) assert_equal(splits.n_train + splits.n_test, len(labels)) assert_equal(len(label_counts), 2) ex_test_p = float(splits.n_test) / n_samples ex_train_p = float(splits.n_train) / n_samples assert_counts_are_ok(train_counts, ex_train_p) assert_counts_are_ok(test_counts, ex_test_p) def test_predefinedsplit_with_kfold_split(): # Check that PredefinedSplit can reproduce a split generated by Kfold. 
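    # folds[i] == k marks sample i as belonging to the test set of fold k; the
    # loop below copies these assignments out of an ordinary KFold so that
    # PredefinedSplit can be compared against it.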
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target 
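    # The precomputed-kernel SVM is given the linear Gram matrix X.dot(X.T),
    # so its cross-validation scores should match those of an ordinary
    # linear-kernel SVC fitted on X directly.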
linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) 
with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False) assert_true(isinstance(X_train_arr, np.ndarray)) assert_true(isinstance(X_test_arr, np.ndarray)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. 
determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error") expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(mse_scores, expected_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] def 
test_shufflesplit_errors(): assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1, train_size=0.95) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3) assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j) assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None, train_size=None) def test_shufflesplit_reproducible(): # Check that iterating twice on the ShuffleSplit gives the same # sequence of train-test when the random_state is given ss = cval.ShuffleSplit(10, random_state=21) assert_array_equal(list(a for a, b in ss), list(a for a, b in ss)) def test_safe_split_with_precomputed_kernel(): clf = SVC() clfp = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target K = np.dot(X, X.T) cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0) tr, te = list(cv)[0] X_tr, y_tr = cval._safe_split(clf, X, y, tr) K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr) assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T)) X_te, y_te = cval._safe_split(clf, X, y, te, tr) K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr) assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T)) def test_cross_val_score_allow_nans(): # Check that cross_val_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.cross_val_score(p, X, y, cv=5) def test_train_test_split_allow_nans(): # Check that train_test_split allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) cval.train_test_split(X, y, test_size=0.2, random_state=42) def test_permutation_test_score_allow_nans(): # Check that permutation_test_score allows input data with NaNs X = np.arange(200, dtype=np.float64).reshape(10, -1) X[2, :] = np.nan y = np.repeat([0, 1], X.shape[0] / 2) p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) cval.permutation_test_score(p, X, y, cv=5) def test_check_cv_return_types(): X = np.ones((9, 2)) cv = cval.check_cv(3, X, classifier=False) assert_true(isinstance(cv, cval.KFold)) y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1]) cv = cval.check_cv(3, X, y_binary, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2]) cv = cval.check_cv(3, X, y_multiclass, classifier=True) assert_true(isinstance(cv, cval.StratifiedKFold)) X = np.ones((5, 2)) y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]] with warnings.catch_warnings(record=True): # deprecated sequence of sequence format cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs) cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True) assert_true(isinstance(cv, cval.KFold)) y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]]) cv = cval.check_cv(3, X, y_multioutput, classifier=True) assert_true(isinstance(cv, cval.KFold)) def test_cross_val_score_multilabel(): X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1], [-2, 1], [0, 0], [-2, -1], [-1, 
-2], [1, -2]]) y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]) clf = KNeighborsClassifier(n_neighbors=1) scoring_micro = make_scorer(precision_score, average='micro') scoring_macro = make_scorer(precision_score, average='macro') scoring_samples = make_scorer(precision_score, average='samples') score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5) score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5) score_samples = cval.cross_val_score(clf, X, y, scoring=scoring_samples, cv=5) assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3]) assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4]) def test_cross_val_predict(): boston = load_boston() X, y = boston.data, boston.target cv = cval.KFold(len(boston.target)) est = Ridge() # Naive loop (should be same as cross_val_predict): preds2 = np.zeros_like(y) for train, test in cv: est.fit(X[train], y[train]) preds2[test] = est.predict(X[test]) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_array_almost_equal(preds, preds2) preds = cval.cross_val_predict(est, X, y) assert_equal(len(preds), len(y)) cv = cval.LeaveOneOut(len(y)) preds = cval.cross_val_predict(est, X, y, cv=cv) assert_equal(len(preds), len(y)) Xsp = X.copy() Xsp *= (Xsp > np.median(Xsp)) Xsp = coo_matrix(Xsp) preds = cval.cross_val_predict(est, Xsp, y) assert_array_almost_equal(len(preds), len(y)) preds = cval.cross_val_predict(KMeans(), X) assert_equal(len(preds), len(y)) def bad_cv(): for i in range(4): yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8]) assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv()) def test_cross_val_predict_input_types(): clf = Ridge() # Smoke test predictions = cval.cross_val_predict(clf, X, y) assert_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_equal(predictions.shape, (10, 2)) predictions = cval.cross_val_predict(clf, X_sparse, y) assert_array_equal(predictions.shape, (10,)) # test with multioutput y predictions = cval.cross_val_predict(clf, X_sparse, X) assert_array_equal(predictions.shape, (10, 2)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) predictions = cval.cross_val_predict(clf, X, y.tolist()) # test with 3d X and X_3d = X[:, :, np.newaxis] check_3d = lambda x: x.ndim == 3 clf = CheckingClassifier(check_X=check_3d) predictions = cval.cross_val_predict(clf, X_3d, y) assert_array_equal(predictions.shape, (10,)) def test_cross_val_predict_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_predict(clf, X_df, y_ser) def test_sparse_fit_params(): iris = load_iris() X, y = iris.data, iris.target clf = MockClassifier() fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))} a = cval.cross_val_score(clf, X, y, fit_params=fit_params) 
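    # MockClassifier.score returns 1 / (1 + |a|) and a defaults to 0, so each
    # of the three default CV folds must score exactly 1.0.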
assert_array_equal(a, np.ones(3)) def test_check_is_partition(): p = np.arange(100) assert_true(cval._check_is_partition(p, 100)) assert_false(cval._check_is_partition(np.delete(p, 23), 100)) p[0] = 23 assert_false(cval._check_is_partition(p, 100))
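
# Minimal usage sketch (not part of the original test suite): shows how the
# legacy cross_validation generators exercised above are typically consumed.
# The toy arrays and the helper name are assumptions added for illustration.
def _example_kfold_usage():
    X_toy = np.arange(20).reshape(10, 2)
    y_toy = np.arange(10) % 2
    # KFold(n, n_folds) yields disjoint (train, test) index arrays whose test
    # halves together cover all n samples exactly once.
    for train, test in cval.KFold(10, n_folds=5):
        assert len(np.intersect1d(train, test)) == 0
        X_toy[train], X_toy[test], y_toy[train], y_toy[test]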
bsd-3-clause
mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm
hardware_configure/sigsys.py
1
81778
""" Signals and Systems Function Module Copyright (c) March 2017, Mark Wickert All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. """ """ Notes ----- The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time. The formatted docstrings for the library follow. Click index in the upper right to get an alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options. Examples -------- >>> import ssd >>> # Commands then need to be prefixed with ssd., i.e., >>> ssd.tri(t,tau) >>> # A full import of the module, to avoid the the need to prefix with ssd, is: >>> from ssd import * Function Catalog ---------------- """ from matplotlib import pylab from matplotlib import mlab import numpy as np from numpy import fft import matplotlib.pyplot as plt from scipy import signal from scipy.io import wavfile def CIC(M,K): """ % b = CIC(M,K) % A functional form implementation of a cascade of integrator comb (CIC) % filters. Commonly used in multirate signal processing digital % down-converters and digital up-converters. A true CIC filter requires no % multiplies, only add and subtract operations. The functional form created % here is a simple FIR requiring real coefficient multiplies via filter() % ======================================================================== % M = Effective number of taps per section (typically the decimation % factor). % K = The number of CIC sections cascaded (larger K gives the filter a % wider image rejection bandwidth. % b = FIR filter coefficients for a simple direct form implementation % using the filter() function. 
% ======================================================================== % % Mark Wickert November 2007 """ if K == 1: b = np.ones(M) else: h = np.ones(M) b = h for i in range(1,K): b = signal.convolve(b,h) # cascade by convolving impulse responses # Make filter have unity gain at DC return b/np.sum(b) def ten_band_eq_filt(x,GdB,Q=3.5): """ Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB. The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate is assumed to be 44.1 kHz. Parameters ---------- x : ndarray of the input signal samples GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB] Q : Quality factor vector for each of the NB peaking filters Returns ------- y : ndarray of output signal samples Examples -------- >>> # Test with white noise >>> w = randn(100000) >>> y = ten_band_eq_filt(x,GdB) >>> psd(y,2**10,44.1) """ fs = 44100.0 # Hz NB = len(GdB) Fc = 31.25*2**np.arange(10) B = np.zeros((NB,3)) A = np.zeros((NB,3)) # Create matrix of cascade coefficients for k in range(NB): [b,a] = peaking(GdB[k],Fc[k],Q) B[k,:] = b A[k,:] = a #Pass signal x through the cascade of ten filters y = np.zeros(len(x)) for k in range(NB): if k == 0: y = signal.lfilter(B[k,:],A[k,:],x) else: y = signal.lfilter(B[k,:],A[k,:],y) return y def ten_band_eq_resp(GdB,Q=3.5): """ Create a frequency response magnitude plot in dB of a ten band equalizer using a semilogplot (semilogx()) type plot Parameters ---------- GdB : Gain vector for 10 peaking filters [G0,...,G9] Q : Quality factor for each peaking filter (default 3.5) Returns ------- Nothing : two plots are created Examples -------- >>> ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0]) """ fs = 44100.0 # Hz Fc = 31.25*2**np.arange(10) NB = len(GdB) B = np.zeros((NB,3)); A = np.zeros((NB,3)); # Create matrix of cascade coefficients for k in range(NB): b,a = peaking(GdB[k],Fc[k],Q,fs) B[k,:] = b A[k,:] = a # Create the cascade frequency response F = np.logspace(1,np.log10(20e3),1000) H = np.ones(len(F))*np.complex(1.0,0.0) for k in range(NB): w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs) H *= Htemp plt.figure(figsize=(6,4)) plt.subplot(211) plt.semilogx(F,20*np.log10(abs(H))) plt.axis([10, fs/2, -12, 12]) plt.grid() plt.title('Ten-Band Equalizer Frequency Response') plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.subplot(212) plt.stem(np.arange(NB),GdB,'b','bs') #plt.bar(np.arange(NB)-.1,GdB,0.2) plt.axis([0, NB-1, -12, 12]) plt.xlabel('Equalizer Band Number') plt.ylabel('Gain Set (dB)') plt.grid() def peaking(GdB, fc, Q=3.5, fs=44100.): """ A second-order peaking filter having GdB gain at fc and approximately and 0 dB otherwise. The filter coefficients returns correspond to a biquadratic system function containing five parameters. Parameters ---------- GdB : Lowpass gain in dB fc : Center frequency in Hz Q : Filter Q which is inversely proportional to bandwidth fs : Sampling frquency in Hz Returns ------- b : ndarray containing the numerator filter coefficients a : ndarray containing the denominator filter coefficients Examples -------- >>> from scipy import signal >>> b,a = peaking(2.0,500) >>> b,a = peaking(-5.0,500,4) >>> # Assuming pylab imported >>> f = logspace(1,5,400) >>> .w,H = signal.freqz(b,a,2*pi*f/44100) >>> semilogx(f,20*log10(abs(H))) """ mu = 10**(GdB/20.) 
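    # mu is the linear (not dB) gain at the peak, 10**(GdB/20); kq on the next
    # line is a bandwidth-setting term derived from Q and the normalized center
    # frequency fc/fs via np.tan().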
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q)) Cpk = (1 + kq *mu)/(1 + kq) b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu) b2 = (1 - kq*mu)/(1 + kq*mu) a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq) a2 = (1 - kq)/(1 + kq) b = Cpk*np.array([1, b1, b2]) a = np.array([1, a1, a2]) return b,a def ex6_2(n): """ Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> n = arange(-5,8) >>> x = ex6_2(n) >>> stem(n,x) # creates a stem plot of x vs n """ x = np.zeros(len(n)) for k, nn in enumerate(n): if nn >= -2 and nn <= 5: x[k] = 8 - nn return x def position_CD(Ka,out_type = 'fb_exact'): """ CD sled position control case study of Chapter 18. The function returns the closed-loop and open-loop system function for a CD/DVD sled position control system. The loop amplifier gain is the only variable that may be changed. The returned system function can however be changed. Parameters ---------- Ka : loop amplifier gain, start with 50. out_type : 'open_loop' for open loop system function out_type : 'fb_approx' for closed-loop approximation out_type : 'fb_exact' for closed-loop exact Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Notes ----- With the exception of the loop amplifier gain, all other parameters are hard-coded from Case Study example. Examples ------ >>> b,a = position_CD(Ka,'fb_approx') >>> b,a = position_CD(Ka,'fb_exact') """ rs = 10/(2*np.pi) # Load b and a ndarrays with the coefficients if out_type.lower() == 'open_loop': b = np.array([Ka*4000*rs]) a = np.array([1,1275,31250,0]) elif out_type.lower() == 'fb_approx': b = np.array([3.2*Ka*rs]) a = np.array([1, 25, 3.2*Ka*rs]) elif out_type.lower() == 'fb_exact': b = np.array([4000*Ka*rs]) a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs]) else: print('out_type must be: open_loop, fb_approx, or fc_exact') return 1 return b, a def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'): """ Cruise control with PI controller and hill disturbance. This function returns various system function configurations for a the cruise control Case Study example found in the supplementary article. The plant model is obtained by the linearizing the equations of motion and the controller contains a proportional and integral gain term set via the closed-loop parameters natuarl frequency wn (rad/s) and damping zeta. 
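    In terms of the code below, the PI gains are Kp = T/vmax*(2*zeta*wn - 1/tau)
    and Ki = T/vmax*wn**2 with tau = T/2*vmax/vcruise, so the closed-loop
    characteristic polynomial is s**2 + 2*zeta*wn*s + wn**2.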
Parameters ---------- wn : closed-loop natural frequency in rad/s, nominally 0.1 zeta : closed-loop damping factor, nominally 1.0 T : vehicle time constant, nominally 10 s vcruise : cruise velocity set point, nominally 75 mph vmax : maximum vehicle velocity, nominally 120 mph tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function 'H' : closed-loop system function V(s)/R(s) 'HE' : closed-loop system function E(s)/R(s) 'HVW' : closed-loop system function V(s)/W(s) 'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Examples -------- >>> # return the closed-loop system function output/input velocity >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H') >>> # return the closed-loop system function loop error/hill disturbance >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED') """ tau = T/2.*vmax/vcruise g = 9.8 g *= 3*60**2/5280. # m/s to mph conversion Kp = T/vmax*(2*zeta*wn-1/tau) Ki = T/vmax*wn**2 K = Kp*vmax/T print('wn = ', np.sqrt(K/(Kp/Ki))) print('zeta = ', (K + 1/tau)/(2*wn)) a = np.array([1, 2*zeta*wn, wn**2]) if tf_mode == 'H': b = np.array([K, wn**2]) elif tf_mode == 'HE': b = np.array([1, 2*zeta*wn-K, 0.]) elif tf_mode == 'HVW': b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)]) b *= Kp elif tf_mode == 'HED': b = np.array([g, 0]) else: print('tf_mode must be: H, HE, HVU, or HED') return 1 return b, a def splane(b,a,auto_scale=True,size=[-1,1,-1,1]): """ Create an s-plane pole-zero plot. As input the function uses the numerator and denominator s-domain system function coefficient ndarrays b and a respectively. Assumed to be stored in descending powers of s. Parameters ---------- b : numerator coefficient ndarray. a : denominator coefficient ndarray. auto_scale : True size : [xmin,xmax,ymin,ymax] plot scaling when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. 
Examples -------- >>> # Here the plot is generated using auto_scale >>> splane(b,a) >>> # Here the plot is generated using manual scaling >>> splane(b,a,False,[-10,1,-10,10]) """ M = len(b) - 1 N = len(a) - 1 plt.figure(figsize=(5,5)) #plt.axis('equal') N_roots = np.array([0.0]) if M > 0: N_roots = np.roots(b) D_roots = np.array([0.0]) if N > 0: D_roots = np.roots(a) if auto_scale: size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5 size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5 size[1] = max(size[1],0.5) size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5 size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5 plt.plot([size[0],size[1]],[0,0],'k--') plt.plot([0,0],[size[2],size[3]],'r--') # Plot labels if multiplicity greater than 1 x_scale = size[1]-size[0] y_scale = size[3]-size[2] x_off = 0.03 y_off = 0.01 if M > 0: #N_roots = np.roots(b) N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg') plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = mlab.find(N_mult>1) for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10) if N > 0: #D_roots = np.roots(a) D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg') plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = mlab.find(D_mult>1) for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis(np.array(size)) return M,N def OS_filter(x,h,N,mode=0): """ Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1) """ P = len(h) # zero pad start of x so first frame can recover first true samples of x x = np.hstack((np.zeros(P-1),x)) L = N - P + 1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad end of x to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(Nframe*N) # create an instrumentation matrix to observe the overlap and save behavior y_mat = np.zeros((Nframe,Nframe*N)) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:k*L+N] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) # imag part should be zero y[k*L+P-1:k*L+N] = yk[P-1:] y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[P-1:Nx], y_mat[:,P-1:Nx] else: return y[P-1:Nx] def OA_filter(x,h,N,mode=0): """ Overlap and add transform domain FIR filtering. This function implements the classical overlap and add method of transform domain filtering using a length P FIR filter. 
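    A quick consistency check (a sketch; it assumes NumPy and SciPy are imported,
    as at the top of this module) is that the overlap-and-add result matches
    direct FIR filtering:

    >>> import numpy as np
    >>> from scipy import signal
    >>> n = np.arange(0,100)
    >>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)/10.
    >>> np.allclose(OA_filter(x,h,64), signal.lfilter(h,1,x))
    True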
Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OA_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OA_filter(x,h,N,1) """ P = len(h) L = N - P + 1 # need N >= L + P -1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(Nframe*N) # create an instrumentation matrix to observe the overlap and add behavior y_mat = np.zeros((Nframe,Nframe*N)) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:(k+1)*L] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) y[k*L:k*L+N] += yk y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[0:Nx], y_mat[:,0:Nx] else: return y[0:Nx] def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)): """ Lowpass sampling theorem plotting function. Display the spectrum of a sampled signal after setting the bandwidth, sampling frequency, maximum display frequency, and spectral shape. Parameters ---------- fb : spectrum lowpass bandwidth in Hz fs : sampling frequency in Hz fmax : plot over [-fmax,fmax] shape : 'tri' or 'line' N : number of translates, N positive and N negative fsize : the size of the figure window, default (6,4) Returns ------- Nothing : A plot window opens containing the spectrum plot Examples -------- >>> # No aliasing as 10 < 25/2 >>> lp_samp(10,25,50,10) >>> # Aliasing as 15 > 25/2 >>> lp_samp(15,25,50,10) """ plt.figure(figsize=fsize) # define the plot interval f = np.arange(-fmax,fmax+fmax/200.,fmax/200.) A = 1.0; line_ampl = A/2.*np.array([0, 1]) # plot the lowpass spectrum in black if shape.lower() == 'tri': plt.plot(f,lp_tri(f,fb)) elif shape.lower() == 'line': plt.plot([fb, fb],line_ampl,'b', linewidth=2) plt.plot([-fb, -fb],line_ampl,'b', linewidth=2) else: print('shape must be tri or line') # overlay positive and negative frequency translates for n in range(N): if shape.lower() == 'tri': plt.plot(f,lp_tri(f-(n+1)*fs,fb),'--r') plt.plot(f,lp_tri(f+(n+1)*fs,fb),'--g') elif shape.lower() == 'line': plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2) plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2) else: print('shape must be tri or line') #plt.title('Lowpass Sampling Theorem for a Real Signal: Blk = orig, dotted = translates') plt.ylabel('Spectrum Magnitude') plt.xlabel('Frequency in Hz') plt.axis([-fmax,fmax,0,1]) plt.grid() def lp_tri(f, fb): """ Triangle spectral shape function used by lp_spec. This is a support function for the lowpass spectrum plotting function lp_spec(). Parameters ---------- f : ndarray containing frequency samples fb : the bandwidth as a float constant Returns ------- x : ndarray of spectrum samples for a single triangle shape Examples -------- >>> x = lp_tri(f, fb) """ x = np.zeros(len(f)) for k in range(len(f)): if abs(f[k]) <= fb: x[k] = 1 - abs(f[k])/float(fb) return x def sinusoidAWGN(x,SNRdB): """ Add white Gaussian noise to a single real sinusoid. 
Input a single sinusoid to this function and it returns a noisy sinusoid at a specific SNR value in dB. Sinusoid power is calculated using np.var. Parameters ---------- x : Input signal as ndarray consisting of a single sinusoid SNRdB : SNR in dB for output sinusoid Returns ------- y : Noisy sinusoid return vector Examples -------- >>> # set the SNR to 10 dB >>> n = arange(0,10000) >>> x = cos(2*pi*0.04*n) >>> y = sinusoidAWGN(x,10.0) """ # Estimate signal power x_pwr = np.var(x) # Create noise vector noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x)); return x + noise def simpleQuant(x,Btot,Xmax,Limit): """ A simple rounding quantizer for bipolar signals having Btot = B + 1 bits. This function models a quantizer that employs Btot bits that has one of three selectable limiting types: saturation, overflow, and none. The quantizer is bipolar and implements rounding. Parameters ---------- x : input signal ndarray to be quantized Btot : total number of bits in the quantizer, e.g. 16 Xmax : quantizer full-scale dynamic range is [-Xmax, Xmax] Limit = Limiting of the form 'sat', 'over', 'none' Returns ------- xq : quantized output ndarray Notes ----- The quantization can be formed as e = xq - x Examples -------- >>> n = arange(0,10000) >>> x = cos(2*pi*0.211*n) >>> y = sinusoidAWGN(x,90) >>> yq = simpleQuant(y,12,1,sat) >>> psd(y,2**10,Fs=1); >>> psd(yq,2**10,Fs=1) """ B = Btot-1 x = x/Xmax if Limit.lower() == 'over': xq = (np.mod(np.round(x*2**B)+2**B,2**Btot)-2**B)/2**B elif Limit.lower() == 'sat': xq = np.round(x*2**B)+2**B s1 = mlab.find(xq >= 2**Btot-1) s2 = mlab.find(xq < 0) xq[s1] = (2**Btot - 1)*np.ones(len(s1)) xq[s2] = np.zeros(len(s2)) xq = (xq - 2**B)/2**B elif Limit.lower() == 'none': xq = np.round(x*2**B)/2**B else: print('limit must be the string over, sat, or none') return xq*Xmax def prin_alias(f_in,fs): """ Calculate the principle alias frequencies. Given an array of input frequencies the function returns an array of principle alias frequencies. Parameters ---------- f_in : ndarray of input frequencies fs : sampling frequency Returns ------- f_out : ndarray of principle alias frequencies Examples -------- >>> # Linear frequency sweep from 0 to 50 Hz >>> f_in = arange(0,50,0.1) >>> # Calculate principle alias with fs = 10 Hz >>> f_out = prin_alias(f_in,10) """ return abs(np.rint(f_in/fs)*fs - f_in) """ Principle alias via recursion f_out = np.copy(f_in) for k in range(len(f_out)): while f_out[k] > fs/2.: f_out[k] = abs(f_out[k] - fs) return f_out """ def cascade_filters(b1,a1,b2,a2): """ Cascade two IIR digital filters into a single (b,a) coefficient set. To cascade two digital filters (system functions) given their numerator and denominator coefficients you simply convolve the coefficient arrays. Parameters ---------- b1 : ndarray of numerator coefficients for filter 1 a1 : ndarray of denominator coefficients for filter 1 b2 : ndarray of numerator coefficients for filter 2 a2 : ndarray of denominator coefficients for filter 2 Returns ------- b : ndarray of numerator coefficients for the cascade a : ndarray of denominator coefficients for the cascade Examples -------- >>> from scipy import signal >>> b1,a1 = signal.butter(3, 0.1) >>> b2,a2 = signal.butter(3, 0.15) >>> b,a = cascade_filters(b1,a1,b2,a2) """ return signal.convolve(b1,b2), signal.convolve(a1,a2) def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000): """ Add an interfering sinusoidal tone to the input signal at a given SIR_dB. 
The input is the signal of interest (SOI) and number of sinsuoid signals not of interest (SNOI) are addedto the SOI at a prescribed signal-to- intereference SIR level in dB. Parameters ---------- s : ndarray of signal of SOI SIR_dB : interference level in dB N : Trim input signal s to length N + 1 samples fi : ndarray of intereference frequencies in Hz fs : sampling rate in Hz, default is 8000 Hz Returns ------- r : ndarray of combined signal plus intereference of length N+1 samples Examples -------- >>> # load a speech ndarray and trim to 5*8000 + 1 samples >>> fs,s = from_wav('OSR_us_000_0030_8k.wav') >>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500]) """ n = np.arange(0,N+1) K = len(fi) si = np.zeros(N+1) for k in range(K): si += np.cos(2*np.pi*fi[k]/fs*n); s = s[:N+1] Ps = np.var(s) Psi = np.var(si) r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si return r def lms_ic(r,M,mu,delta=1): """ Least mean square (LMS) interference canceller adaptive filter. A complete LMS adaptive filter simulation function for the case of interference cancellation. Used in the digital filtering case study. Parameters ---------- M : FIR Filter length (order M-1) delta : Delay used to generate the reference signal mu : LMS step-size delta : decorrelation delay between input and FIR filter input Returns ------- n : ndarray Index vector r : ndarray noisy (with interference) input signal r_hat : ndarray filtered output (NB_hat[n]) e : ndarray error sequence (WB_hat[n]) ao : ndarray final value of weight vector F : ndarray frequency response axis vector Ao : ndarray frequency response of filter Examples ---------- >>> # import a speech signal >>> fs,s = from_wav('OSR_us_000_0030_8k.wav') >>> # add interference at 1kHz and 1.5 kHz and >>> # truncate to 5 seconds >>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500]) >>> # simulate with a 64 tap FIR and mu = 0.005 >>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005) """ N = len(r)-1; # Form the reference signal y via delay delta y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r) # Initialize output vector x_hat to zero r_hat = np.zeros(N+1) # Initialize error vector e to zero e = np.zeros(N+1) # Initialize weight vector to zero ao = np.zeros(M+1) # Initialize filter memory to zero z = np.zeros(M) # Initialize a vector for holding ym of length M+1 ym = np.zeros(M+1) for k in range(N+1): # Filter one sample at a time r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z) # Form the error sequence e[k] = r[k] - r_hat[k] # Update the weight vector ao = ao + 2*mu*e[k]*ym # Update vector used for correlation with e(k) ym = np.hstack((np.array([y[k]]), ym[:-1])) # Create filter frequency response F, Ao = signal.freqz(ao,1,1024) F/= (2*np.pi) Ao = 20*np.log10(abs(Ao)) return np.arange(0,N+1), r, r_hat, e, ao, F, Ao def fir_iir_notch(fi,fs,r=0.95): """ Design a second-order FIR or IIR notch filter. A second-order FIR notch filter is created by placing conjugate zeros on the unit circle at angle corresponidng to the notch center frequency. The IIR notch variation places a pair of conjugate poles at the same angle, but with radius r < 1 (typically 0.9 to 0.95). Parameters ---------- fi : notch frequency is Hz relative to fs fs : the sampling frequency in Hz, e.g. 8000 r : pole radius for IIR version, default = 0.95 Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Notes ----- If the pole radius is 0 then an FIR version is created, that is there are no poles except at z = 0. 
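    Restating the design used in the code: with w0 = 2*pi*fi/fs the zeros are
    placed at exp(+/-1j*w0), giving the numerator b = [1, -2*cos(w0), 1]; for the
    IIR version the poles are at r*exp(+/-1j*w0), i.e. a = [1, -2*r*cos(w0), r**2].
    For fi = 1000 Hz and fs = 8000 Hz, w0 = pi/4.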
Examples -------- >>> b_FIR, a_FIR = fir_iir_notch(1000,8000,0) >>> b_IIR, a_IIR = fir_iir_notch(1000,8000) """ w0 = 2*np.pi*fi/float(fs) if r >= 1: print('Poles on or outside unit circle.') if r == 0: a = np.array([1.0]) else: a = np.array([1, -2*r*np.cos(w0), r**2]) b = np.array([1, -2*np.cos(w0), 1]) return b, a def simple_SA(x,NS,NFFT,fs,NAVG=1,window='boxcar'): """ Spectral estimation using windowing and averaging. This function implements averaged periodogram spectral estimation estimation similar to the NumPy's psd() function, but more specialized for the the windowing case study of Chapter 16. Parameters ---------- x : ndarray containing the input signal NS : The subrecord length less zero padding, e.g. NS < NFFT NFFT : FFT length, e.g., 1024 = 2**10 fs : sampling rate in Hz NAVG : the number of averages, e.g., 1 for deterministic signals window : hardcoded window 'boxcar' (default) or 'hanning' Returns ------- f : ndarray frequency axis in Hz on [0, fs/2] Sx : ndarray the power spectrum estimate Notes ----- The function also prints the maximum number of averages K possible for the input data record. Examples -------- >>> n = arange(0,2048) >>> x = cos(2*pi*1000/10000*n) + 0.01*cos(2*pi*3000/10000*n) >>> f, Sx = simple_SA(x,128,512,10000) >>> f, Sx = simple_SA(x,256,1024,10000,window='hanning') >>> plot(f, 10*log10(Sx)) """ Nx = len(x) K = Nx/NS print('K = ', K) if NAVG > K: print('NAVG exceeds number of available subrecords') return 0,0 if window.lower() == 'boxcar' or window.lower() == 'rectangle': w = signal.boxcar(NS) elif window.lower() == 'hanning': w = signal.hanning(NS) xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS)) for k in range(NAVG): xsw[k,] = w*x[k*NS:(k+1)*NS] Sx = np.zeros(NFFT) for k in range(NAVG): X = fft.fft(xsw[k,],NFFT) Sx += abs(X)**2 Sx /= float(NAVG) Sx /= float(NFFT**2) if x.dtype != 'complex128': n = np.arange(NFFT/2) f = fs*n/float(NFFT) Sx = Sx[0:NFFT/2] else: n = np.arange(NFFT/2) f = fs*np.hstack((np.arange(-NFFT/2,0),np.arange(NFFT/2)))/float(NFFT) Sx = np.hstack((Sx[NFFT/2:],Sx[0:NFFT/2])) return f, Sx def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)): """ Plot the Fouier series line spectral given the coefficients. This function plots two-sided and one-sided line spectra of a periodic signal given the complex exponential Fourier series coefficients and the corresponding harmonic frequencies. Parameters ---------- fk : vector of real sinusoid frequencies Xk : magnitude and phase at each positive frequency in fk mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot, mode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians sides : 2; 2-sided or 1-sided linetype : line type per Matplotlib definitions, e.g., 'b'; lwidth : 2; linewidth in points fsize : optional figure size in inches, default = (6,4) inches Returns ------- Nothing : A plot window opens containing the line spectrum plot Notes ----- Since real signals are assumed the frequencies of fk are 0 and/or positive numbers. The supplied Fourier coefficients correspond. 
Examples -------- >>> n = arange(0,25) >>> # a pulse train with 10 Hz fundamental and 20% duty cycle >>> fk = n*10 >>> Xk = sinc(n*10*.02)*exp(-1j*2*pi*n*10*.01) # 1j = sqrt(-1) >>> line_spectra(fk,Xk,'mag') >>> line_spectra(fk,Xk,'phase') """ plt.figure(figsize=fsize) # Eliminate zero valued coefficients idx = pylab.find(Xk != 0) Xk = Xk[idx] fk = fk[idx] if mode == 'mag': for k in range(len(fk)): if fk[k] == 0 and sides == 2: plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth) elif fk[k] == 0 and sides == 1: plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth) elif fk[k] > 0 and sides == 2: plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth) plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth) elif fk[k] > 0 and sides == 1: plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth) else: print('Invalid sides type') plt.grid() if sides == 2: plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))]) elif sides == 1: plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))]) else: print('Invalid sides type') plt.ylabel('Magnitude') plt.xlabel('Frequency (Hz)') elif mode == 'magdB': Xk_dB = 20*np.log10(np.abs(Xk)) for k in range(len(fk)): if fk[k] == 0 and sides == 2: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) elif fk[k] == 0 and sides == 1: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth) elif fk[k] > 0 and sides == 2: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) elif fk[k] > 0 and sides == 1: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth) else: print('Invalid sides type') plt.grid() max_dB = np.ceil(max(Xk_dB/10.))*10 min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10) if sides == 2: plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB]) elif sides == 1: plt.axis([0, 1.2*max(fk), min_dB, max_dB]) else: print('Invalid sides type') plt.ylabel('Magnitude (dB)') plt.xlabel('Frequency (Hz)') elif mode == 'magdBn': Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk))) for k in range(len(fk)): if fk[k] == 0 and sides == 2: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) elif fk[k] == 0 and sides == 1: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth) elif fk[k] > 0 and sides == 2: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth) elif fk[k] > 0 and sides == 1: plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth) else: print('Invalid sides type') plt.grid() max_dB = np.ceil(max(Xk_dB/10.))*10 min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10) if sides == 2: plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB]) elif sides == 1: plt.axis([0, 1.2*max(fk), min_dB, max_dB]) else: print('Invalid sides type') plt.ylabel('Normalized Magnitude (dB)') plt.xlabel('Frequency (Hz)') elif mode == 'phase': for k in range(len(fk)): if fk[k] == 0 and sides == 2: plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth) elif fk[k] == 0 and sides == 1: plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth) elif fk[k] > 0 and sides == 2: plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth) plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth) elif fk[k] > 0 and sides == 1: plt.plot([fk[k], fk[k]],[0, 
np.angle(Xk[k])],linetype, linewidth=lwidth) else: print('Invalid sides type') plt.grid() if sides == 2: plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k') plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))]) elif sides == 1: plt.plot([0, 1.2*max(fk)], [0, 0],'k') plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))]) else: print('Invalid sides type') plt.ylabel('Phase (rad)') plt.xlabel('Frequency (Hz)') else: print('Invalid mode type') def fs_coeff(xp,N,f0,one_side=True): """ Numerically approximate the Fourier series coefficients given periodic x(t). The input is assummed to represent one period of the waveform x(t) that has been uniformly sampled. The number of samples supplied to represent one period of the waveform sets the sampling rate. Parameters ---------- xp : ndarray of one period of the waveform x(t) N : maximum Fourier series coefficient, [0,...,N] f0 : fundamental frequency used to form fk. Returns ------- Xk : ndarray of the coefficients over indices [0,1,...,N] fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0] Notes ----- len(xp) >= 2*N+1 as len(xp) is the fft length. Examples -------- >>> t = arange(0,1,1/1024.) >>> # a 20% duty cycle pulse starting at t = 0 >>> x_rect = rect(t-.1,0.2) >>> Xk, fk = fs_coeff(x_rect,25,10) >>> # plot the spectral lines >>> line_spectra(fk,Xk,'mag') """ Nint = len(xp) if Nint < 2*N+1: print('Number of samples in xp insufficient for requested N.') return 0,0 Xp = fft.fft(xp,Nint)/float(Nint) # To interface with the line_spectra function use one_side mode if one_side: Xk = Xp[0:N+1] fk = f0*np.arange(0,N+1) else: Xk = np.hstack((Xp[-N:],Xp[0:N+1])) fk = f0*np.arange(-N,N+1) return Xk, fk def fs_approx(Xk,fk,t): """ Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies Assume the signal is real so coefficients Xk are supplied for nonnegative indicies. The negative index coefficients are assumed to be complex conjugates. Parameters ---------- Xk : ndarray of complex Fourier series coefficients fk : ndarray of harmonic frequencies in Hz t : ndarray time axis corresponding to output signal array x_approx Returns ------- x_approx : ndarray of periodic waveform approximation over time span t Examples -------- >>> t = arange(0,2,.002) >>> # a 20% duty cycle pulse train >>> n = arange(0,20,1) # 0 to 19th harmonic >>> fk = 1*n % period = 1s >>> t, x_approx = fs_approx(Xk,fk,t) >>> plot(t,x_approx) """ x_approx = np.zeros(len(t)) for k,Xkk in enumerate(Xk): if fk[k] == 0: x_approx += Xkk*np.ones(len(t)) else: x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk)) return x_approx def conv_sum(x1,nx1,x2,nx2,extent=('f','f')): """ Discrete convolution of x1 and x2 with proper tracking of the output time axis. Convolve two discrete-time signals using the SciPy function signal.convolution. The time (sequence axis) are managed from input to output. y[n] = x1[n]*x2[n]. Parameters ---------- x1 : ndarray of signal x1 corresponding to nx1 nx1 : ndarray time axis for x1 x2 : ndarray of signal x2 corresponding to nx2 nx2 : ndarray time axis for x2 extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided Returns ------- y : ndarray of output values y ny : ndarray of the corresponding sequence index n Notes ----- The output time axis starts at the sum of the starting values in x1 and x2 and ends at the sum of the two ending values in x1 and x2. 
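    For example, if nx1 spans -5..9 and nx2 spans 0..4 (both finite extent), the
    output index axis ny runs from -5+0 = -5 up to 9+4 = 13, i.e.
    len(x1)+len(x2)-1 = 19 points.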
The default extents of ('f','f') are used for signals that are active (have support) on or within n1 and n2 respectively. A right-sided signal such as a^n*u[n] is semi-infinite, so it has extent 'r' and the convolution output will be truncated to display only the valid results. Examples -------- >>> nx = arange(-5,10) >>> x = drect(nx,4) >>> y,ny = conv_sum(x,nx,x,nx) >>> stem(ny,y) >>> # Consider a pulse convolved with an exponential ('r' type extent) >>> h = 0.5**nx*dstep(nx) >>> y,ny = conv_sum(x,nx,h,nx,('f','r')) # note extents set >>> stem(ny,y) # expect a pulse charge and discharge sequence """ nnx1 = np.arange(0,len(nx1)) nnx2 = np.arange(0,len(nx2)) n1 = nnx1[0] n2 = nnx1[-1] n3 = nnx2[0] n4 = nnx2[-1] # Start by finding the valid output support or extent interval to insure that # for no finite extent signals ambiquous results are not returned. # Valid extents are f (finite), r (right-sided), and l (left-sided) if extent[0] == 'f' and extent[1] == 'f': nny = np.arange(n1+n3,n2+1+n4+1-1) ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0] elif extent[0] == 'f' and extent[1] == 'r': nny = np.arange(n1+n3,n1+1+n4+1-1) ny = nny + nx1[0]+nx2[0] elif extent[0] == 'r' and extent[1] == 'f': nny = np.arange(n1+n3,n2+1+n3+1-1) ny = nny + nx1[0]+nx2[0] elif extent[0] == 'f' and extent[1] == 'l': nny = np.arange(n2+n3,n2+1+n4+1-1) ny = nny + nx1[-1]+nx2[0] elif extent[0] == 'l' and extent[1] == 'f': nny = np.arange(n1+n4,n2+1+n4+1-1) ny = nny + tx1[0]+tx2[-1] elif extent[0] == 'r' and extent[1] == 'r': nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1) ny = nny + nx1[0]+nx2[0] elif extent[0] == 'l' and extent[1] == 'l': nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1) ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0]) else: print('Invalid x1 x2 extents specified or valid extent not found!') return 0,0 # Finally convolve the sequences y = signal.convolve(x1, x2) print('Output support: (%+d, %+d)' % (ny[0],ny[-1])) return y[nny], ny def conv_integral(x1,tx1,x2,tx2,extent = ('f','f')): """ Continuous-time convolution of x1 and x2 with proper tracking of the output time axis. Appromimate the convolution integral for the convolution of two continuous-time signals using the SciPy function signal. The time (sequence axis) are managed from input to output. y(t) = x1(t)*x2(t). Parameters ---------- x1 : ndarray of signal x1 corresponding to tx1 tx1 : ndarray time axis for x1 x2 : ndarray of signal x2 corresponding to tx2 tx2 : ndarray time axis for x2 extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided Returns ------- y : ndarray of output values y ty : ndarray of the corresponding time axis for y Notes ----- The output time axis starts at the sum of the starting values in x1 and x2 and ends at the sum of the two ending values in x1 and x2. The time steps used in x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals that are active (have support) on or within t1 and t2 respectively. A right-sided signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the convolution output will be truncated to display only the valid results. 
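    Numerically, the integral is approximated by the Riemann sum
    y(n*dt) ~= dt*sum_k x1(k*dt)*x2((n-k)*dt), which is why the routine scales
    the discrete convolution by dt and why tx1 and tx2 must use the same time step.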
Examples -------- >>> tx = arange(-5,10,.01) >>> x = rect(tx-2,4) # pulse starts at t = 0 >>> y,ty = conv_integral(x,tx,x,tx) >>> plot(ty,y) # expect a triangle on [0,8] >>> # Consider a pulse convolved with an exponential ('r' type extent) >>> h = 4*exp(-4*tx)*step(tx) >>> y,ty = conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set >>> plot(ty,y) # expect a pulse charge and discharge waveform """ dt = tx1[1] - tx1[0] nx1 = np.arange(0,len(tx1)) nx2 = np.arange(0,len(tx2)) n1 = nx1[0] n2 = nx1[-1] n3 = nx2[0] n4 = nx2[-1] # Start by finding the valid output support or extent interval to insure that # for no finite extent signals ambiquous results are not returned. # Valid extents are f (finite), r (right-sided), and l (left-sided) if extent[0] == 'f' and extent[1] == 'f': ny = np.arange(n1+n3,n2+1+n4+1-1) ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0] elif extent[0] == 'f' and extent[1] == 'r': ny = np.arange(n1+n3,n1+1+n4+1-1) ty = ny*dt + tx1[0]+tx2[0] elif extent[0] == 'r' and extent[1] == 'f': ny = np.arange(n1+n3,n2+1+n3+1-1) ty = ny*dt + tx1[0]+tx2[0] elif extent[0] == 'f' and extent[1] == 'l': ny = np.arange(n2+n3,n2+1+n4+1-1) ty = ny*dt + tx1[-1]+tx2[0] elif extent[0] == 'l' and extent[1] == 'f': ny = np.arange(n1+n4,n2+1+n4+1-1) ty = ny*dt + tx1[0]+tx2[-1] elif extent[0] == 'r' and extent[1] == 'r': ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1) ty = ny*dt + tx1[0]+tx2[0] elif extent[0] == 'l' and extent[1] == 'l': ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1) ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0]) else: print('Invalid x1 x2 extents specified or valid extent not found!') return 0,0 # Finally convolve the sampled sequences and scale by dt y = signal.convolve(x1, x2)*dt print('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1])) return y[ny], ty def delta_eps(t,eps): """ Rectangular pulse approximation to impulse function. Parameters ---------- t : ndarray of time axis eps : pulse width Returns ------- d : ndarray containing the impulse approximation Examples -------- >>> t = arange(-2,2,.001) >>> d = delta_eps(t,.1) >>> plot(t,d) """ d = np.zeros(len(t)) for k,tt in enumerate(t): if abs(tt) <= eps/2.: d[k] = 1/float(eps) return d def step(t): """ Approximation to step function signal u(t). In this numerical version of u(t) the step turns on at t = 0. Parameters ---------- t : ndarray of the time axis Returns ------- x : ndarray of the step function signal u(t) Examples -------- >>> t = arange(-1,5,.01) >>> x = step(t) >>> plot(t,x) >>> # to turn on at t = 1 shift t >>> x = step(t - 1.0) >>> plot(t,x) """ x = np.zeros(len(t)) for k,tt in enumerate(t): if tt >= 0: x[k] = 1.0 return x def rect(t,tau): """ Approximation to the rectangle pulse Pi(t/tau). In this numerical version of Pi(t/tau) the pulse is active over -tau/2 <= t <= tau/2. Parameters ---------- t : ndarray of the time axis tau : the pulse width Returns ------- x : ndarray of the signal Pi(t/tau) Examples -------- >>> t = arange(-1,5,.01) >>> x = rect(t,1.0) >>> plot(t,x) >>> # to turn on at t = 1 shift t >>> x = rect(t - 1.0,1.0) >>> plot(t,x) """ x = np.zeros(len(t)) for k,tk in enumerate(t): if np.abs(tk) > tau/2.: x[k] = 0 else: x[k] = 1 return x def tri(t,tau): """ Approximation to the triangle pulse Lambda(t/tau). In this numerical version of Lambda(t/tau) the pulse is active over -tau <= t <= tau. 
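    On that interval the samples follow x(t) = 1 - |t|/tau, and are zero elsewhere.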
Parameters ---------- t : ndarray of the time axis tau : one half the triangle base width Returns ------- x : ndarray of the signal Lambda(t/tau) Examples -------- >>> t = arange(-1,5,.01) >>> x = tri(t,1.0) >>> plot(t,x) >>> # to turn on at t = 1 shift t >>> x = tri(t - 1.0,1.0) >>> plot(t,x) """ x = np.zeros(len(t)) for k,tk in enumerate(t): if np.abs(tk) > tau/1.: x[k] = 0 else: x[k] = 1 - np.abs(tk)/tau return x def dimpulse(n): """ Discrete impulse function delta[n]. Parameters ---------- n : ndarray of the time axis Returns ------- x : ndarray of the signal delta[n] Examples -------- >>> n = arange(-5,5) >>> x = dimpulse(n) >>> stem(n,x) >>> # shift the delta left by 2 >>> x = dimpulse(n+2) >>> stem(n,x) """ x = np.zeros(len(n)) for k,nn in enumerate(n): if nn == 0: x[k] = 1.0 return x def dstep(n): """ Discrete step function u[n]. Parameters ---------- n : ndarray of the time axis Returns ------- x : ndarray of the signal u[n] Examples -------- >>> n = arange(-5,5) >>> x = dstep(n) >>> stem(n,x) >>> # shift the delta left by 2 >>> x = dstep(n+2) >>> stem(n,x) """ x = np.zeros(len(n)) for k,nn in enumerate(n): if nn >= 0: x[k] = 1.0 return x def drect(n,N): """ Discrete rectangle function of duration N samples. The signal is active on the interval 0 <= n <= N-1. Also known as the rectangular window function, which is available in scipy.signal. Parameters ---------- n : ndarray of the time axis N : the pulse duration Returns ------- x : ndarray of the signal Notes ----- The discrete rectangle turns on at n = 0, off at n = N-1 and has duration of exactly N samples. Examples -------- >>> n = arange(-5,5) >>> x = drect(n) >>> stem(n,x) >>> # shift the delta left by 2 >>> x = drect(n+2) >>> stem(n,x) """ x = np.zeros(len(n)) for k,nn in enumerate(n): if nn >= 0 and nn < N: x[k] = 1.0 return x def rc_imp(Ns,alpha,M=6): """ A truncated raised cosine pulse used in digital communications. The pulse shaping factor 0< alpha < 1 is required as well as the truncation factor M which sets the pulse duration to be 2*M*Tsymbol. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. Examples -------- >>> # ten samples per symbol and alpha = 0.35 >>> b = rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> stem(n,b) """ # Design the filter n = np.arange(-M*Ns,M*Ns+1) b = np.zeros(len(n)); a = alpha; Ns *= 1.0 for i in range(len(n)): if (1 - 4*(a*n[i]/Ns)**2) == 0: b[i] = np.pi/4*np.sinc(1/(2.*a)) else: b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2) return b def sqrt_rc_imp(Ns,alpha,M=6): """ A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor 0< alpha < 1 is required as well as the truncation factor M which sets the pulse duration to be 2*M*Tsymbol. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. 
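    As a quick sketch (assuming numpy is imported as np), cascading the pulse with
    itself models the transmit filter followed by its matched filter and yields an
    approximately zero-ISI, raised-cosine-shaped response, as described below:

    >>> b = sqrt_rc_imp(10,0.35)
    >>> c = np.convolve(b,b)   # Tx pulse through its matched filter
    >>> # c[::10] has one dominant center tap; the other symbol-spaced taps are ~0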
When square root raised cosine (SRC) pulse is used generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, this having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- >>> # ten samples per symbol and alpha = 0.35 >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> stem(n,b) """ # Design the filter n = np.arange(-M*Ns,M*Ns+1) b = np.zeros(len(n)) Ns *= 1.0 a = alpha for i in range(len(n)): if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2: b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a))) else: b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2)) b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a)) return b def PN_gen(N_bits,m=5): """ Maximal length sequence signal generator. Generates a sequence 0/1 bits of N_bit duration. The bits themselves are obtained from an m-sequence of length m. Available m-sequence (PN generators) include m = 2,3,...,12, & 16. Parameters ---------- N_bits : the number of bits to generate m : the number of shift registers. 2,3, .., 12, & 16 Returns ------- PN : ndarray of the generator output over N_bits Notes ----- The sequence is periodic having period 2**m - 1 (2^m - 1). Examples -------- >>> # A 15 bit period signal nover 50 bits >>> PN = PN_gen(50,4) """ c = m_seq(m) Q = len(c) max_periods = int(np.ceil(N_bits/float(Q))) PN = np.zeros(max_periods*Q) for k in range(max_periods): PN[k*Q:(k+1)*Q] = c PN = np.resize(PN, (1,N_bits)) return PN.flatten() def m_seq(m): """ Generate an m-sequence ndarray using an all-ones initialization. Available m-sequence (PN generators) include m = 2,3,...,12, & 16. Parameters ---------- m : the number of shift registers. 2,3, .., 12, & 16 Returns ------- c : ndarray of one period of the m-sequence Notes ----- The sequence period is 2**m - 1 (2^m - 1). Examples -------- >>> c = m_seq(5) """ # Load shift register with all ones to start sr = np.ones(m) # M-squence length is: Q = 2**m - 1 c = np.zeros(Q) if m == 2: taps = np.array([1, 1, 1]) elif m == 3: taps = np.array([1, 0, 1, 1]) elif m == 4: taps = np.array([1, 0, 0, 1, 1]) elif m == 5: taps = np.array([1, 0, 0, 1, 0, 1]) elif m == 6: taps = np.array([1, 0, 0, 0, 0, 1, 1]) elif m == 7: taps = np.array([1, 0, 0, 0, 1, 0, 0, 1]) elif m == 8: taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1]) elif m == 9: taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1]) elif m == 10: taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1]) elif m == 11: taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1]) elif m == 12: taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1]) elif m == 16: taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1]) else: print('Invalid length specified') for n in range(Q): tap_xor = 0 c[n] = sr[-1] for k in range(1,m): if taps[k] == 1: tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k]))) sr[1:] = sr[:-1] sr[0] = tap_xor return c def BPSK_tx(N_bits,Ns,ach_fc=2.0,ach_lvl_dB=-100,pulse='rect',alpha = 0.25,M=6): """ Genrates biphase shift keyed (BPSK) transmitter with adjacent channel interference. Generates three BPSK signals with rectangular or square root raised cosine (SRC) pulse shaping of duration N_bits and Ns samples per bit. 
The desired signal is centered on f = 0, which the adjacent channel signals to the left and right are also generated at dB level relative to the desired signal. Used in the digital communications Case Study supplement. Parameters ---------- N_bits : the number of bits to simulate Ns : the number of samples per bit ach_fc : the frequency offset of the adjacent channel signals (default 2.0) ach_lvl_dB : the level of the adjacent channel signals in dB (default -100) pulse :the pulse shape 'rect' or 'src' alpha : square root raised cosine pulse shape factor (default = 0.25) M : square root raised cosine pulse truncation factor (default = 6) Returns ------- x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m) b : the transmit pulse shape data0 : the data bits used to form the desired signal; used for error checking Notes ----- Examples -------- >>> x,b,data0 = BPSK_tx(1000,10,'src') """ x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M) x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M) x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M) n = np.arange(len(x0)) x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n) x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n) ach_lvl = 10**(ach_lvl_dB/20.) return x0 + ach_lvl*(x1p + x1m), b, data0 #def BPSK_rx(r,b,): def NRZ_bits(N_bits,Ns,pulse='rect',alpha = 0.25,M=6): """ Generate non-return-to-zero (NRZ) data bits with pulse shaping. A baseband digital data signal using +/-1 amplitude signal values and including pulse shaping. Parameters ---------- N_bits : number of NRZ +/-1 data bits to produce Ns : the number of samples per bit, pulse_type : 'rect' , 'rc', 'src' (default 'rect') alpha : excess bandwidth factor(default 0.25) M : single sided pulse duration (default = 6) Returns ------- x : ndarray of the NRZ signal values b : ndarray of the pulse shape data : ndarray of the underlying data bits Notes ----- Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine), 'src' (root raised cosine). The actual pulse length is 2*M+1 samples. This function is used by BPSK_tx in the Case Study article. Examples -------- >>> x,b,data = NRZ_bits(100,10) >>> t = arange(len(x)) >>> plot(t,x) """ data = np.random.randint(0,2,N_bits) x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1)))) x =x.flatten() if pulse.lower() == 'rect': b = np.ones(Ns) elif pulse.lower() == 'rc': b = rc_imp(Ns,alpha,M) elif pulse.lower() == 'src': b = sqrt_rc_imp(Ns,alpha,M) else: print('pulse type must be rec, rc, or src') x = signal.lfilter(b,1,x) return x,b/float(Ns),data def NRZ_bits2(data,Ns,pulse='rect',alpha = 0.25,M=6): """ Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data A baseband digital data signal using +/-1 amplitude signal values and including pulse shaping. The data sequence is user supplied. Parameters ---------- data : ndarray of the data bits as 0/1 values Ns : the number of samples per bit, pulse_type : 'rect' , 'rc', 'src' (default 'rect') alpha : excess bandwidth factor(default 0.25) M : single sided pulse duration (default = 6) Returns ------- x : ndarray of the NRZ signal values b : ndarray of the pulse shape Notes ----- Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine), 'src' (root raised cosine). The actual pulse length is 2*M+1 samples. 
Examples -------- >>> x,b = NRZ_bits2([m_seq(5),10) >>> t = arange(len(x)) >>> plot(t,x) """ N_bits = len(data) x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1)))) x = x.flatten() if pulse.lower() == 'rect': b = np.ones(Ns) elif pulse.lower() == 'rc': b = rc_imp(Ns,alpha,M) elif pulse.lower() == 'src': b = sqrt_rc_imp(Ns,alpha,M) else: print('pulse type must be rec, rc, or src') x = signal.lfilter(b,1,x) return x,b/float(Ns) def eye_plot(x,L,S=0): """ Eye pattern plot of a baseband digital communications waveform. The signal must be real, but can be multivalued in terms of the underlying modulation scheme. Used for BPSK eye plots in the Case Study article. Parameters ---------- x : ndarray of the real input data vector/array L : display length in samples (usually two symbols) S : start index Returns ------- Nothing : A plot window opens containing the eye plot Notes ----- Increase S to eliminate filter transients. Examples -------- >>> # 1000 bits at 10 samples per bit with 'rc' shaping >>> x,b, data = NRZ_bits(1000,10,'rc') >>> eye_plot(x,20,60) """ plt.figure(figsize=(6,4)) idx = np.arange(0,L+1) plt.plot(idx,x[S:S+L+1],'b') k_max = int((len(x) - S)/L)-1 for k in range(1,k_max): plt.plot(idx,x[S+k*L:S+L+1+k*L],'b') plt.grid() plt.xlabel('Time Index - n') plt.ylabel('Amplitude') plt.title('Eye Plot') return 0 def scatter(x,Ns,start): """ Sample a baseband digital communications waveform at the symbol spacing. Parameters ---------- x : ndarray of the input digital comm signal Ns : number of samples per symbol (bit) start : the array index to start the sampling Returns ------- xI : ndarray of the real part of x following sampling xQ : ndarray of the imaginary part of x following sampling Notes ----- Normally the signal is complex, so the scatter plot contains clusters at point in the complex plane. For a binary signal such as BPSK, the point centers are nominally +/-1 on the real axis. Start is used to eliminate transients from the FIR pulse shaping filters from appearing in the scatter plot. Examples -------- >>> x,b, data = NRZ_bits(1000,10,'rc') >>> # add some noise so points are now scattered about +/-1 >>> y = cpx_AWGN(x,20,10) >>> yI,yQ = scatter(y,10,60) >>> plot(yI,yQ,'.') >>> axis('equal') """ xI = np.real(x[start::Ns]) xQ = np.imag(x[start::Ns]) return xI, xQ def bit_errors(z,data,start,Ns): """ A simple bit error counting function. In its present form this function counts bit errors between hard decision BPSK bits in +/-1 form and compares them with 0/1 binary data that was transmitted. Timing between the Tx and Rx data is the responsibility of the user. An enhanced version of this function, which features automatic synching will be created in the future. Parameters ---------- z : ndarray of hard decision BPSK data prior to symbol spaced sampling data : ndarray of reference bits in 1/0 format start : timing reference for the received Ns : the number of samples per symbol Returns ------- Pe_hat : the estimated probability of a bit error Notes ----- The Tx and Rx data streams are exclusive-or'd and the then the bit errors are summed, and finally divided by the number of bits observed to form an estimate of the bit error probability. This function needs to be enhanced to be more useful. 
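    In the code, the hard decisions are formed as (sign(real(z[start::Ns])) + 1)/2,
    which maps the +/-1 symbol-spaced samples back to 1/0 before the exclusive-or
    with the reference bits.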
Examples -------- >>> from scipy import signal >>> x,b, data = NRZ_bits(1000,10) >>> # set Eb/N0 to 8 dB >>> y = cpx_AWGN(x,8,10) >>> # matched filter the signal >>> z = signal.lfilter(b,1,y) >>> # make bit decisions at 10 and Ns multiples thereafter >>> Pe_hat = bit_errors(z,data,10,10) """ Pe_hat = np.sum(data[0:len(z[start::Ns])]^np.int64((np.sign(np.real(z[start::Ns]))+1)/2))/float(len(z[start::Ns])) return Pe_hat def cpx_AWGN(x,EsN0,Ns): """ Apply white Gaussian noise to a digital communications signal. This function represents a complex baseband white Gaussian noise digital communications channel. The input signal array may be real or complex. Parameters ---------- x : ndarray noise free complex baseband input signal. EsNO : set the channel Es/N0 (Eb/N0 for binary) level in dB Ns : number of samples per symbol (bit) Returns ------- y : ndarray x with additive noise added. Notes ----- Set the channel energy per symbol-to-noise power spectral density ratio (Es/N0) in dB. Examples -------- >>> x,b, data = NRZ_bits(1000,10) >>> # set Eb/N0 = 10 dB >>> y = cpx_AWGN(x,10,10) """ w = np.sqrt(Ns*np.var(x)*10**(-EsN0/10.)/2.)*(np.random.randn(len(x)) + 1j*np.random.randn(len(x))) return x+w def my_psd(x,NFFT=2**10,Fs=1): """ A local version of NumPy's PSD function that returns the plot arrays. A mlab.psd wrapper function that returns two ndarrays; makes no attempt to auto plot anything. Parameters ---------- x : ndarray input signal NFFT : a power of two, e.g., 2**10 = 1024 Fs : the sampling rate in Hz Returns ------- Px : ndarray of the power spectrum estimate f : ndarray of frequency values Notes ----- This function makes it easier to overlay spectrum plots because you have better control over the axis scaling than when using psd() in the autoscale mode. Examples -------- >>> x,b, data = NRZ_bits(10000,10) >>> Px,f = my_psd(x,2**10,10) >>> plot(f, 10*log10(Px)) """ Px,f = pylab.mlab.psd(x,NFFT,Fs) return Px.flatten(), f def am_tx(m,a_mod,fc=75e3): """ AM transmitter for Case Study of Chapter 17. Assume input is sampled at 8 Ksps and upsampling by 24 is performed to arrive at fs_out = 192 Ksps. Parameters ---------- m : ndarray of the input message signal a_mod : AM modulation index, between 0 and 1 fc : the carrier frequency in Hz Returns ------- x192 : ndarray of the upsampled by 24 and modulated carrier t192 : ndarray of the upsampled by 24 time axis m24 : ndarray of the upsampled by 24 message signal Notes ----- The sampling rate of the input signal is assumed to be 8 kHz. Examples -------- >>> n = arange(0,1000) >>> # 1 kHz message signal >>> m = cos(2*pi*1000/8000.*n) >>> x192, t192 = am_tx(m,0.8,fc=75e3) """ m24 = interp24(m) t192 = np.arange(len(m24))/192.0e3 #m24 = np.cos(2*np.pi*2.0e3*t192) m_max = np.max(np.abs(m24)) x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192) return x192, t192, m24 def am_rx(x192): """ AM envelope detector receiver for the Chapter 17 Case Study The receiver bandpass filter is not included in this function. Parameters ---------- x192 : ndarray of the AM signal at sampling rate 192 ksps Returns ------- m_rx8 : ndarray of the demodulated message at 8 ksps t8 : ndarray of the time axis at 8 ksps m_rx192 : ndarray of the demodulated output at 192 ksps x_edet192 : ndarray of the envelope detector output at 192 ksps Notes ----- The bandpass filter needed at the receiver front-end can be designed using b_bpf,a_bpf = am_rx_BPF(). 
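    A minimal end-to-end sketch (assuming scipy.signal is imported as signal, and
    m is a message sampled at 8 ksps) chains the transmitter, the front-end
    bandpass filter, and this receiver:

    >>> x192, t192, m24 = am_tx(m, 0.8)
    >>> b_bpf, a_bpf = am_rx_BPF()
    >>> r192 = signal.lfilter(b_bpf, a_bpf, x192)
    >>> m_rx8, t8, m_rx192, x_edet192 = am_rx(r192)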
Examples -------- >>> n = arange(0,1000) >>> # 1 kHz message signal >>> m = cos(2*pi*1000/8000.*n) >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3) >>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192) """ x_edet192 = env_det(x192) m_rx8 = deci24(x_edet192) # remove DC offset from the env_det + LPF output m_rx8 -= np.mean(m_rx8) t8 = np.arange(len(m_rx8))/8.0e3 """ For performance testing also filter x_env_det 192e3 using a Butterworth cascade. The filter cutoff is 5kHz, the message BW. """ b192,a192 = signal.butter(5,2*5.0e3/192.0e3) m_rx192 = signal.lfilter(b192,a192,x_edet192) m_rx192 = signal.lfilter(b192,a192,m_rx192) m_rx192 -= np.mean(m_rx192) return m_rx8,t8,m_rx192,x_edet192 def am_rx_BPF(N_order = 7, ripple_dB = 1, B = 10e3, fs = 192e3): """ Bandpass filter design for the AM receiver Case Study of Chapter 17. Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce adjacent channel interference at the envelope detector input. Parameters ---------- N_order : the filter order (default = 7) ripple_dB : the passband ripple in dB (default = 1) B : the RF bandwidth (default = 10e3) fs : the sampling frequency Returns ------- b_bpf : ndarray of the numerator filter coefficients a_bpf : ndarray of the denominator filter coefficients Examples -------- >>> from scipy import signal >>> # Use the default values >>> b_bpf,a_bpf = am_rx_BPF() >>> # plot the filter pole-zero plot >>> zplane(b_bpf,a_bpf) >>> # plot the frequency response >>> f = arange(0,192/2.,.1) >>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*pi*f/192) >>> plot(f,20*log10(abs(Hbpf))) >>> axis([0,192/2.,-80,10]) """ b_bpf,a_bpf = signal.cheby1(N_order,ripple_dB,2*np.array([75e3-B/2.,75e3+B/2.])/fs,'bandpass') return b_bpf,a_bpf def env_det(x): """ Ideal envelope detector. This function retains the positive half cycles of the input signal. Parameters ---------- x : ndarray of the input signal Returns ------- y : ndarray of the output signal Examples -------- >>> n = arange(0,100) >>> # 1 kHz message signal >>> m = cos(2*pi*1000/8000.*n) >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3) >>> y = env_det(x192) """ y = np.zeros(len(x)) for k,xx in enumerate(x): if xx >= 0: y[k] = xx return y def interp24(x): """ Interpolate by L = 24 using Butterworth filters. The interpolation is done using three stages. Upsample by L = 2 and lowpass filter, upsample by 3 and lowpass filter, then upsample by L = 4 and lowpass filter. In all cases the lowpass filter is a 10th-order Butterworth lowpass. Parameters ---------- x : ndarray of the input signal Returns ------- y : ndarray of the output signal Notes ----- The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to track the upsampling by 2, 3, and 4 respectively. Examples -------- >>> y = interp24(x) """ # Stage 1: L = 2 b2,a2 = signal.butter(10,1/2.) y1 = upsample(x,2) y1 = signal.lfilter(b2,a2,2*y1) # Stage 2: L = 3 b3,a3 = signal.butter(10,1/3.) y2 = upsample(y1,3) y2 = signal.lfilter(b3,a3,3*y2) # Stage 3: L = 4 b4,a4 = signal.butter(10,1/4.) y3 = upsample(y2,4) y3 = signal.lfilter(b4,a4,4*y3) return y3 def deci24(x): """ Decimate by M = 24 using Butterworth filters. The decimation is done using three stages. Lowpass filter and downsample by 2, lowpass filter and downsample by 3, then lowpass filter and downsample by 4. In all cases the lowpass filter is a 10th-order Butterworth lowpass. Parameters ---------- x : ndarray of the input signal Returns ------- y : ndarray of the output signal Notes ----- The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to track the downsampling by 2, 3, and 4 respectively.
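Splitting the rate change into factors of 2, 3, and 4 keeps each 10th-order Butterworth at a moderate normalized cutoff (1/2, 1/3, 1/4); a single-stage design would need a much sharper filter with a cutoff near 1/24. As a rough sanity check (a suggested test, not a guarantee), deci24(interp24(x)) should return a signal close to x apart from filter delay and passband droop.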
Examples -------- >>> y = deci24(x) """ # Stage 1: M = 2 b2,a2 = signal.butter(10,1/2.) y1 = signal.lfilter(b2,a2,x) y1 = downsample(y1,2) # Stage 2: M = 3 b3,a3 = signal.butter(10,1/3.) y2 = signal.lfilter(b3,a3,y1) y2 = downsample(y2,3) # Stage 3: L = 4 b4,a4 = signal.butter(10,1/4.) y3 = signal.lfilter(b4,a4,y2) y3 = downsample(y3,4) return y3 def upsample(x,L): """ Upsample by factor L Insert L - 1 zero samples in between each input sample. Parameters ---------- x : ndarray of input signal values L : upsample factor Returns ------- y : ndarray of the output signal values Examples -------- >>> y = upsample(x,3) """ N_input = len(x) y = np.hstack((x.reshape(N_input,1),np.zeros((N_input,L-1)))) y = y.flatten() return y def downsample(x,M,p=0): """ Downsample by factor M Keep every Mth sample of the input. The phase of the input samples kept can be selected. Parameters ---------- x : ndarray of input signal values M : upsample factor p : phase of decimated value, 0 (default), 1, ..., M-1 Returns ------- y : ndarray of the output signal values Examples -------- >>> y = downsample(x,3) >>> y = downsample(x,3,1) """ x = x[0:int(np.floor(len(x)/M))*M] x = x.reshape((int(np.floor(len(x)/M)),M)) y = x[:,p] return y def unique_cpx_roots(rlist,tol = 0.001): """ The average of the root values is used when multiplicity is greater than one. Mark Wickert October 2016 """ uniq = [rlist[0]] mult = [1] for k in range(1,len(rlist)): N_uniq = len(uniq) for m in range(N_uniq): if abs(rlist[k]-uniq[m]) <= tol: mult[m] += 1 uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m]) break uniq = np.hstack((uniq,rlist[k])) mult = np.hstack((mult,[1])) return np.array(uniq), np.array(mult) def zplane(b,a,auto_scale=True,size=2,detect_mult=True,tol=0.001): """ Create an z-plane pole-zero plot. Create an z-plane pole-zero plot using the numerator and denominator z-domain system function coefficient ndarrays b and a respectively. Assume descending powers of z. Parameters ---------- b : ndarray of the numerator coefficients a : ndarray of the denominator coefficients auto_scale : bool (default True) size : plot radius maximum when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. 
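If nearby but distinct roots are being merged (or true repeated roots are being missed), the grouping can be adjusted through the keyword arguments, e.g. zplane(b,a,detect_mult=True,tol=1e-5) to tighten the tolerance, or detect_mult=False to skip multiplicity detection entirely (the tolerance value here is illustrative).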
Examples -------- >>> # Here the plot is generated using auto_scale >>> zplane(b,a) >>> # Here the plot is generated using manual scaling >>> zplane(b,a,False,1.5) """ M = len(b) - 1 N = len(a) - 1 # Plot labels if multiplicity greater than 1 x_scale = 1.5*size y_scale = 1.5*size x_off = 0.02 y_off = 0.01 #N_roots = np.array([1.0]) if M > 0: N_roots = np.roots(b) #D_roots = np.array([1.0]) if N > 0: D_roots = np.roots(a) if auto_scale: if M > 0 and N > 0: size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1 elif M > 0: size = max(np.max(np.abs(N_roots)),1.0)+.1 elif N > 0: size = max(1.0,np.max(np.abs(D_roots)))+.1 else: size = 1.1 plt.figure(figsize=(5,5)) plt.axis('equal') r = np.linspace(0,2*np.pi,200) plt.plot(np.cos(r),np.sin(r),'r--') plt.plot([-size,size],[0,0],'k-.') plt.plot([0,0],[-size,size],'k-.') if M > 0: if detect_mult == True: N_uniq, N_mult = unique_cpx_roots(N_roots,tol=tol) plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = mlab.find(N_mult>1) for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10) else: plt.plot(np.real(N_roots),np.imag(N_roots),'ko',mfc='None',ms=8) if N > 0: if detect_mult == True: D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol) plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = mlab.find(D_mult>1) for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10) else: plt.plot(np.real(D_roots),np.imag(D_roots),'kx',ms=8) if M - N < 0: plt.plot(0.0,0.0,'bo',mfc='None',ms=8) elif M - N > 0: plt.plot(0.0,0.0,'kx',ms=8) if abs(M - N) > 1: plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis([-size,size,-size,size]) return M,N def rect_conv(n,N_len): """ The theoretical result of convolving two rectangle sequences. The result is a triangle. The solution is based on pure analysis. Simply coded as opposed to efficiently coded. Parameters ---------- n : ndarray of time axis N_len : rectangle pulse duration Returns ------- y : ndarray of of output signal Examples -------- >>> n = arange(-5,20) >>> y = rect_conv(n,6) """ y = np.zeros(len(n)) for k in range(len(n)): if n[k] >= 0 and n[k] < N_len-1: y[k] = n[k] + 1 elif n[k] >= N_len-1 and n[k] <= 2*N_len-2: y[k] = 2*N_len-1-n[k] return y def biquad2(w_num, r_num, w_den, r_den): """ A biquadratic filter in terms of conjugate pole and zero pairs. Parameters ---------- w_num : zero frequency (angle) in rad/sample r_num : conjugate zeros radius w_den : pole frequency (angle) in rad/sample r_den : conjugate poles radius; less than 1 for stability Returns ------- b : ndarray of numerator coefficients a : ndarray of denominator coefficients Examples -------- b,a = biquad2(pi/4., 1, pi/4., 0.95) """ b = np.array([1, -2*r_num*np.cos(w_num), r_num**2]) a = np.array([1, -2*r_den*np.cos(w_den), r_den**2]) return b, a def plot_na(x,y,mode='stem'): pylab.figure(figsize=(5,2)) frame1 = pylab.gca() if mode.lower() == 'stem': pylab.stem(x,y) else: pylab.plot(x,y) frame1.axes.get_xaxis().set_visible(False) frame1.axes.get_yaxis().set_visible(False) pylab.show() def from_wav(filename): """ Read a wave file. 
A wrapper function for scipy.io.wavfile.read that also includes int16 to float [-1,1] scaling. Parameters ---------- filename : file name string Returns ------- fs : sampling frequency in Hz x : ndarray of signal samples normalized to [-1,1] Examples -------- >>> fs,x = from_wav('test_file.wav') """ fs, x = wavfile.read(filename) return fs, x/32767. def to_wav(filename,rate,x): """ Write a wave file. A wrapper function for scipy.io.wavfile.write that also includes int16 scaling and conversion. Assume input x is [-1,1] values. Parameters ---------- filename : file name string rate : sampling frequency in Hz x : ndarray of signal samples assumed to lie in [-1,1] Returns ------- Nothing : writes only the *.wav file Examples -------- >>> to_wav('test_file.wav', 8000, x) """ x16 = np.int16(x*32767) wavfile.write(filename, rate, x16) if __name__ == '__main__': b = CIC(10,1) print(b) """ x = np.random.randn(10) print(x) b = signal.remez(16,[0,.1,.2,.5], [1,0], [1,1], 1) w,H = signal.freqz(b,[1],512) plot(w,20*log10(abs(H))) figure(figsize=(6,4)) #plot(arange(0,len(b)),b) y = signal.lfilter(b, [1], x) print(y) zplane([1,1,1,1,1],[1,-.8],1.25) """
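# A minimal end-to-end BPSK sketch built from the routines above, kept as a
# comment so importing this module has no side effects. The Eb/N0 of 4 dB,
# the 10 samples per bit, and the 'src' shaping are illustration choices, not
# Case Study settings.
#
#   x,b,data = NRZ_bits(100000,10,'src')   # +/-1 shaped transmit waveform
#   y = cpx_AWGN(x,4,10)                   # complex AWGN channel at Eb/N0 = 4 dB
#   z = signal.lfilter(b,1,y)              # matched filter with the shaping taps
#   Pe_hat = bit_errors(z,data,10,10)      # decisions every Ns = 10 samples from n = 10
#   eye_plot(np.real(z),20,60)             # two-symbol eye diagram past the transients
#   yI,yQ = scatter(z,10,60)               # symbol-spaced samples for a scatter plot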
bsd-2-clause
shikhar413/openmc
openmc/filter.py
6
63054
from abc import ABCMeta from collections import OrderedDict from collections.abc import Iterable import hashlib from itertools import product from numbers import Real, Integral from xml.etree import ElementTree as ET import numpy as np import pandas as pd import openmc import openmc.checkvalue as cv from .cell import Cell from .material import Material from .mixin import IDManagerMixin from .surface import Surface from .universe import Universe _FILTER_TYPES = ( 'universe', 'material', 'cell', 'cellborn', 'surface', 'mesh', 'energy', 'energyout', 'mu', 'polar', 'azimuthal', 'distribcell', 'delayedgroup', 'energyfunction', 'cellfrom', 'legendre', 'spatiallegendre', 'sphericalharmonics', 'zernike', 'zernikeradial', 'particle', 'cellinstance' ) _CURRENT_NAMES = ( 'x-min out', 'x-min in', 'x-max out', 'x-max in', 'y-min out', 'y-min in', 'y-max out', 'y-max in', 'z-min out', 'z-min in', 'z-max out', 'z-max in' ) _PARTICLES = {'neutron', 'photon', 'electron', 'positron'} class FilterMeta(ABCMeta): """Metaclass for filters that ensures class names are appropriate.""" def __new__(cls, name, bases, namespace, **kwargs): # Check the class name. required_suffix = 'Filter' if not name.endswith(required_suffix): raise ValueError("All filter class names must end with 'Filter'") # Create a 'short_name' attribute that removes the 'Filter' suffix. namespace['short_name'] = name[:-len(required_suffix)] # Subclass methods can sort of inherit the docstring of parent class # methods. If a function is defined without a docstring, most (all?) # Python interpreters will search through the parent classes to see if # there is a docstring for a function with the same name, and they will # use that docstring. However, Sphinx does not have that functionality. # This chunk of code handles this docstring inheritance manually so that # the autodocumentation will pick it up. if name != required_suffix: # Look for newly-defined functions that were also in Filter. for func_name in namespace: if func_name in Filter.__dict__: # Inherit the docstring from Filter if not defined. if isinstance(namespace[func_name], (classmethod, staticmethod)): new_doc = namespace[func_name].__func__.__doc__ old_doc = Filter.__dict__[func_name].__func__.__doc__ if new_doc is None and old_doc is not None: namespace[func_name].__func__.__doc__ = old_doc else: new_doc = namespace[func_name].__doc__ old_doc = Filter.__dict__[func_name].__doc__ if new_doc is None and old_doc is not None: namespace[func_name].__doc__ = old_doc # Make the class. return super().__new__(cls, name, bases, namespace, **kwargs) def _repeat_and_tile(bins, repeat_factor, data_size): filter_bins = np.repeat(bins, repeat_factor) tile_factor = data_size // len(filter_bins) return np.tile(filter_bins, tile_factor) class Filter(IDManagerMixin, metaclass=FilterMeta): """Tally modifier that describes phase-space and other characteristics. Parameters ---------- bins : Integral or Iterable of Integral or Iterable of Real The bins for the filter. This takes on different meaning for different filters. See the docstrings for sublcasses of this filter or the online documentation for more details. 
filter_id : int Unique identifier for the filter Attributes ---------- bins : Integral or Iterable of Integral or Iterable of Real The bins for the filter id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ next_id = 1 used_ids = set() def __init__(self, bins, filter_id=None): self.bins = bins self.id = filter_id def __eq__(self, other): if type(self) is not type(other): return False elif len(self.bins) != len(other.bins): return False else: return np.allclose(self.bins, other.bins) def __gt__(self, other): if type(self) is not type(other): if self.short_name in _FILTER_TYPES and \ other.short_name in _FILTER_TYPES: delta = _FILTER_TYPES.index(self.short_name) - \ _FILTER_TYPES.index(other.short_name) return delta > 0 else: return False else: return max(self.bins) > max(other.bins) def __lt__(self, other): return not self > other def __hash__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tBins', self.bins) return hash(string) def __repr__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tBins', self.bins) string += '{: <16}=\t{}\n'.format('\tID', self.id) return string @classmethod def _recursive_subclasses(cls): """Return all subclasses and their subclasses, etc.""" all_subclasses = [] for subclass in cls.__subclasses__(): all_subclasses.append(subclass) all_subclasses.extend(subclass._recursive_subclasses()) return all_subclasses @classmethod def from_hdf5(cls, group, **kwargs): """Construct a new Filter instance from HDF5 data. Parameters ---------- group : h5py.Group HDF5 group to read from Keyword arguments ----------------- meshes : dict Dictionary mapping integer IDs to openmc.MeshBase objects. Only used for openmc.MeshFilter objects. """ filter_id = int(group.name.split('/')[-1].lstrip('filter ')) # If the HDF5 'type' variable matches this class's short_name, then # there is no overriden from_hdf5 method. Pass the bins to __init__. if group['type'][()].decode() == cls.short_name.lower(): out = cls(group['bins'][()], filter_id=filter_id) out._num_bins = group['n_bins'][()] return out # Search through all subclasses and find the one matching the HDF5 # 'type'. Call that class's from_hdf5 method. for subclass in cls._recursive_subclasses(): if group['type'][()].decode() == subclass.short_name.lower(): return subclass.from_hdf5(group, **kwargs) raise ValueError("Unrecognized Filter class: '" + group['type'][()].decode() + "'") @property def bins(self): return self._bins @bins.setter def bins(self, bins): self.check_bins(bins) self._bins = bins @property def num_bins(self): return len(self.bins) def check_bins(self, bins): """Make sure given bins are valid for this filter. Raises ------ TypeError ValueError """ pass def to_xml_element(self): """Return XML Element representing the Filter. Returns ------- element : xml.etree.ElementTree.Element XML element containing filter data """ element = ET.Element('filter') element.set('id', str(self.id)) element.set('type', self.short_name.lower()) subelement = ET.SubElement(element, 'bins') subelement.text = ' '.join(str(b) for b in self.bins) return element def can_merge(self, other): """Determine if filter can be merged with another. Parameters ---------- other : openmc.Filter Filter to compare with Returns ------- bool Whether the filter can be merged """ return type(self) is type(other) def merge(self, other): """Merge this filter with another. 
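For example (an illustration, not taken from the test suite), merging a MaterialFilter with bins [1, 2] into one with bins [2, 3] returns a new MaterialFilter with the unique, combined bins [1, 2, 3] and an auto-generated ID; the two original filters are left unchanged.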
Parameters ---------- other : openmc.Filter Filter to merge with Returns ------- merged_filter : openmc.Filter Filter resulting from the merge """ if not self.can_merge(other): msg = 'Unable to merge "{0}" with "{1}" '.format( type(self), type(other)) raise ValueError(msg) # Merge unique filter bins merged_bins = np.concatenate((self.bins, other.bins)) merged_bins = np.unique(merged_bins, axis=0) # Create a new filter with these bins and a new auto-generated ID return type(self)(merged_bins) def is_subset(self, other): """Determine if another filter is a subset of this filter. If all of the bins in the other filter are included as bins in this filter, then it is a subset of this filter. Parameters ---------- other : openmc.Filter The filter to query as a subset of this filter Returns ------- bool Whether or not the other filter is a subset of this filter """ if type(self) is not type(other): return False for b in other.bins: if b not in self.bins: return False return True def get_bin_index(self, filter_bin): """Returns the index in the Filter for some bin. Parameters ---------- filter_bin : int or tuple The bin is the integer ID for 'material', 'surface', 'cell', 'cellborn', and 'universe' Filters. The bin is an integer for the cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of floats for 'energy' and 'energyout' filters corresponding to the energy boundaries of the bin of interest. The bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of interest. Returns ------- filter_index : int The index in the Tally data array for this filter bin. """ if filter_bin not in self.bins: msg = 'Unable to get the bin index for Filter since "{0}" ' \ 'is not one of the bins'.format(filter_bin) raise ValueError(msg) if isinstance(self.bins, np.ndarray): return np.where(self.bins == filter_bin)[0][0] else: return self.bins.index(filter_bin) def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Keyword arguments ----------------- paths : bool Only used for DistribcellFilter. If True (default), expand distribcell indices into multi-index columns describing the path to that distribcell through the CSG tree. NOTE: This option assumes that all distribcell paths are of the same length and do not have the same universes and cells but different lattice cell indices. Returns ------- pandas.DataFrame A Pandas DataFrame with columns of strings that characterize the filter's bins. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. 
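For instance (a hypothetical layout), a CellFilter with bins [10, 20] in a tally containing six bins total, with a stride of 1, produces a single 'cell' column holding 10, 20, 10, 20, 10, 20.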
See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Initialize Pandas DataFrame df = pd.DataFrame() filter_bins = np.repeat(self.bins, stride) tile_factor = data_size // len(filter_bins) filter_bins = np.tile(filter_bins, tile_factor) df = pd.concat([df, pd.DataFrame( {self.short_name.lower(): filter_bins})]) return df class WithIDFilter(Filter): """Abstract parent for filters of types with IDs (Cell, Material, etc.).""" def __init__(self, bins, filter_id=None): bins = np.atleast_1d(bins) # Make sure bins are either integers or appropriate objects cv.check_iterable_type('filter bins', bins, (Integral, self.expected_type)) # Extract ID values bins = np.array([b if isinstance(b, Integral) else b.id for b in bins]) super().__init__(bins, filter_id) def check_bins(self, bins): # Check the bin values. for edge in bins: cv.check_greater_than('filter bin', edge, 0, equality=True) class UniverseFilter(WithIDFilter): """Bins tally event locations based on the Universe they occured in. Parameters ---------- bins : openmc.Universe, int, or iterable thereof The Universes to tally. Either openmc.Universe objects or their Integral ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral openmc.Universe IDs. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Universe class MaterialFilter(WithIDFilter): """Bins tally event locations based on the Material they occured in. Parameters ---------- bins : openmc.Material, Integral, or iterable thereof The Materials to tally. Either openmc.Material objects or their Integral ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral openmc.Material IDs. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Material class CellFilter(WithIDFilter): """Bins tally event locations based on the Cell they occured in. Parameters ---------- bins : openmc.Cell, int, or iterable thereof The cells to tally. Either openmc.Cell objects or their ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral openmc.Cell IDs. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Cell class CellFromFilter(WithIDFilter): """Bins tally on which Cell the neutron came from. Parameters ---------- bins : openmc.Cell, Integral, or iterable thereof The Cell(s) to tally. Either openmc.Cell objects or their Integral ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Integral or Iterable of Integral openmc.Cell IDs. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Cell class CellbornFilter(WithIDFilter): """Bins tally events based on which Cell the neutron was born in. Parameters ---------- bins : openmc.Cell, Integral, or iterable thereof The birth Cells to tally. Either openmc.Cell objects or their Integral ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral openmc.Cell IDs. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Cell class CellInstanceFilter(Filter): """Bins tally events based on which cell instance a particle is in. 
This filter is similar to :class:`DistribcellFilter` but allows one to select particular instances to be tallied (instead of obtaining *all* instances by default) and allows instances from different cells to be specified in a single filter. .. versionadded:: 0.12 Parameters ---------- bins : iterable of 2-tuples or numpy.ndarray The cell instances to tally, given as 2-tuples. For the first value in the tuple, either openmc.Cell objects or their integral ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : numpy.ndarray 2D numpy array of cell IDs and instances id : int Unique identifier for the filter num_bins : Integral The number of filter bins See Also -------- DistribcellFilter """ def __init__(self, bins, filter_id=None): self.bins = bins self.id = filter_id @Filter.bins.setter def bins(self, bins): pairs = np.empty((len(bins), 2), dtype=int) for i, (cell, instance) in enumerate(bins): cv.check_type('cell', cell, (openmc.Cell, Integral)) cv.check_type('instance', instance, Integral) pairs[i, 0] = cell if isinstance(cell, Integral) else cell.id pairs[i, 1] = instance self._bins = pairs def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Returns ------- pandas.DataFrame A Pandas DataFrame with a multi-index column for the cell instance. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Repeat and tile bins as necessary to account for other filters. bins = np.repeat(self.bins, stride, axis=0) tile_factor = data_size // len(bins) bins = np.tile(bins, (tile_factor, 1)) columns = pd.MultiIndex.from_product([[self.short_name.lower()], ['cell', 'instance']]) return pd.DataFrame(bins, columns=columns) def to_xml_element(self): """Return XML Element representing the Filter. Returns ------- element : xml.etree.ElementTree.Element XML element containing filter data """ element = ET.Element('filter') element.set('id', str(self.id)) element.set('type', self.short_name.lower()) subelement = ET.SubElement(element, 'bins') subelement.text = ' '.join(str(i) for i in self.bins.ravel()) return element class SurfaceFilter(WithIDFilter): """Filters particles by surface crossing Parameters ---------- bins : openmc.Surface, int, or iterable of Integral The surfaces to tally over. Either openmc.Surface objects or their ID numbers can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral The surfaces to tally over. Either openmc.Surface objects or their ID numbers can be used. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ expected_type = Surface class ParticleFilter(Filter): """Bins tally events based on the Particle type. Parameters ---------- bins : str, or iterable of str The particles to tally represented as strings ('neutron', 'photon', 'electron', 'positron'). 
filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral The Particles to tally id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ def __eq__(self, other): if type(self) is not type(other): return False elif len(self.bins) != len(other.bins): return False else: return np.all(self.bins == other.bins) __hash__ = Filter.__hash__ @Filter.bins.setter def bins(self, bins): bins = np.atleast_1d(bins) cv.check_iterable_type('filter bins', bins, str) for edge in bins: cv.check_value('filter bin', edge, _PARTICLES) self._bins = bins @classmethod def from_hdf5(cls, group, **kwargs): if group['type'][()].decode() != cls.short_name.lower(): raise ValueError("Expected HDF5 data for filter type '" + cls.short_name.lower() + "' but got '" + group['type'][()].decode() + " instead") particles = [b.decode() for b in group['bins'][()]] filter_id = int(group.name.split('/')[-1].lstrip('filter ')) return cls(particles, filter_id=filter_id) class MeshFilter(Filter): """Bins tally event locations onto a regular, rectangular mesh. Parameters ---------- mesh : openmc.MeshBase The mesh object that events will be tallied onto filter_id : int Unique identifier for the filter Attributes ---------- mesh : openmc.MeshBase The mesh object that events will be tallied onto id : int Unique identifier for the filter bins : list of tuple A list of mesh indices for each filter bin, e.g. [(1, 1, 1), (2, 1, 1), ...] num_bins : Integral The number of filter bins """ def __init__(self, mesh, filter_id=None): self.mesh = mesh self.id = filter_id def __hash__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id) return hash(string) def __repr__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id) string += '{: <16}=\t{}\n'.format('\tID', self.id) return string @classmethod def from_hdf5(cls, group, **kwargs): if group['type'][()].decode() != cls.short_name.lower(): raise ValueError("Expected HDF5 data for filter type '" + cls.short_name.lower() + "' but got '" + group['type'][()].decode() + " instead") if 'meshes' not in kwargs: raise ValueError(cls.__name__ + " requires a 'meshes' keyword " "argument.") mesh_id = group['bins'][()] mesh_obj = kwargs['meshes'][mesh_id] filter_id = int(group.name.split('/')[-1].lstrip('filter ')) out = cls(mesh_obj, filter_id=filter_id) return out @property def mesh(self): return self._mesh @mesh.setter def mesh(self, mesh): cv.check_type('filter mesh', mesh, openmc.MeshBase) self._mesh = mesh if isinstance(mesh, openmc.UnstructuredMesh): if mesh.volumes is None: self.bins = [] else: self.bins = list(range(len(mesh.volumes))) else: self.bins = list(mesh.indices) def can_merge(self, other): # Mesh filters cannot have more than one bin return False def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Returns ------- pandas.DataFrame A Pandas DataFrame with three columns describing the x,y,z mesh cell indices corresponding to each filter bin. 
The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Initialize Pandas DataFrame df = pd.DataFrame() # Initialize dictionary to build Pandas Multi-index column filter_dict = {} # Append mesh ID as outermost index of multi-index mesh_key = 'mesh {}'.format(self.mesh.id) # Find mesh dimensions - use 3D indices for simplicity n_dim = len(self.mesh.dimension) if n_dim == 3: nx, ny, nz = self.mesh.dimension elif n_dim == 2: nx, ny = self.mesh.dimension nz = 1 else: nx = self.mesh.dimension ny = nz = 1 # Generate multi-index sub-column for x-axis filter_dict[mesh_key, 'x'] = _repeat_and_tile( np.arange(1, nx + 1), stride, data_size) # Generate multi-index sub-column for y-axis filter_dict[mesh_key, 'y'] = _repeat_and_tile( np.arange(1, ny + 1), nx * stride, data_size) # Generate multi-index sub-column for z-axis filter_dict[mesh_key, 'z'] = _repeat_and_tile( np.arange(1, nz + 1), nx * ny * stride, data_size) # Initialize a Pandas DataFrame from the mesh dictionary df = pd.concat([df, pd.DataFrame(filter_dict)]) return df def to_xml_element(self): """Return XML Element representing the Filter. Returns ------- element : xml.etree.ElementTree.Element XML element containing filter data """ element = super().to_xml_element() element[0].text = str(self.mesh.id) return element class MeshSurfaceFilter(MeshFilter): """Filter events by surface crossings on a regular, rectangular mesh. Parameters ---------- mesh : openmc.MeshBase The mesh object that events will be tallied onto filter_id : int Unique identifier for the filter Attributes ---------- bins : Integral The mesh ID mesh : openmc.MeshBase The mesh object that events will be tallied onto id : int Unique identifier for the filter bins : list of tuple A list of mesh indices / surfaces for each filter bin, e.g. [(1, 1, 'x-min out'), (1, 1, 'x-min in'), ...] num_bins : Integral The number of filter bins """ @MeshFilter.mesh.setter def mesh(self, mesh): cv.check_type('filter mesh', mesh, openmc.MeshBase) self._mesh = mesh # Take the product of mesh indices and current names n_dim = mesh.n_dimension self.bins = [mesh_tuple + (surf,) for mesh_tuple, surf in product(mesh.indices, _CURRENT_NAMES[:4*n_dim])] def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Returns ------- pandas.DataFrame A Pandas DataFrame with three columns describing the x,y,z mesh cell indices corresponding to each filter bin. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. 
See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Initialize Pandas DataFrame df = pd.DataFrame() # Initialize dictionary to build Pandas Multi-index column filter_dict = {} # Append mesh ID as outermost index of multi-index mesh_key = 'mesh {}'.format(self.mesh.id) # Find mesh dimensions - use 3D indices for simplicity n_surfs = 4 * len(self.mesh.dimension) if len(self.mesh.dimension) == 3: nx, ny, nz = self.mesh.dimension elif len(self.mesh.dimension) == 2: nx, ny = self.mesh.dimension nz = 1 else: nx = self.mesh.dimension ny = nz = 1 # Generate multi-index sub-column for x-axis filter_dict[mesh_key, 'x'] = _repeat_and_tile( np.arange(1, nx + 1), n_surfs * stride, data_size) # Generate multi-index sub-column for y-axis if len(self.mesh.dimension) > 1: filter_dict[mesh_key, 'y'] = _repeat_and_tile( np.arange(1, ny + 1), n_surfs * nx * stride, data_size) # Generate multi-index sub-column for z-axis if len(self.mesh.dimension) > 2: filter_dict[mesh_key, 'z'] = _repeat_and_tile( np.arange(1, nz + 1), n_surfs * nx * ny * stride, data_size) # Generate multi-index sub-column for surface filter_dict[mesh_key, 'surf'] = _repeat_and_tile( _CURRENT_NAMES[:n_surfs], stride, data_size) # Initialize a Pandas DataFrame from the mesh dictionary return pd.concat([df, pd.DataFrame(filter_dict)]) class RealFilter(Filter): """Tally modifier that describes phase-space and other characteristics Parameters ---------- values : iterable of float A list of values for which each successive pair constitutes a range of values for a single bin filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of values for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of values indicating a filter bin range num_bins : int The number of filter bins """ def __init__(self, values, filter_id=None): self.values = np.asarray(values) self.bins = np.vstack((self.values[:-1], self.values[1:])).T self.id = filter_id def __gt__(self, other): if type(self) is type(other): # Compare largest/smallest bin edges in filters # This logic is used when merging tallies with real filters return self.values[0] >= other.values[-1] else: return super().__gt__(other) def __repr__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tValues', self.values) string += '{: <16}=\t{}\n'.format('\tID', self.id) return string @Filter.bins.setter def bins(self, bins): Filter.bins.__set__(self, np.asarray(bins)) def check_bins(self, bins): for v0, v1 in bins: # Values should be real cv.check_type('filter value', v0, Real) cv.check_type('filter value', v1, Real) # Make sure that each tuple has values that are increasing if v1 < v0: raise ValueError('Values {} and {} appear to be out of order' .format(v0, v1)) for pair0, pair1 in zip(bins[:-1], bins[1:]): # Successive pairs should be ordered if pair1[1] < pair0[1]: raise ValueError('Values {} and {} appear to be out of order' .format(pair1[1], pair0[1])) def can_merge(self, other): if type(self) is not type(other): return False if self.bins[0, 0] == other.bins[-1][1]: # This low edge coincides with other's high edge return True elif self.bins[-1][1] == other.bins[0, 0]: # This high edge coincides with other's low edge return True else: return False def merge(self, other): if not self.can_merge(other): msg = 'Unable to merge "{0}" with "{1}" ' \ 
'filters'.format(type(self), type(other)) raise ValueError(msg) # Merge unique filter bins merged_values = np.concatenate((self.values, other.values)) merged_values = np.unique(merged_values) # Create a new filter with these bins and a new auto-generated ID return type(self)(sorted(merged_values)) def is_subset(self, other): """Determine if another filter is a subset of this filter. If all of the bins in the other filter are included as bins in this filter, then it is a subset of this filter. Parameters ---------- other : openmc.Filter The filter to query as a subset of this filter Returns ------- bool Whether or not the other filter is a subset of this filter """ if type(self) is not type(other): return False elif self.num_bins != other.num_bins: return False else: return np.allclose(self.values, other.values) def get_bin_index(self, filter_bin): i = np.where(self.bins[:, 1] == filter_bin[1])[0] if len(i) == 0: msg = 'Unable to get the bin index for Filter since "{0}" ' \ 'is not one of the bins'.format(filter_bin) raise ValueError(msg) else: return i[0] def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Returns ------- pandas.DataFrame A Pandas DataFrame with one column of the lower energy bound and one column of upper energy bound for each filter bin. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Initialize Pandas DataFrame df = pd.DataFrame() # Extract the lower and upper energy bounds, then repeat and tile # them as necessary to account for other filters. lo_bins = np.repeat(self.bins[:, 0], stride) hi_bins = np.repeat(self.bins[:, 1], stride) tile_factor = data_size // len(lo_bins) lo_bins = np.tile(lo_bins, tile_factor) hi_bins = np.tile(hi_bins, tile_factor) # Add the new energy columns to the DataFrame. if hasattr(self, 'units'): units = ' [{}]'.format(self.units) else: units = '' df.loc[:, self.short_name.lower() + ' low' + units] = lo_bins df.loc[:, self.short_name.lower() + ' high' + units] = hi_bins return df def to_xml_element(self): """Return XML Element representing the Filter. Returns ------- element : xml.etree.ElementTree.Element XML element containing filter data """ element = super().to_xml_element() element[0].text = ' '.join(str(x) for x in self.values) return element class EnergyFilter(RealFilter): """Bins tally events based on incident particle energy. 
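The values are interpreted as ordered bin edges in [eV], so N + 1 edges define N bins; for example (edge values chosen purely for illustration), EnergyFilter([0.0, 0.625, 20.0e6]) defines two bins, (0.0, 0.625) eV and (0.625, 20.0e6) eV.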
Parameters ---------- values : Iterable of Real A list of values for which each successive pair constitutes a range of energies in [eV] for a single bin filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of energies in [eV] for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of energies in [eV] for a single filter bin num_bins : int The number of filter bins """ units = 'eV' def get_bin_index(self, filter_bin): # Use lower energy bound to find index for RealFilters deltas = np.abs(self.bins[:, 1] - filter_bin[1]) / filter_bin[1] min_delta = np.min(deltas) if min_delta < 1E-3: return deltas.argmin() else: msg = 'Unable to get the bin index for Filter since "{0}" ' \ 'is not one of the bins'.format(filter_bin) raise ValueError(msg) def check_bins(self, bins): super().check_bins(bins) for v0, v1 in bins: cv.check_greater_than('filter value', v0, 0., equality=True) cv.check_greater_than('filter value', v1, 0., equality=True) class EnergyoutFilter(EnergyFilter): """Bins tally events based on outgoing particle energy. Parameters ---------- values : Iterable of Real A list of values for which each successive pair constitutes a range of energies in [eV] for a single bin filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of energies in [eV] for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of energies in [eV] for a single filter bin num_bins : int The number of filter bins """ def _path_to_levels(path): """Convert distribcell path to list of levels Parameters ---------- path : str Distribcell path Returns ------- list List of levels in path """ # Split path into universes/cells/lattices path_items = path.split('->') # Pair together universe and cell information from the same level idx = [i for i, item in enumerate(path_items) if item.startswith('u')] for i in reversed(idx): univ_id = int(path_items.pop(i)[1:]) cell_id = int(path_items.pop(i)[1:]) path_items.insert(i, ('universe', univ_id, cell_id)) # Reformat lattice into tuple idx = [i for i, item in enumerate(path_items) if isinstance(item, str)] for i in idx: item = path_items.pop(i)[1:-1] lat_id, lat_xyz = item.split('(') lat_id = int(lat_id) lat_xyz = tuple(int(x) for x in lat_xyz.split(',')) path_items.insert(i, ('lattice', lat_id, lat_xyz)) return path_items class DistribcellFilter(Filter): """Bins tally event locations on instances of repeated cells. This filter provides a separate score for each unique instance of a repeated cell in a geometry. Note that only one cell can be specified in this filter. The related :class:`CellInstanceFilter` allows one to obtain scores for particular cell instances as well as instances from different cells. Parameters ---------- cell : openmc.Cell or Integral The distributed cell to tally. Either an openmc.Cell or an Integral cell ID number can be used. filter_id : int Unique identifier for the filter Attributes ---------- bins : Iterable of Integral An iterable with one element---the ID of the distributed Cell. 
id : int Unique identifier for the filter num_bins : int The number of filter bins paths : list of str The paths traversed through the CSG tree to reach each distribcell instance (for 'distribcell' filters only) See Also -------- CellInstanceFilter """ def __init__(self, cell, filter_id=None): self._paths = None super().__init__(cell, filter_id) @classmethod def from_hdf5(cls, group, **kwargs): if group['type'][()].decode() != cls.short_name.lower(): raise ValueError("Expected HDF5 data for filter type '" + cls.short_name.lower() + "' but got '" + group['type'][()].decode() + " instead") filter_id = int(group.name.split('/')[-1].lstrip('filter ')) out = cls(group['bins'][()], filter_id=filter_id) out._num_bins = group['n_bins'][()] return out @property def num_bins(self): # Need to handle number of bins carefully -- for distribcell tallies, we # need to know how many instances of the cell there are return self._num_bins @property def paths(self): return self._paths @Filter.bins.setter def bins(self, bins): # Format the bins as a 1D numpy array. bins = np.atleast_1d(bins) # Make sure there is only 1 bin. if not len(bins) == 1: msg = 'Unable to add bins "{0}" to a DistribcellFilter since ' \ 'only a single distribcell can be used per tally'.format(bins) raise ValueError(msg) # Check the type and extract the id, if necessary. cv.check_type('distribcell bin', bins[0], (Integral, openmc.Cell)) if isinstance(bins[0], openmc.Cell): bins = np.atleast_1d(bins[0].id) self._bins = bins @paths.setter def paths(self, paths): cv.check_iterable_type('paths', paths, str) self._paths = paths def can_merge(self, other): # Distribcell filters cannot have more than one bin return False def get_bin_index(self, filter_bin): # Filter bins for distribcells are indices of each unique placement of # the Cell in the Geometry (consecutive integers starting at 0). return filter_bin def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Keyword arguments ----------------- paths : bool If True (default), expand distribcell indices into multi-index columns describing the path to that distribcell through the CSG tree. NOTE: This option assumes that all distribcell paths are of the same length and do not have the same universes and cells but different lattice cell indices. Returns ------- pandas.DataFrame A Pandas DataFrame with columns describing distributed cells. The dataframe will have either: 1. a single column with the cell instance IDs (without summary info) 2. separate columns for the cell IDs, universe IDs, and lattice IDs and x,y,z cell indices corresponding to each (distribcell paths). The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally, with the filter bin appropriately tiled to map to the corresponding tally bins. 
See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ # Initialize Pandas DataFrame df = pd.DataFrame() level_df = None paths = kwargs.setdefault('paths', True) # Create Pandas Multi-index columns for each level in CSG tree if paths: # Distribcell paths require linked metadata from the Summary if self.paths is None: msg = 'Unable to construct distribcell paths since ' \ 'the Summary is not linked to the StatePoint' raise ValueError(msg) # Make copy of array of distribcell paths to use in # Pandas Multi-index column construction num_offsets = len(self.paths) paths = [_path_to_levels(p) for p in self.paths] # Loop over CSG levels in the distribcell paths num_levels = len(paths[0]) for i_level in range(num_levels): # Use level key as first index in Pandas Multi-index column level_key = 'level {}'.format(i_level + 1) # Create a dictionary for this level for Pandas Multi-index level_dict = OrderedDict() # Use the first distribcell path to determine if level # is a universe/cell or lattice level path = paths[0] if path[i_level][0] == 'lattice': # Initialize prefix Multi-index keys lat_id_key = (level_key, 'lat', 'id') lat_x_key = (level_key, 'lat', 'x') lat_y_key = (level_key, 'lat', 'y') lat_z_key = (level_key, 'lat', 'z') # Allocate NumPy arrays for each CSG level and # each Multi-index column in the DataFrame level_dict[lat_id_key] = np.empty(num_offsets) level_dict[lat_x_key] = np.empty(num_offsets) level_dict[lat_y_key] = np.empty(num_offsets) if len(path[i_level][2]) == 3: level_dict[lat_z_key] = np.empty(num_offsets) else: # Initialize prefix Multi-index keys univ_key = (level_key, 'univ', 'id') cell_key = (level_key, 'cell', 'id') # Allocate NumPy arrays for each CSG level and # each Multi-index column in the DataFrame level_dict[univ_key] = np.empty(num_offsets) level_dict[cell_key] = np.empty(num_offsets) # Populate Multi-index arrays with all distribcell paths for i, path in enumerate(paths): level = path[i_level] if level[0] == 'lattice': # Assign entry to Lattice Multi-index column level_dict[lat_id_key][i] = level[1] level_dict[lat_x_key][i] = level[2][0] level_dict[lat_y_key][i] = level[2][1] if len(level[2]) == 3: level_dict[lat_z_key][i] = level[2][2] else: # Assign entry to Universe, Cell Multi-index columns level_dict[univ_key][i] = level[1] level_dict[cell_key][i] = level[2] # Tile the Multi-index columns for level_key, level_bins in level_dict.items(): level_dict[level_key] = _repeat_and_tile( level_bins, stride, data_size) # Initialize a Pandas DataFrame from the level dictionary if level_df is None: level_df = pd.DataFrame(level_dict) else: level_df = pd.concat([level_df, pd.DataFrame(level_dict)], axis=1) # Create DataFrame column for distribcell instance IDs # NOTE: This is performed regardless of whether the user # requests Summary geometric information filter_bins = _repeat_and_tile( np.arange(self.num_bins), stride, data_size) df = pd.DataFrame({self.short_name.lower() : filter_bins}) # Concatenate with DataFrame of distribcell instance IDs if level_df is not None: level_df = level_df.dropna(axis=1, how='all') level_df = level_df.astype(np.int) df = pd.concat([level_df, df], axis=1) return df class MuFilter(RealFilter): """Bins tally events based on particle scattering angle. Parameters ---------- values : int or Iterable of Real A grid of scattering angles which events will binned into. Values represent the cosine of the scattering angle. If an iterable is given, the values will be used explicitly as grid points. 
If a single int is given, the range [-1, 1] will be divided up equally into that number of bins. filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of scattering angle cosines for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of scattering angle cosines for a single filter bin num_bins : Integral The number of filter bins """ def __init__(self, values, filter_id=None): if isinstance(values, Integral): values = np.linspace(-1., 1., values + 1) super().__init__(values, filter_id) def check_bins(self, bins): super().check_bins(bins) for x in np.ravel(bins): if not np.isclose(x, -1.): cv.check_greater_than('filter value', x, -1., equality=True) if not np.isclose(x, 1.): cv.check_less_than('filter value', x, 1., equality=True) class PolarFilter(RealFilter): """Bins tally events based on the incident particle's direction. Parameters ---------- values : int or Iterable of Real A grid of polar angles which events will binned into. Values represent an angle in radians relative to the z-axis. If an iterable is given, the values will be used explicitly as grid points. If a single int is given, the range [0, pi] will be divided up equally into that number of bins. filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of polar angles in [rad] for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of polar angles for a single filter bin id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ units = 'rad' def __init__(self, values, filter_id=None): if isinstance(values, Integral): values = np.linspace(0., np.pi, values + 1) super().__init__(values, filter_id) def check_bins(self, bins): super().check_bins(bins) for x in np.ravel(bins): if not np.isclose(x, 0.): cv.check_greater_than('filter value', x, 0., equality=True) if not np.isclose(x, np.pi): cv.check_less_than('filter value', x, np.pi, equality=True) class AzimuthalFilter(RealFilter): """Bins tally events based on the incident particle's direction. Parameters ---------- values : int or Iterable of Real A grid of azimuthal angles which events will binned into. Values represent an angle in radians relative to the x-axis and perpendicular to the z-axis. If an iterable is given, the values will be used explicitly as grid points. If a single int is given, the range [-pi, pi) will be divided up equally into that number of bins. 
filter_id : int Unique identifier for the filter Attributes ---------- values : numpy.ndarray An array of values for which each successive pair constitutes a range of azimuthal angles in [rad] for a single bin id : int Unique identifier for the filter bins : numpy.ndarray An array of shape (N, 2) where each row is a pair of azimuthal angles for a single filter bin num_bins : Integral The number of filter bins """ units = 'rad' def __init__(self, values, filter_id=None): if isinstance(values, Integral): values = np.linspace(-np.pi, np.pi, values + 1) super().__init__(values, filter_id) def check_bins(self, bins): super().check_bins(bins) for x in np.ravel(bins): if not np.isclose(x, -np.pi): cv.check_greater_than('filter value', x, -np.pi, equality=True) if not np.isclose(x, np.pi): cv.check_less_than('filter value', x, np.pi, equality=True) class DelayedGroupFilter(Filter): """Bins fission events based on the produced neutron precursor groups. Parameters ---------- bins : iterable of int The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses 6 precursor groups so a tally with all groups will have bins = [1, 2, 3, 4, 5, 6]. filter_id : int Unique identifier for the filter Attributes ---------- bins : iterable of int The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses 6 precursor groups so a tally with all groups will have bins = [1, 2, 3, 4, 5, 6]. id : int Unique identifier for the filter num_bins : Integral The number of filter bins """ def check_bins(self, bins): # Check the bin values. for g in bins: cv.check_greater_than('delayed group', g, 0) class EnergyFunctionFilter(Filter): """Multiplies tally scores by an arbitrary function of incident energy. The arbitrary function is described by a piecewise linear-linear interpolation of energy and y values. Values outside of the given energy range will be evaluated as zero. 
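For example (numbers purely illustrative), EnergyFunctionFilter([1.0, 10.0], [2.0, 4.0]) weights a score from a 5.5 eV event by 3.0, the linear-linear interpolant halfway between the two grid points, while events below 1.0 eV or above 10.0 eV contribute nothing.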
Parameters ---------- energy : Iterable of Real A grid of energy values in [eV] y : iterable of Real A grid of interpolant values in [eV] filter_id : int Unique identifier for the filter Attributes ---------- energy : Iterable of Real A grid of energy values in [eV] y : iterable of Real A grid of interpolant values in [eV] id : int Unique identifier for the filter num_bins : Integral The number of filter bins (always 1 for this filter) """ def __init__(self, energy, y, filter_id=None): self.energy = energy self.y = y self.id = filter_id def __eq__(self, other): if type(self) is not type(other): return False elif not all(self.energy == other.energy): return False else: return all(self.y == other.y) def __gt__(self, other): if type(self) is not type(other): if self.short_name in _FILTER_TYPES and \ other.short_name in _FILTER_TYPES: delta = _FILTER_TYPES.index(self.short_name) - \ _FILTER_TYPES.index(other.short_name) return delta > 0 else: return False else: return False def __lt__(self, other): if type(self) is not type(other): if self.short_name in _FILTER_TYPES and \ other.short_name in _FILTER_TYPES: delta = _FILTER_TYPES.index(self.short_name) - \ _FILTER_TYPES.index(other.short_name) return delta < 0 else: return False else: return False def __hash__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy) string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y) return hash(string) def __repr__(self): string = type(self).__name__ + '\n' string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy) string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y) string += '{: <16}=\t{}\n'.format('\tID', self.id) return string @classmethod def from_hdf5(cls, group, **kwargs): if group['type'][()].decode() != cls.short_name.lower(): raise ValueError("Expected HDF5 data for filter type '" + cls.short_name.lower() + "' but got '" + group['type'][()].decode() + " instead") energy = group['energy'][()] y = group['y'][()] filter_id = int(group.name.split('/')[-1].lstrip('filter ')) return cls(energy, y, filter_id=filter_id) @classmethod def from_tabulated1d(cls, tab1d): """Construct a filter from a Tabulated1D object. Parameters ---------- tab1d : openmc.data.Tabulated1D A linear-linear Tabulated1D object with only a single interpolation region. Returns ------- EnergyFunctionFilter """ cv.check_type('EnergyFunctionFilter tab1d', tab1d, openmc.data.Tabulated1D) if tab1d.n_regions > 1: raise ValueError('Only Tabulated1Ds with a single interpolation ' 'region are supported') if tab1d.interpolation[0] != 2: raise ValueError('Only linear-linar Tabulated1Ds are supported') return cls(tab1d.x, tab1d.y) @property def energy(self): return self._energy @property def y(self): return self._y @property def bins(self): raise AttributeError('EnergyFunctionFilters have no bins.') @property def num_bins(self): return 1 @energy.setter def energy(self, energy): # Format the bins as a 1D numpy array. energy = np.atleast_1d(energy) # Make sure the values are Real and positive. cv.check_type('filter energy grid', energy, Iterable, Real) for E in energy: cv.check_greater_than('filter energy grid', E, 0, equality=True) self._energy = energy @y.setter def y(self, y): # Format the bins as a 1D numpy array. y = np.atleast_1d(y) # Make sure the values are Real. 
cv.check_type('filter interpolant values', y, Iterable, Real) self._y = y @bins.setter def bins(self, bins): raise RuntimeError('EnergyFunctionFilters have no bins.') def to_xml_element(self): """Return XML Element representing the Filter. Returns ------- element : xml.etree.ElementTree.Element XML element containing filter data """ element = ET.Element('filter') element.set('id', str(self.id)) element.set('type', self.short_name.lower()) subelement = ET.SubElement(element, 'energy') subelement.text = ' '.join(str(e) for e in self.energy) subelement = ET.SubElement(element, 'y') subelement.text = ' '.join(str(y) for y in self.y) return element def can_merge(self, other): return False def is_subset(self, other): return self == other def get_bin_index(self, filter_bin): # This filter only has one bin. Always return 0. return 0 def get_pandas_dataframe(self, data_size, stride, **kwargs): """Builds a Pandas DataFrame for the Filter's bins. This method constructs a Pandas DataFrame object for the filter with columns annotated by filter bin information. This is a helper method for :meth:`Tally.get_pandas_dataframe`. Parameters ---------- data_size : int The total number of bins in the tally corresponding to this filter stride : int Stride in memory for the filter Returns ------- pandas.DataFrame A Pandas DataFrame with a column that is filled with a hash of this filter. EnergyFunctionFilters have only 1 bin so the purpose of this DataFrame column is to differentiate the filter from other EnergyFunctionFilters. The number of rows in the DataFrame is the same as the total number of bins in the corresponding tally. See also -------- Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe() """ df = pd.DataFrame() # There is no clean way of sticking all the energy, y data into a # DataFrame so instead we'll just make a column with the filter name # and fill it with a hash of the __repr__. We want a hash that is # reproducible after restarting the interpreter so we'll use hashlib.md5 # rather than the intrinsic hash(). hash_fun = hashlib.md5() hash_fun.update(repr(self).encode('utf-8')) out = hash_fun.hexdigest() # The full 16 bytes make for a really wide column. Just 7 bytes (14 # hex characters) of the digest are probably sufficient. out = out[:14] filter_bins = _repeat_and_tile(out, stride, data_size) df = pd.concat([df, pd.DataFrame( {self.short_name.lower(): filter_bins})]) return df
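# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the filter module above): assuming a
# standard OpenMC tally workflow, an EnergyFunctionFilter can be attached to a
# tally so that every score is weighted by the piecewise linear-linear function
# defined by `energy` and `y`. The numerical values below are placeholders
# chosen only for illustration.
import openmc

energy = [0.0, 1.0e6, 2.0e7]   # energy grid in [eV], increasing
y = [0.0, 0.5, 1.0]            # interpolant value at each grid point
ef_filter = openmc.EnergyFunctionFilter(energy, y)

tally = openmc.Tally(name='energy-weighted flux')
tally.filters = [ef_filter]
tally.scores = ['flux']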
mit
Gabriel-p/mcs_rot_angles
aux_modules/MCs_plane.py
1
38946
import numpy as np from astropy.coordinates import Distance, Angle, SkyCoord from astropy import units as u import numpy.linalg as la import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # Define main path. import os import sys r_path = os.path.realpath(__file__)[:-24] # print r_path sys.path.insert(0, r_path + '/modules/') from MCs_data import MCs_data def rho_phi(coord, glx_ctr): # Angular distance between point and center of galaxy. rho = coord.separation(glx_ctr) # Position angle between center and coordinates. This is the angle between # the positive y axis (North) counter-clockwise towards the negative x # axis (East). Phi = glx_ctr.position_angle(coord) # This is the angle measured counter-clockwise from the x positive axis # (West). phi = Phi + Angle('90d') return rho, phi def dist_filter(r_min, r_max, ra_g, dec_g, dm_g, e_dm_g, gal_cent): """ Filter clusters based on their projected angular distances 'rho'. Values are used as: (r_min, r_max] """ # Obtain angular projected distance and position angle for the # clusters in the galaxy. coords = SkyCoord(list(zip(*[ra_g, dec_g])), unit=(u.deg, u.deg)) rho_g, phi_g = rho_phi(coords, gal_cent) # age_f, d_d_f, e_dd_f ra_f, dec_f, dm_f, e_dm_f, rho_f, phi_f = [], [], [], [], [], [] dm_nf, rho_nf, phi_nf = [], [], [] for i, d in enumerate(ra_g): if r_min < rho_g[i].degree <= r_max: ra_f.append(ra_g[i]) dec_f.append(dec_g[i]) dm_f.append(dm_g[i]) e_dm_f.append(e_dm_g[i]) rho_f.append(rho_g[i].degree) phi_f.append(phi_g[i].degree) else: dm_nf.append(dm_g[i]) rho_nf.append(rho_g[i].degree) phi_nf.append(phi_g[i].degree) rho_f = Angle(rho_f, unit=u.deg) phi_f = Angle(phi_f, unit=u.deg) rho_nf = Angle(rho_nf, unit=u.deg) phi_nf = Angle(phi_nf, unit=u.deg) return ra_f, dec_f, rho_f, phi_f, dm_f, e_dm_f, rho_nf, phi_nf, dm_nf def xyz_coords(rho, phi, D_0, r_dist): ''' Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001) ''' d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000., unit=u.kpc) x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian) y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian) z = D_0 - d_kpc * np.cos(rho.radian) x, y, z = x.value, y.value, z.value return np.array([x, y, z]) def mvee(points, tol=0.001): """ Find the minimum volume ellipse. Return A, c where the equation for the ellipse given in "center form" is (x-c).T * A * (x-c) = 1 http://stackoverflow.com/a/14025140/1391441 """ points = np.asmatrix(points) N, d = points.shape Q = np.column_stack((points, np.ones(N))).T err = tol + 1.0 u = np.ones(N) / N while err > tol: # assert u.sum() == 1 # invariant X = Q * np.diag(u) * Q.T M = np.diag(Q.T * la.inv(X) * Q) jdx = np.argmax(M) step_size = (M[jdx] - d - 1.0) / ((d + 1) * (M[jdx] - 1.)) new_u = (1 - step_size) * u new_u[jdx] += step_size err = la.norm(new_u - u) u = new_u c = u * points A = la.inv(points.T * np.diag(u) * points - c.T * c) / d return np.asarray(A), np.squeeze(np.asarray(c)) def ellipse(rx, ry, rz, u, v): x = rx * np.cos(u) * np.cos(v) y = ry * np.sin(u) * np.cos(v) z = rz * np.sin(v) return x, y, z def get_ellipse(N_ran, rho, phi, D_0, dm_g, e_dm_g): """ See: http://stackoverflow.com/a/14025140/1391441 """ ellip_matrix, centers = [], [] for _ in range(N_ran): print(_) # Random draw. 
r_dist = np.random.normal(np.asarray(dm_g), np.asarray(e_dm_g)) # r_dist = np.asarray(dm_g) # DEL x, y, z = xyz_coords(rho, phi, D_0, r_dist) coords = np.asarray(list(zip(*[x, y, z]))) # A : (d x d) matrix of the ellipse equation in the 'center form': # (x-c)' * A * (x-c) = 1 # 'centroid' is the center coordinates of the ellipse. A, centroid = mvee(coords) ellip_matrix.append(A) centers.append(centroid) A = np.mean(ellip_matrix, axis=0) centroid = np.mean(centers, axis=0) # V is the rotation matrix that gives the orientation of the ellipsoid. # https://en.wikipedia.org/wiki/Rotation_matrix # http://mathworld.wolfram.com/RotationMatrix.html U, D, V = la.svd(A) # x, y, z radii. rx, ry, rz = 1. / np.sqrt(D) # print 'rads', rx/2., ry/2., rz/2. u_small, v_small = np.mgrid[0:2 * np.pi:20j, -np.pi / 2:np.pi / 2:10j] E = np.dstack(ellipse(rx, ry, rz, u_small, v_small)) E = np.dot(E, V) + centroid x_e, y_e, z_e = np.rollaxis(E, axis=-1) return x_e, y_e, z_e def inv_trans_eqs(x_p, y_p, z_p, theta, inc): """ Inverse set of equations. Transform inclined plane system (x',y',z') into face on sky system (x,y,z). """ x = x_p * np.cos(theta.radian) -\ y_p * np.cos(inc.radian) * np.sin(theta.radian) -\ z_p * np.sin(inc.radian) * np.sin(theta.radian) y = x_p * np.sin(theta.radian) +\ y_p * np.cos(inc.radian) * np.cos(theta.radian) +\ z_p * np.sin(inc.radian) * np.cos(theta.radian) z = -1. * y_p * np.sin(inc.radian) + z_p * np.cos(inc.radian) return x, y, z def make_plot(D_0, inc, theta, cl_xyz, cl_xyz_nf, dm_f, x_e, y_e, z_e): """ Original link for plotting intersecting planes: http://stackoverflow.com/a/14825951/1391441 """ # Make plot. fig = plt.figure() ax = Axes3D(fig) # Plot ellipse. # ax.plot_surface(x_e, z_e, y_e, cstride=1, rstride=1, alpha=0.05) # Plot points inside the ellipse, with no random displacement. x_cl, y_cl, z_cl = cl_xyz if cl_xyz.size: SC = ax.scatter(x_cl, z_cl, y_cl, c=dm_f, s=50) min_X, max_X = min(x_cl) - 2., max(x_cl) + 2. min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2. # # Plot points *outside* the ellipse, with no random displacement. # x_cl_nf, y_cl_nf, z_cl_nf = cl_xyz_nf # if cl_xyz_nf.size: # SC = ax.scatter(x_cl, z_cl_nf, y_cl_nf, c=dm_f, s=50) # x,y plane. X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y]) Z = np.zeros((2, 2)) # Plot x,y plane. ax.plot_surface(X, Z, Y, color='gray', alpha=.2, linewidth=0, zorder=1) # Axis of x,y plane. # x axis. ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4) # Arrow head pointing in the positive x direction. ax.quiver(max_X, 0., 0., max_X, 0., 0., length=0.3, arrow_length_ratio=1., color='k') # y axis. ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k') # Arrow head pointing in the positive y direction. ax.quiver(0., 0., max_Y, 0., 0., max_Y, length=0.3, arrow_length_ratio=1., color='k') ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k') # # # A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal. a, b, c, d = -1. * np.sin(theta.radian) * np.sin(inc.radian),\ np.cos(theta.radian) * np.sin(inc.radian),\ np.cos(inc.radian), 0. print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c) # Rotated plane. X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y]) Z2_t = (-a * X2_t - b * Y2_t) / c X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0]) Z2_b = (-a * X2_b - b * Y2_b) / c # Top half of first x',y' inclined plane. ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.2, lw=0, zorder=3) # Bottom half of inclined plane. ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.2, lw=0, zorder=-1) # Axis of x',y' plane. # x' axis. 
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, inc) x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, inc) ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b') # Arrow head pointing in the positive x' direction. ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3, arrow_length_ratio=1.2) # y' axis. x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, inc) x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, inc) ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g') # Arrow head pointing in the positive y' direction. ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3, arrow_length_ratio=1.2, color='g') # # # a, b, c, d = pts123_abcd # print 'a/c,b/c,1,d/c:', a / c, b / c, 1., d / c # # Plane obtained fitting cloud of clusters. # X3_t, Y3_t = np.meshgrid([min_X, max_X], [0, max_Y]) # Z3_t = (-a*X3_t - b*Y3_t - d) / c # X3_b, Y3_b = np.meshgrid([min_X, max_X], [min_Y, 0]) # Z3_b = (-a*X3_b - b*Y3_b - d) / c # # Top half of second x',y' inclined plane. # ax.plot_surface(X3_t, Z3_t, Y3_t, color='g', alpha=.2, lw=0, zorder=3) # # Bottom half of inclined plane. # ax.plot_surface(X3_b, Z3_b, Y3_b, color='g', alpha=.2, lw=0, zorder=-1) # # Axis of x',y' plane. # # x' axis. # x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta_pl, inc_pl) # x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta_pl, inc_pl) # ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b') # # Arrow head pointing in the positive x' direction. # ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3, # arrow_length_ratio=1.2) # # y' axis. # x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta_pl, inc_pl) # x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta_pl, inc_pl) # ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g') # # Arrow head pointing in the positive y' direction. # ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.3, # arrow_length_ratio=1.2, color='g') ax.set_xlabel('x (Kpc)') ax.set_ylabel('z (Kpc)') ax.set_ylim(max_Y, min_Y) ax.set_zlabel('y (Kpc)') plt.colorbar(SC, shrink=0.9, aspect=25) ax.axis('equal') ax.axis('tight') # ax.view_init(elev=35., azim=-25.) plt.show() # plt.savefig('MCs_bulge_plane.png', dpi=150) def plot_bulge_plane(ra_g, dec_g, dm_g, e_dm_g, D_0, gal_cent, glx_inc, theta): """ """ # Projected angular distance filter. r_min, r_max = 0., 20. ra_f, dec_f, rho_f, phi_f, dm_f, e_dm_f, rho_nf, phi_nf, dm_nf =\ dist_filter(r_min, r_max, ra_g, dec_g, dm_g, e_dm_g, gal_cent) cl_xyz = xyz_coords(rho_f, phi_f, D_0, dm_f) cl_xyz_nf = xyz_coords(rho_nf, phi_nf, D_0, dm_nf) # Number of times the error ellipse will be obtained after randomly # shifting the distance moduli. N_ran = 2 x_e, y_e, z_e = get_ellipse(N_ran, rho_f, phi_f, D_0, dm_f, e_dm_f) make_plot(D_0, glx_inc, theta, cl_xyz, cl_xyz_nf, dm_f, x_e, y_e, z_e) if __name__ == "__main__": """ 3D plot of bulge, clusters, and fitted planes. 
""" ra = [[15.5958333333333, 12.1375, 10.9333333333333, 17.225, 15.1416666666667, 15.0041666666667, 14.3416666666667, 13.5625, 357.245833333333, 15.1041666666667, 11.3583333333333, 16.0916666666667, 14.4583333333333, 16.8333333333333, 22.6583333333333, 9.425, 18.875, 18.2583333333333, 13.3541666666667, 24.0041666666667, 11.6833333333333, 23.75, 23.3083333333333, 17.5541666666667, 25.4291666666667, 4.60416666666667, 19.5666666666667, 25.5916666666667, 11.8, 15.2833333333333, 21.2333333333333, 11.475, 29.1833333333333, 18.0166666666667, 5.66666666666667, 14.45, 22.8833333333333, 14.4458333333333, 13.275, 25.6166666666667, 11.2166666666667, 13.1458333333333, 10.7458333333333, 13.8875, 15.9708333333333, 14.425, 6.17916666666667, 20.7, 18.2125, 27.3666666666667, 5.3625, 10.35, 23.6083333333333, 5.76666666666667, 17.0791666666667, 15.1458333333333, 27.5791666666667, 11.9583333333333, 16.0166666666667, 12.3625, 17.6958333333333, 15.2333333333333, 15.4916666666667, 11.5041666666667, 14.325, 15.2041666666667, 17.0583333333333, 14.0583333333333, 16.8791666666667, 16.7375, 13.6291666666667, 12.5875, 14.3333333333333, 15.35, 17.2583333333333, 12.325, 15.1375, 10.8875, 12.1541666666667, 11.775, 16.2583333333333, 11.7291666666667, 10.9083333333333, 12.05, 11.8541666666667, 12.0041666666667, 15.7958333333333, 17.2541666666667, 15.0583333333333], [76.4166666666667, 74.225, 82.9916666666667, 78.8541666666667, 76.1416666666667, 88.0458333333333, 72.3083333333333, 76.5083333333333, 78.8125, 87.7, 76.9458333333333, 77.3, 76.1375, 77.2208333333333, 73.9208333333333, 86.4583333333333, 76.9833333333333, 83.3333333333333, 72.7958333333333, 77.7333333333333, 86.0458333333333, 72.2791666666667, 72.5875, 77.625, 86.7166666666667, 71.8583333333333, 72.6208333333333, 78.9458333333333, 76.975, 74.725, 86.7125, 73.7625, 72.25, 94.3291666666667, 76.5375, 91.8708333333333, 74.5583333333333, 93.4833333333333, 72.1541666666667, 70.8083333333333, 73.4625, 79.7583333333333, 76.55, 82.9416666666667, 74.5416666666667, 76.9416666666667, 78.1041666666667, 74.3916666666667, 76.6416666666667, 85.9833333333333, 81.55, 74.9416666666667, 69.925, 79.5208333333333, 82.6416666666667, 74.9083333333333, 82.2083333333333, 95.3916666666667, 79.4541666666667, 67.6666666666667, 77.7958333333333, 85.375, 77.3958333333333, 76.4708333333333, 69.4125, 76.5125, 74.8083333333333, 76.6041666666667, 85.8958333333333, 82.4833333333333, 79.1666666666667, 77.7875, 78.45, 76.125, 82.925, 73.1875, 82.85, 72.7, 92.2208333333333, 93.9875, 88.8958333333333, 85.8333333333333, 74.1208333333333, 84.7833333333333, 80.05, 72.4125, 73.55, 71.5166666666667, 77.3458333333333, 77.9208333333333, 77.65, 81.3625, 74.7125, 81.1166666666667, 79.2333333333333, 81.125, 68.9083333333333, 93.6166666666667, 91.6291666666667, 74.3583333333333, 73.7541666666667, 83.0125, 86.55, 93.6708333333333, 74.9708333333333, 76.8958333333333, 79.2208333333333, 79.0708333333333, 77.9166666666667, 82.4416666666667, 77.3125, 83.2541666666667, 77.5083333333333, 83.6625, 74.5625, 80.4375, 79.6708333333333, 85.4916666666667, 71.6041666666667, 73.225, 76.9041666666667, 76.4, 86.4833333333333, 85.1083333333333, 77.6666666666667, 74.5916666666667, 77.7208333333333, 73.9666666666667, 82.3333333333333, 85.4541666666667, 77.6333333333333, 79.1125, 80.0083333333333, 71.875, 88.925, 85.6208333333333, 84.4416666666667, 86.3625, 80.8, 83.0958333333333, 77.2125, 82.9625, 76.8375, 76.6708333333333, 83.5541666666667, 82.4958333333333, 85.4125, 82.4125, 76.3541666666667, 76.6416666666667]] dec = 
[[-72.0030555555556, -73.3069444444444, -72.9766666666667, -73.2416666666667, -72.3655555555556, -72.3688888888889, -71.8913888888889, -72.2413888888889, -72.9452777777778, -71.2947222222222, -73.4813888888889, -72.8477777777778, -72.9436111111111, -73.3775, -76.0544444444444, -73.9083333333333, -71.1788888888889, -70.9627777777778, -72.1963888888889, -75.4577777777778, -72.0630555555556, -75.5566666666667, -74.1672222222222, -73.2091666666667, -71.1611111111111, -74.3186111111111, -72.0016666666667, -74.1733333333333, -73.4772222222222, -74.0736111111111, -71.1836111111111, -73.5066666666667, -74.2194444444445, -75.1975, -75.0747222222222, -73.4216666666667, -71.9527777777778, -74.3266666666667, -73.3802777777778, -71.2791666666667, -73.0019444444445, -72.1930555555556, -72.5886111111111, -74.0636111111111, -72.8261111111111, -74.4727777777778, -73.755, -75.0016666666667, -73.1194444444444, -73.7283333333333, -73.7486111111111, -72.8908333333333, -72.8744444444444, -73.6697222222222, -72.8841666666667, -71.4608333333333, -74.3561111111111, -73.4783333333333, -74.6191666666667, -73.3980555555556, -72.7919444444444, -73.1516666666667, -71.0202777777778, -73.3955555555556, -72.9327777777778, -73.3488888888889, -73.2569444444444, -74.1561111111111, -73.1197222222222, -73.235, -74.1852777777778, -73.3872222222222, -71.1702777777778, -73.2402777777778, -73.0863888888889, -73.3716666666667, -72.2583333333333, -73.4388888888889, -73.4155555555556, -73.3730555555556, -73.0427777777778, -73.4233333333333, -73.4405555555556, -73.4463888888889, -73.4580555555556, -73.4861111111111, -72.2736111111111, -73.2066666666667, -72.4583333333333], [-68.6394444444445, -68.0022222222222, -67.9716666666667, -68.6811111111111, -68.2083333333333, -71.8583333333333, -72.0566666666667, -68.0263888888889, -68.8825, -71.7077777777778, -66.7980555555556, -68.4441666666667, -67.9755555555556, -68.0836111111111, -67.7833333333333, -69.3802777777778, -67.3577777777778, -68.1522222222222, -67.5347222222222, -67.6266666666667, -69.3333333333333, -67.3416666666667, -72.8275, -68.4005555555556, -69.1897222222222, -67.6597222222222, -67.3258333333333, -69.1919444444445, -67.9288888888889, -67.8469444444445, -69.4197222222222, -67.9644444444444, -72.64, -70.0608333333333, -68.4458333333333, -72.4941666666667, -67.7683333333333, -72.5052777777778, -68.5594444444444, -73.8119444444444, -69.5719444444444, -69.0011111111111, -68.0630555555556, -68.2355555555556, -68.0602777777778, -67.8613888888889, -68.7719444444444, -65.2677777777778, -68.3630555555556, -69.1805555555556, -70.9813888888889, -69.8011111111111, -74.0172222222222, -69.1716666666667, -63.2033333333333, -69.5575, -71.6327777777778, -72.79, -68.4727777777778, -66.9569444444444, -67.6266666666667, -69.185, -67.8105555555556, -67.0494444444445, -66.1994444444444, -68.6283333333333, -67.9083333333333, -68.375, -66.2086111111111, -72.0547222222222, -70.5408333333333, -67.6825, -66.62, -68.3497222222222, -72.1461111111111, -72.5177777777778, -72.0425, -72.5766666666667, -72.3838888888889, -70.0730555555556, -62.3452777777778, -66.2622222222222, -67.6227777777778, -74.8533333333333, -68.9041666666667, -72.2480555555556, -69.8069444444444, -66.9113888888889, -67.7783333333333, -67.5655555555556, -70.4875, -73.5702777777778, -69.9577777777778, -67.7286111111111, -68.6827777777778, -67.6780555555556, -73.7316666666667, -72.6094444444445, -72.2263888888889, -67.6852777777778, -67.7141666666667, -64.2422222222222, -69.0825, -69.8019444444445, -67.9236111111111, -67.4608333333333, 
-69.15, -69.1541666666667, -68.7266666666667, -71.0005555555556, -67.6997222222222, -67.8491666666667, -66.6991666666667, -68.3055555555556, -68.0491666666667, -68.9172222222222, -69.0794444444444, -69.0475, -72.5683333333333, -72.1725, -68.5419444444444, -68.6286111111111, -69.2719444444444, -69.2486111111111, -68.7536111111111, -69.8030555555556, -67.4711111111111, -69.7058333333333, -70.5794444444445, -68.9208333333333, -66.94, -69.0802777777778, -69.2611111111111, -72.5883333333333, -74.3538888888889, -65.3627777777778, -74.7827777777778, -69.3452777777778, -70.7777777777778, -67.9969444444444, -67.9802777777778, -67.9911111111111, -66.8291666666667, -67.8422222222222, -67.8563888888889, -67.8788888888889, -69.2294444444444, -70.9838888888889, -68.5005555555556, -68.4272222222222]] dist_cent = [[1633.20060682, 2320.18925372, 2441.9621715, 1471.75739309, 2376.23276698, 769.377260812, 1217.10160803, 1804.52411396, 5941.22951677, 2131.04017704, 909.015121115, 3069.45508982, 2364.25345255, 2670.26753775, 4890.13489057, 2343.49016847, 3159.23688725, 2711.83412564, 1818.55754823, 4455.35179015, 1123.62228704, 4757.29044595, 3641.00953753, 2648.4875134, 4474.93618064, 3141.57483709, 3560.24707724, 4473.53808798, 1395.16822521, 1590.48289743, 4018.53487003, 1077.40931903, 5110.49822201, 3786.85646257, 4265.8368639, 949.628191291, 3943.07061992, 1767.67615703, 1299.74797416, 4641.567255, 2418.05685486, 897.32557739, 3039.33043949, 1461.11210457, 1901.98375256, 2165.3084445, 2985.15151754, 3722.76959311, 2000.6292342, 4885.31051319, 2625.48765461, 1076.03465615, 3317.63420596, 2482.45477321, 1670.70686978, 1704.67888631, 4679.63812859, 3032.23144391, 2116.49256805, 2422.17347074, 1542.38398274, 3014.72270399, 2369.65890352, 1867.37485958, 688.161984621, 2390.12368317, 1417.8418462, 2759.56312304, 1647.94174103, 2532.76451148, 2666.69527657, 1799.77351614, 1911.04658283, 2378.98229824, 1319.33831563, 646.650576733, 1952.83158151, 2437.45811475, 1828.61711731, 1838.06758207, 1147.07949676, 2459.7173664, 1123.54113051, 1845.94527547, 2373.7888037, 2374.78095982, 1177.87078532, 1347.286255, 724.047475304], [2094.92442356, 2592.17806506, 1926.01374032, 1393.13441938, 2375.35464572, 3301.57725704, 3191.52180512, 1846.0135317, 716.76297379, 3154.98748878, 3076.59112567, 2026.05665608, 2404.83900491, 1555.09594563, 2707.74040478, 2869.88446607, 2126.74995801, 1875.64558761, 3013.12651671, 1938.91000725, 2787.32652108, 3248.71116208, 3895.8901948, 2000.81445911, 2338.12168352, 3264.69637398, 3074.99893799, 2636.47366843, 2026.34617439, 2555.05591583, 1987.45137574, 2962.78454192, 3645.08442741, 4482.58313968, 1839.78891743, 4308.71233216, 2341.82811636, 4655.60143549, 2645.31421226, 4649.97712828, 2659.06169124, 495.958510033, 1712.84745196, 2951.12877171, 2751.29010002, 1869.62116249, 1815.09401991, 4178.76760298, 1500.54520148, 1794.75124939, 2529.48578966, 1640.54629482, 5595.45850131, 776.552314833, 5577.42226175, 2249.44973927, 3280.94442372, 5192.25922245, 2306.05901115, 4799.58913132, 1930.71867668, 1618.96956185, 2738.38874315, 2453.83795634, 4563.65453258, 1390.93463993, 2210.73606753, 2156.22099955, 3489.11690674, 2339.47934036, 1913.24925842, 1765.47926278, 2664.78715134, 1747.9825195, 2559.15521594, 3985.98208869, 2470.42710796, 3449.61772162, 4247.88799427, 4176.76382235, 6981.56554962, 3438.49595369, 2544.39520164, 4846.09325026, 859.207154012, 4248.51583614, 3334.30573069, 3728.40654495, 1759.97165254, 2178.23559988, 1194.19833024, 3858.08899466, 1864.27857663, 1561.90783843, 
2706.82540924, 1747.18252909, 4836.99690823, 4641.18832985, 4140.00763706, 2964.26958036, 3078.63612913, 4672.26090061, 2310.20822426, 4180.41248267, 2162.15029365, 2072.33397643, 1238.51032921, 832.357478845, 1488.41649353, 2593.59405836, 1824.3311299, 2309.75855988, 2853.29775447, 2151.58363423, 2434.44194297, 2082.03137409, 1198.78596379, 1997.28834526, 3626.80120077, 3532.99815015, 2924.8945852, 2101.1318558, 2466.86972041, 2506.56843199, 2352.67147924, 2754.38598176, 1932.30496879, 2459.48921061, 1639.6977746, 1695.42812081, 2380.69363485, 2640.55720245, 2121.15585755, 3588.5380412, 4855.28448834, 4857.60200313, 4765.37072491, 1887.71370931, 1942.00303845, 1926.04452804, 2570.73288389, 1574.74408319, 3067.96627803, 1827.77703955, 1778.39064842, 2280.49016348, 1949.46253097, 2136.73909356, 1495.06656829, 1870.72119252]] e_d_cent = [[1039.173740299393, 1435.9536198970777, 1778.3059807875993, 833.5600644307985, 944.8627199983013, 9.540124318824205, 823.8633204223586, 1401.4181419730958, 827.5312450161011, 1163.202855888643, 13.237829767759157, 1792.3207837264274, 1560.168479675753, 1148.1705423534424, 755.9245287150616, 892.191969353227, 884.4665374591882, 415.35433225392086, 1875.7404289875428, 442.41685887935273, 797.3852853246149, 893.6966802217544, 715.515865771003, 1468.0163280348056, 180.07593240624257, 310.93678012208176, 1539.3048577844493, 645.5935589311879, 1221.0569425039175, 622.8771608179757, 777.3761195766289, 830.7302823568057, 74.42330047098997, 821.8292306571432, 1103.510336489361, 940.0405957271148, 822.4108254121722, 608.4887378615729, 1148.9388310016825, 587.5570422769338, 1526.421120290217, 1168.2108316701813, 1273.2683945252095, 393.432086570192, 1557.4353087829875, 844.2681890359437, 770.8856764819485, 760.2515378456518, 910.6248838134877, 598.6815415818562, 38.23452198067387, 978.0345113217967, 34.163250596628764, 36.151558902120726, 1015.1877769341709, 494.3376980922027, 68.14875955525937, 1541.2904129789106, 17.571308861202773, 1792.3673345822394, 458.22021965535055, 1283.3482722452554, 825.5956710209496, 1353.1526557220127, 1291.585217521375, 1159.6867266474756, 599.8811014721533, 1580.683975865727, 857.1852247427792, 1537.8170745504758, 1243.6395933750302, 1169.2812348399343, 365.9048145832149, 1640.885818719533, 13.585851453369786, 5.368550393503098, 1899.2919526321182, 1839.912870204379, 1621.6520186557234, 1144.4604372844262, 875.8209539282942, 1765.885395992803, 762.8973515574974, 1847.1410426886998, 1644.595816646902, 1643.8858310928317, 489.95206768810164, 16.70608038529138, 10.544177978144234], [1785.2629840823188, 1041.566284758639, 1467.6674047933768, 1923.8136494672647, 1716.0321102312591, 915.244895489895, 221.1675461512587, 861.867312378096, 840.6042035290191, 844.2414907308923, 1314.2330413768143, 1770.5151998468243, 1633.4007149260217, 368.65492682278284, 1058.123240416804, 1840.4439280460017, 284.2266672489778, 1506.0332777066492, 514.4581837549737, 878.2567435346746, 1708.3601232217165, 472.68214070414155, 997.4875266424194, 1954.127788865912, 1228.248441899879, 856.8220621495436, 221.15248443469196, 2353.736947917064, 1397.013963090411, 1120.4495754771256, 299.74838064309364, 1170.9093515493155, 414.38923232920945, 912.2142994105723, 1534.6450360693136, 645.6729294664426, 264.5515241024804, 345.43316624042524, 247.75738755022581, 653.3364213144663, 1445.3983698014852, 1050.9281645823662, 344.67425076422523, 1995.0077769507548, 1475.4497099997725, 825.5266504486219, 2150.14206872408, 454.5676595110679, 368.51923389480214, 336.38429850370164, 
1890.5055794165903, 351.828770708083, 994.9605893924067, 2229.0283582517486, 548.3166216178901, 1742.920966906669, 1984.4888736732746, 566.106410249732, 2434.639259730216, 622.6818281879467, 936.9663122493897, 318.60972914001735, 1931.1872592082586, 165.28422562460204, 89.07313767059404, 423.11626224110586, 243.6303481764325, 1816.3310639967779, 435.7639805821094, 270.7290811126059, 2139.7993528686457, 310.6312946105225, 629.1766510798647, 907.0254731098203, 580.6065658166992, 1309.0795526339405, 681.692149849808, 155.04438853570673, 92.31684124911177, 136.71413051358266, 630.9594773060937, 443.11461492136965, 209.3880413379435, 110.5204345895767, 2013.1230449025077, 1518.1460692179344, 1951.8103229039705, 762.6417741189457, 290.3259190547951, 1400.7507173640981, 442.85762169709034, 1086.835709620925, 914.7013000175225, 367.2400890475026, 2182.627764031268, 1037.4172952821752, 110.51264193361975, 198.1143051051561, 698.273859249986, 1365.9485757880254, 1122.979469109, 413.50630387776516, 1243.5706321936586, 692.719607065467, 229.5825564846332, 261.3031238931711, 2347.1190868041, 2194.0422166832936, 1670.1045987321418, 1841.9016208506491, 299.8714988772865, 1624.557624100974, 1063.2981681602307, 1820.151128560012, 1081.256744265969, 2280.823993202776, 2062.179945562474, 1302.6619137307714, 206.9994601689073, 1212.1724246853282, 2013.6803279587316, 1956.0130420885787, 1593.5985662002408, 1725.1768982775568, 2138.5128286813383, 1919.6897087683137, 315.76110544137276, 1470.8243533267491, 1766.3716811308943, 332.203147610922, 224.8820964437035, 2478.4850127560644, 2377.8401976977834, 207.7094605398353, 132.62699690261672, 1238.1214902654456, 110.50946129093892, 293.3991946533112, 2113.9358140607815, 1403.4104181381058, 1943.7259970491498, 351.0171521341531, 1318.071612339641, 331.1161250625808, 286.95452010783447, 1695.0764266614954, 1387.1609196274565, 1682.152980716172, 395.94052375974496, 1337.190585009628]] aarr = [[[9.0, 8.9, 8.75, 9.9, 8.2, 8.1, 9.1, 7.9, 9.6, 9.6, 8.9, 9.6, 9.1, 9.2, 9.2, 9.45, 9.1, 9.3, 7.4, 9.6, 9.85, 9.1, 9.7, 9.55, 9.35, 9.15, 9.4, 9.0, 8.45, 9.7, 8.95, 8.0, 7.95, 9.45, 9.45, 9.65, 9.0, 9.05, 8.8, 9.3, 9.05, 8.0, 8.9, 9.5, 7.8, 9.8, 9.15, 9.8, 9.6, 9.55, 9.6, 9.45, 9.7, 9.75, 8.95, 9.6, 8.2, 8.8, 9.2, 8.7, 8.3, 8.4, 9.1, 8.5, 8.8, 8.35, 8.75, 9.6, 8.8, 8.3, 9.4, 8.3, 8.4, 8.0, 8.0, 8.5, 7.8, 8.7, 8.0, 8.4, 8.0, 8.1, 8.0, 8.0, 8.2, 6.9, 7.0, 7.2, 6.7], [9.20411998265592, 9.14612803567824, 8.79934054945358, 9.82607480270083, 8.04139268515822, 8.09691001300806, 8.79934054945358, 7.90308998699194, 9.77815125038364, 9.73239375982297, 9.17609125905568, 9.36172783601759, 9.30102999566398, 9.39794000867204, 9.20411998265592, 9.32221929473392, 9.10037054511756, 9.44715803134222, 7.39794000867204, 9.81291335664286, 9.77815125038364, 9.04139268515822, 9.47712125471966, 9.73239375982297, 9.07918124604762, 9.06069784035361, 9.32221929473392, 9.04139268515822, 8.39794000867204, 9.96848294855393, 8.77815125038364, 8.20411998265593, 8.04139268515822, 9.57978359661681, 9.61278385671973, 9.49136169383427, 9.14612803567824, 9.30102999566398, 8.5051499783199, 9.25527250510331, 9.07918124604762, 8.14612803567824, 9.0, 9.68124123737559, 7.39794000867204, 9.73239375982297, 9.30102999566398, 9.67209785793572, 9.63346845557959, 9.77815125038364, 9.49136169383427, 9.32221929473392, 9.80617997398389, 9.51851393987789, 9.07918124604762, 9.77815125038364, 8.14612803567824, 8.69897000433602, 9.44715803134222, 8.35, 8.25, 8.4, 9.15, 8.4, 8.65, 8.1, 8.65, 9.45, 8.45, 8.1, 9.25, 8.4, 7.9, 8.1, 8.3, 
8.05, 7.7, 8.35, 7.9, 8.1, 8.0, 7.9, 7.8, 8.1, 8.4, 8.34242268082221, 7.9, 8.15, 8.1]], [[8.45, 8.2, 8.2, 8.7, 8.9, 9.3, 8.9, 9.15, 8.4, 8.6, 8.8, 9.1, 8.95, 8.8, 8.3, 8.6, 9.0, 9.5, 8.8, 8.6, 8.7, 9.1, 9.05, 9.1, 9.2, 8.6, 9.0, 8.7, 8.3, 9.05, 9.1, 9.1, 9.0, 9.0, 8.2, 9.2, 8.9, 9.05, 9.05, 9.3, 9.1, 9.15, 8.65, 9.4, 9.2, 9.05, 8.8, 9.5, 7.9, 9.0, 8.4, 8.95, 9.2, 8.3, 9.4, 8.3, 8.9, 9.05, 9.5, 9.2, 9.05, 8.0, 8.2, 9.1, 9.15, 8.3, 8.9, 8.65, 9.1, 8.85, 9.1, 8.7, 8.8, 9.0, 9.3, 9.25, 8.75, 9.05, 9.3, 9.4, 9.3, 9.45, 8.95, 9.3, 8.0, 9.3, 9.1, 8.65, 9.1, 8.6, 8.95, 9.4, 9.15, 9.5, 8.75, 8.8, 9.55, 9.15, 9.2, 9.1, 9.15, 9.2, 9.45, 9.2, 9.15, 9.0, 8.8, 8.0, 8.0, 9.5, 7.9, 9.0, 8.3, 8.8, 8.4, 8.8, 8.85, 8.1, 9.55, 8.3, 9.15, 8.8, 8.2, 7.9, 8.8, 8.3, 8.9, 8.1, 8.3, 8.0, 8.4, 8.7, 8.6, 9.25, 9.2, 9.45, 9.25, 8.8, 9.1, 8.8, 9.55, 7.9, 8.6, 9.1, 7.0, 6.7, 7.7, 7.3, 8.85, 7.5], [8.0, 8.1, 8.25, 8.69897000433602, 9.14612803567824, 9.30102999566398, 9.14612803567824, 9.11394335230684, 8.20411998265593, 8.5051499783199, 8.95424250943932, 9.1, 8.75, 8.60205999132796, 8.35, 8.45, 9.04139268515822, 9.39794000867204, 8.60205999132796, 8.69897000433602, 8.7, 9.09691001300805, 9.11394335230684, 8.89762709129044, 9.30102999566398, 8.55, 8.9, 8.75, 8.25, 9.23044892137827, 9.0, 8.89762709129044, 9.04139268515822, 9.14612803567824, 8.0, 9.25527250510331, 8.69897000433602, 9.0, 8.84509804001426, 9.20411998265592, 9.0, 9.11394335230684, 8.55, 9.23044892137827, 9.14612803567824, 9.17609125905568, 8.69897000433602, 9.34242268082221, 7.9, 9.09691001300805, 8.4, 9.20411998265592, 9.39794000867204, 8.3, 9.41497334797082, 8.05, 8.95424250943932, 8.79934054945358, 9.20411998265592, 9.20411998265592, 9.04139268515822, 8.25, 8.25, 9.14612803567824, 9.17609125905568, 8.09691001300806, 8.69897000433602, 8.5051499783199, 9.17609125905568, 8.84509804001426, 9.17609125905568, 8.60205999132796, 8.79934054945358, 8.7481880270062, 9.27875360095283, 9.20411998265592, 8.60205999132796, 8.95424250943932, 9.23044892137827, 9.14612803567824, 9.38021124171161, 9.36172783601759, 8.85125834871907, 9.25527250510331, 8.2, 9.20411998265592, 9.11394335230684, 8.6, 9.14612803567824, 8.60205999132796, 9.07918124604762, 9.25527250510331, 9.17609125905568, 9.34242268082221, 8.65321251377534, 8.69897000433602, 9.39794000867204, 9.04139268515822, 9.25527250510331, 9.20411998265592, 9.20411998265592, 9.23044892137827, 9.36172783601759, 9.23044892137827, 9.17609125905568, 9.14612803567824, 8.3, 8.3, 7.69897000433602, 9.25527250510331, 8.14612803567824, 9.0, 8.14612803567824, 8.60205999132796, 8.45, 8.55, 8.9, 8.2, 9.30102999566398, 8.39794000867204, 9.07918124604762, 8.60205999132796, 8.1, 8.25, 8.39794000867204, 8.45, 8.65321251377534, 7.95, 8.11394335230684, 8.39794000867204, 8.0, 8.20411998265593, 8.1, 9.11394335230684, 9.04139268515822, 9.47712125471966, 9.20411998265592, 8.60205999132796, 9.20411998265592, 8.79934054945358, 9.25527250510331, 8.15, 8.34242268082221, 9.0, 8.15, 8.3, 8.25, 7.9, 7.69897000433602, 8.35]]] darr = [[[18.92, 18.88, 19.04, 18.98, 18.88, 18.96, 18.94, 18.9, 19.06, 19.0, 18.96, 19.06, 19.04, 19.04, 18.88, 18.9, 19.02, 18.98, 18.9, 19.0, 18.98, 18.88, 19.0, 18.88, 18.94, 18.98, 18.86, 19.02, 18.92, 18.94, 19.04, 18.98, 18.96, 19.04, 18.86, 18.98, 18.88, 18.98, 19.0, 19.0, 19.04, 18.98, 19.06, 18.94, 18.9, 19.0, 19.02, 19.02, 19.0, 19.02, 18.96, 18.98, 18.96, 18.96, 18.92, 18.94, 18.96, 19.06, 18.96, 19.04, 18.94, 19.06, 18.92, 18.9, 18.98, 18.88, 18.94, 19.04, 18.92, 18.88, 18.88, 18.9, 18.94, 18.88, 18.96, 18.96, 19.02, 
18.88, 18.9, 18.9, 18.94, 19.04, 18.94, 18.9, 18.88, 18.88, 18.94, 18.96, 18.96], [18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9, 18.9]], [[18.42, 18.54, 18.44, 18.44, 18.56, 18.54, 18.48, 18.46, 18.48, 18.54, 18.56, 18.42, 18.42, 18.48, 18.44, 18.58, 18.48, 18.44, 18.52, 18.52, 18.58, 18.52, 18.42, 18.42, 18.54, 18.44, 18.48, 18.6, 18.44, 18.54, 18.48, 18.56, 18.52, 18.42, 18.44, 18.44, 18.48, 18.52, 18.48, 18.44, 18.56, 18.48, 18.5, 18.6, 18.56, 18.46, 18.42, 18.46, 18.48, 18.48, 18.58, 18.48, 18.6, 18.52, 18.44, 18.42, 18.6, 18.44, 18.58, 18.54, 18.52, 18.5, 18.58, 18.5, 18.5, 18.48, 18.5, 18.42, 18.52, 18.48, 18.56, 18.5, 18.52, 18.46, 18.52, 18.58, 18.52, 18.5, 18.5, 18.5, 18.42, 18.52, 18.5, 18.5, 18.52, 18.6, 18.6, 18.44, 18.5, 18.54, 18.5, 18.42, 18.52, 18.48, 18.6, 18.52, 18.5, 18.48, 18.44, 18.56, 18.56, 18.46, 18.54, 18.44, 18.5, 18.5, 18.54, 18.52, 18.44, 18.58, 18.5, 18.42, 18.54, 18.42, 18.44, 18.4, 18.44, 18.44, 18.48, 18.56, 18.6, 18.42, 18.42, 18.4, 18.58, 18.58, 18.48, 18.42, 18.54, 18.48, 18.5, 18.6, 18.58, 18.48, 18.5, 18.6, 18.5, 18.48, 18.42, 18.44, 18.4, 18.5, 18.56, 18.48, 18.5, 18.56, 18.44, 18.42, 18.48, 18.54], [18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, -9999999999.9, -9999999999.9, -9999999999.9, -9999999999.9, 18.5, 18.5, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, -9999999999.9, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5, 18.5]]] dsigma = [[[0.05, 0.05, 0.06, 0.07, 0.03, 0.05, 0.06, 0.05, 0.05, 0.07, 0.06, 0.06, 0.05, 0.04, 0.06, 0.04, 0.05, 0.06, 0.07, 0.05, 0.05, 0.07, 0.07, 0.06, 0.06, 0.05, 0.07, 0.05, 0.05, 0.06, 0.04, 0.05, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.04, 0.07, 0.05, 0.06, 0.04, 0.03, 0.06, 0.05, 0.04, 0.05, 0.05, 0.05, 0.06, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.05, 0.03, 0.06, 0.04, 0.04, 0.06, 0.05, 0.05, 0.04, 0.05, 0.06, 0.04, 0.06, 0.05, 0.04, 0.04, 0.06, 0.04, 0.03, 0.07, 0.07, 0.06, 0.04, 0.06, 0.06, 0.05, 0.07, 0.06, 0.06, 0.03, 0.05, 0.06], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]], [[0.05, 0.05, 0.06, 0.05, 0.06, 0.07, 0.06, 0.04, 0.07, 0.05, 0.06, 0.04, 0.06, 0.05, 0.06, 0.06, 0.05, 0.06, 0.05, 0.06, 0.04, 0.05, 0.05, 0.06, 0.06, 0.05, 0.04, 0.05, 0.06, 0.06, 0.05, 0.03, 0.05, 0.06, 0.06, 0.03, 0.05, 0.06, 0.06, 0.06, 0.05, 0.04, 0.07, 0.04, 0.06, 0.03, 0.06, 0.06, 0.04, 0.06, 0.04, 0.05, 0.04, 0.06, 0.04, 0.06, 0.06, 0.03, 0.07, 0.07, 0.07, 0.05, 0.06, 0.03, 0.04, 0.06, 0.06, 0.06, 0.05, 0.06, 0.06, 0.06, 0.06, 0.04, 0.04, 0.06, 0.06, 0.06, 0.04, 0.06, 0.05, 0.05, 0.06, 0.05, 0.06, 0.06, 0.06, 0.05, 0.05, 0.07, 0.05, 0.07, 0.06, 0.05, 0.04, 0.07, 0.05, 0.07, 0.05, 0.06, 0.03, 0.05, 0.06, 0.05, 0.05, 0.06, 0.06, 0.07, 0.03, 0.04, 0.06, 0.05, 0.07, 0.06, 0.04, 0.05, 0.03, 0.04, 0.04, 0.07, 0.04, 0.07, 0.06, 0.02, 0.05, 0.06, 0.06, 0.04, 0.06, 0.04, 0.06, 0.06, 0.05, 0.04, 0.06, 0.05, 0.05, 0.02, 0.07, 0.05, 0.06, 0.06, 0.06, 0.06, 0.05, 0.05, 0.05, 0.04, 0.06, 0.03], [0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, -9999999999.9, -9999999999.9, -9999999999.9, -9999999999.9, 0.1, 0.1, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, -9999999999.9, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]] j = 1 inc_pa_fit_angles = [[Angle('40.d'), Angle('108.d')], [Angle('18.336d'), Angle('153.06d')]] glx_inc, glx_PA = inc_pa_fit_angles[j] # Equatorial coordinates for clusters in this galaxy. ra_g, dec_g = ra[j], dec[j] # ASteCA distance moduli and their errors. dm_g, e_dm_g = darr[j][0], dsigma[j][0] # Retrieve center coordinates and distance (in parsecs) to galaxy. gal_cent, gal_dist, e_dm_dist = MCs_data(j) gal_dist = gal_dist.kpc * u.kpc theta = glx_PA + Angle('90d') plot_bulge_plane(ra_g, dec_g, dm_g, e_dm_g, gal_dist, gal_cent, glx_inc, theta)
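# ---------------------------------------------------------------------------
# Illustrative sanity check (not part of the original script): xyz_coords()
# converts an ASteCA distance modulus 'dm' into a physical distance through
# D[pc] = 10 ** (0.2 * (dm + 5.)). Assuming a canonical LMC distance modulus
# of ~18.5 mag (used here only as a reference value):
#
#     >>> round(10 ** (0.2 * (18.5 + 5.)) / 1000., 1)
#     50.1    # kpc, the commonly quoted LMC distance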
gpl-3.0
rkmaddox/mne-python
tutorials/preprocessing/25_background_filtering.py
3
48286
# -*- coding: utf-8 -*- r""" .. _disc-filtering: =================================== Background information on filtering =================================== Here we give some background information on filtering in general, and how it is done in MNE-Python in particular. Recommended reading for practical applications of digital filter design can be found in Parks & Burrus (1987) :footcite:`ParksBurrus1987` and Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, and for filtering in an M/EEG context we recommend reading Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. .. note:: This tutorial goes pretty deep into the mathematics of filtering and the design decisions that go into choosing a filter. If you just want to know how to apply the default filters in MNE-Python to your data, skip this tutorial and read :ref:`tut-filter-resample` instead (but someday, you should come back and read this one too 🙂). Problem statement ================= Practical issues with filtering electrophysiological data are covered in Widmann *et al.* (2012) :footcite:`WidmannSchroger2012`, where they conclude with this statement: Filtering can result in considerable distortions of the time course (and amplitude) of a signal as demonstrated by VanRullen (2011) :footcite:`VanRullen2011`. Thus, filtering should not be used lightly. However, if effects of filtering are cautiously considered and filter artifacts are minimized, a valid interpretation of the temporal dynamics of filtered electrophysiological data is possible and signals missed otherwise can be detected with filtering. In other words, filtering can increase signal-to-noise ratio (SNR), but if it is not used carefully, it can distort data. Here we hope to cover some filtering basics so users can better understand filtering trade-offs and why MNE-Python has chosen particular defaults. .. _tut_filtering_basics: Filtering basics ================ Let's get some of the basic math down. In the frequency domain, digital filters have a transfer function that is given by: .. math:: H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}} {1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-M}} \\ &= \frac{\sum_{k=0}^Mb_kz^{-k}}{\sum_{k=1}^Na_kz^{-k}} In the time domain, the numerator coefficients :math:`b_k` and denominator coefficients :math:`a_k` can be used to obtain our output data :math:`y(n)` in terms of our input data :math:`x(n)` as: .. math:: :label: summations y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M) - a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\ &= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k) In other words, the output at time :math:`n` is determined by a sum over 1. the numerator coefficients :math:`b_k`, which get multiplied by the previous input values :math:`x(n-k)`, and 2. the denominator coefficients :math:`a_k`, which get multiplied by the previous output values :math:`y(n-k)`. Note that these summations correspond to (1) a weighted `moving average`_ and (2) an autoregression_. Filters are broken into two classes: FIR_ (finite impulse response) and IIR_ (infinite impulse response) based on these coefficients. FIR filters use a finite number of numerator coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output value of :math:`y(n)` depends only on the :math:`M` previous input values. IIR filters depend on the previous input and output values, and thus can have effectively infinite impulse responses. 
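As a quick illustrative sketch (independent of MNE-Python's own filtering
functions), equation :eq:`summations` is the recursion evaluated by
:func:`scipy.signal.lfilter` when given the numerator coefficients ``b`` and
denominator coefficients ``a``, so a two-tap moving average (FIR) and a
one-pole recursion (IIR) can be written as::

    import numpy as np
    from scipy.signal import lfilter

    x = np.random.RandomState(0).randn(1000)

    # FIR: numerator coefficients only (all a_k = 0 for k >= 1)
    y_fir = lfilter([0.5, 0.5], [1.], x)

    # IIR: previous outputs feed back through the denominator coefficients,
    # i.e. y(n) = 0.1 x(n) + 0.9 y(n - 1)
    y_iir = lfilter([0.1], [1., -0.9], x)
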
As outlined in Parks & Burrus (1987) :footcite:`ParksBurrus1987`, FIR and IIR have different trade-offs: * A causal FIR filter can be linear-phase -- i.e., the same time delay across all frequencies -- whereas a causal IIR filter cannot. The phase and group delay characteristics are also usually better for FIR filters. * IIR filters can generally have a steeper cutoff than an FIR filter of equivalent order. * IIR filters are generally less numerically stable, in part due to accumulating error (due to its recursive calculations). In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`: Despite IIR filters often being considered as computationally more efficient, they are recommended only when high throughput and sharp cutoffs are required (Ifeachor and Jervis, 2002 :footcite:`IfeachorJervis2002`, p. 321)... FIR filters are easier to control, are always stable, have a well-defined passband, can be corrected to zero-phase without additional computations, and can be converted to minimum-phase. We therefore recommend FIR filters for most purposes in electrophysiological data analysis. When designing a filter (FIR or IIR), there are always trade-offs that need to be considered, including but not limited to: 1. Ripple in the pass-band 2. Attenuation of the stop-band 3. Steepness of roll-off 4. Filter order (i.e., length for FIR filters) 5. Time-domain ringing In general, the sharper something is in frequency, the broader it is in time, and vice-versa. This is a fundamental time-frequency trade-off, and it will show up below. FIR Filters =========== First, we will focus on FIR filters, which are the default filters used by MNE-Python. """ ############################################################################### # Designing FIR filters # --------------------- # Here we'll try to design a low-pass filter and look at trade-offs in terms # of time- and frequency-domain filter characteristics. Later, in # :ref:`tut_effect_on_signals`, we'll look at how such filters can affect # signals when they are used. # # First let's import some useful tools for filtering, and set some default # values for our data that are reasonable for M/EEG. import numpy as np from numpy.fft import fft, fftfreq from scipy import signal import matplotlib.pyplot as plt from mne.time_frequency.tfr import morlet from mne.viz import plot_filter, plot_ideal_filter import mne sfreq = 1000. f_p = 40. flim = (1., sfreq / 2.) # limits for plotting ############################################################################### # Take for example an ideal low-pass filter, which would give a magnitude # response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude # response of 0 in the stop-band (down to frequency :math:`f_s`) such that # :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity): nyq = sfreq / 2. # the Nyquist frequency is half our sample rate freq = [0, f_p, f_p, nyq] gain = [1, 1, 0, 0] third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.] ax = plt.subplots(1, figsize=third_height)[1] plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim) ############################################################################### # This filter hypothetically achieves zero ripple in the frequency domain, # perfect attenuation, and perfect steepness. 
However, due to the discontinuity # in the frequency response, the filter would require infinite ringing in the # time domain (i.e., infinite order) to be realized. Another way to think of # this is that a rectangular window in the frequency domain is actually a sinc_ # function in the time domain, which requires an infinite number of samples # (and thus infinite time) to represent. So although this filter has ideal # frequency suppression, it has poor time-domain characteristics. # # Let's try to naïvely make a brick-wall filter of length 0.1 s, and look # at the filter itself in the time domain and the frequency domain: n = int(round(0.1 * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq # center our sinc h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True) ############################################################################### # This is not so good! Making the filter 10 times longer (1 s) gets us a # slightly better stop-band suppression, but still has a lot of ringing in # the time domain. Note the x-axis is an order of magnitude longer here, # and the filter has a correspondingly much longer group delay (again equal # to half the filter length, or 0.5 seconds): n = int(round(1. * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True) ############################################################################### # Let's make the stop-band tighter still with a longer filter (10 s), # with a resulting larger x-axis: n = int(round(10. * sfreq)) n -= n % 2 - 1 # make it odd t = np.arange(-(n // 2), n // 2 + 1) / sfreq h = np.sinc(2 * f_p * t) / (4 * np.pi) plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True) ############################################################################### # Now we have very sharp frequency suppression, but our filter rings for the # entire 10 seconds. So this naïve method is probably not a good way to build # our low-pass filter. # # Fortunately, there are multiple established methods to design FIR filters # based on desired response characteristics. These include: # # 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_) # 2. Windowed FIR design (:func:`scipy.signal.firwin2`, # :func:`scipy.signal.firwin`, and `MATLAB fir2`_) # 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_) # 4. Frequency-domain design (construct filter in Fourier # domain and use an :func:`IFFT <numpy.fft.ifft>` to invert it) # # .. note:: Remez and least squares designs have advantages when there are # "do not care" regions in our frequency response. However, we want # well controlled responses in all frequency regions. # Frequency-domain construction is good when an arbitrary response # is desired, but generally less clean (due to sampling issues) than # a windowed approach for more straightforward filter applications. # Since our filters (low-pass, high-pass, band-pass, band-stop) # are fairly simple and we require precise control of all frequency # regions, we will primarily use and explore windowed FIR design. 
# # If we relax our frequency-domain filter requirements a little bit, we can # use these functions to construct a lowpass filter that instead has a # *transition band*, or a region between the pass frequency :math:`f_p` # and stop frequency :math:`f_s`, e.g.: trans_bandwidth = 10 # 10 Hz transition band f_s = f_p + trans_bandwidth # = 50 Hz freq = [0., f_p, f_s, nyq] gain = [1., 1., 0., 0.] ax = plt.subplots(1, figsize=third_height)[1] title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth) plot_ideal_filter(freq, gain, ax, title=title, flim=flim) ############################################################################### # Accepting a shallower roll-off of the filter in the frequency domain makes # our time-domain response potentially much better. We end up with a more # gradual slope through the transition region, but a *much* cleaner time # domain signal. Here again for the 1 s filter: h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)', flim=flim, compensate=True) ############################################################################### # Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually # use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable # stop-band attenuation: n = int(round(sfreq * 0.5)) + 1 h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)', flim=flim, compensate=True) ############################################################################### # But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s), # our effective stop frequency gets pushed out past 60 Hz: n = int(round(sfreq * 0.2)) + 1 h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)', flim=flim, compensate=True) ############################################################################### # If we want a filter that is only 0.1 seconds long, we should probably use # something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz): trans_bandwidth = 25 f_s = f_p + trans_bandwidth freq = [0, f_p, f_s, nyq] h = signal.firwin2(n, freq, gain, nyq=nyq) plot_filter(h, sfreq, freq, gain, 'Windowed 50 Hz transition (0.2 s)', flim=flim, compensate=True) ############################################################################### # So far, we have only discussed *non-causal* filtering, which means that each # sample at each time point :math:`t` is filtered using samples that come # after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current # time point :math:`t`. # In this sense, each sample is influenced by samples that come both before # and after it. This is useful in many cases, especially because it does not # delay the timing of events. # # However, sometimes it can be beneficial to use *causal* filtering, # whereby each sample :math:`t` is filtered only using time points that came # after it. # # Note that the delay is variable (whereas for linear/zero-phase filters it # is constant) but small in the pass-band. Unlike zero-phase filters, which # require time-shifting backward the output of a linear-phase filtering stage # (and thus becoming non-causal), minimum-phase filters do not require any # compensation to achieve small delays in the pass-band. Note that as an # artifact of the minimum phase filter construction step, the filter does # not end up being as steep as the linear/zero-phase version. 
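#
# As a concrete number (a quick aside, not taken verbatim from the MNE
# documentation): a causal filter produces each output from current and past
# samples only, and a causal linear-phase FIR filter with :math:`N` taps has a
# constant group delay of :math:`(N - 1) / 2` samples. The 201-tap (0.2 s)
# design above therefore delays every frequency by 100 samples (0.1 s) before
# compensation, whereas a minimum-phase version concentrates its impulse
# response near zero lag at the cost of a small, frequency-dependent delay in
# the pass-band.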
# # We can construct a minimum-phase filter from our existing linear-phase # filter with the :func:`scipy.signal.minimum_phase` function, and note # that the falloff is not as steep: h_min = signal.minimum_phase(h) plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim) ############################################################################### # .. _tut_effect_on_signals: # # Applying FIR filters # -------------------- # # Now lets look at some practical effects of these filters by applying # them to some data. # # Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part) # plus noise (random and line). Note that the original clean signal contains # frequency content in both the pass band and transition bands of our # low-pass filter. dur = 10. center = 2. morlet_freq = f_p tlim = [center - 0.2, center + 0.2] tticks = [tlim[0], center, tlim[1]] flim = [20, 70] x = np.zeros(int(sfreq * dur) + 1) blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20. n_onset = int(center * sfreq) - len(blip) // 2 x[n_onset:n_onset + len(blip)] += blip x_orig = x.copy() rng = np.random.RandomState(0) x += rng.randn(len(x)) / 1000. x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000. ############################################################################### # Filter it with a shallow cutoff, linear-phase FIR (which allows us to # compensate for the constant filter delay): transition_band = 0.25 * f_p f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent: h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, fir_design='firwin', verbose=True) x_v16 = np.convolve(h, x) # this is the linear->zero phase, causal-to-non-causal conversion / shift x_v16 = x_v16[len(h) // 2:] plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim, compensate=True) ############################################################################### # Filter it with a different design method ``fir_design="firwin2"``, and also # compensate for the constant filter delay. This method does not produce # quite as sharp a transition compared to ``fir_design="firwin"``, despite # being twice as long: transition_band = 0.25 * f_p f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent: # filter_dur = 6.6 / transition_band # sec # n = int(sfreq * filter_dur) # h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, fir_design='firwin2', verbose=True) x_v14 = np.convolve(h, x)[len(h) // 2:] plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim, compensate=True) ############################################################################### # Let's also filter with the MNE-Python 0.13 default, which is a # long-duration, steep cutoff FIR that gets applied twice: transition_band = 0.5 # Hz f_s = f_p + transition_band filter_dur = 10. # sec freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] # This would be equivalent # n = int(sfreq * filter_dur) # h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) 
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, h_trans_bandwidth=transition_band, filter_length='%ss' % filter_dur, fir_design='firwin2', verbose=True) x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1] # the effective h is one that is applied to the time-reversed version of itself h_eff = np.convolve(h, h[::-1]) plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim, compensate=True) ############################################################################### # Let's also filter it with the MNE-C default, which is a long-duration # steep-slope FIR filter designed using frequency-domain techniques: h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5) x_mne_c = np.convolve(h, x)[len(h) // 2:] transition_band = 5 # Hz (default in MNE-C) f_s = f_p + transition_band freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True) ############################################################################### # And now an example of a minimum-phase filter: h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p, phase='minimum', fir_design='firwin', verbose=True) x_min = np.convolve(h, x) transition_band = 0.25 * f_p f_s = f_p + transition_band filter_dur = 6.6 / transition_band # sec n = int(sfreq * filter_dur) freq = [0., f_p, f_s, sfreq / 2.] gain = [1., 1., 0., 0.] plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim) ############################################################################### # Both the MNE-Python 0.13 and MNE-C filters have excellent frequency # attenuation, but it comes at a cost of potential # ringing (long-lasting ripples) in the time domain. Ringing can occur with # steep filters, especially in signals with frequency content around the # transition band. Our Morlet wavelet signal has power in our transition band, # and the time-domain ringing is thus more pronounced for the steep-slope, # long-duration filter than the shorter, shallower-slope filter: axes = plt.subplots(1, 2)[1] def plot_signal(x, offset): """Plot a signal.""" t = np.arange(len(x)) / sfreq axes[0].plot(t, x + offset) axes[0].set(xlabel='Time (s)', xlim=t[[0, -1]]) X = fft(x) freqs = fftfreq(len(x), 1. / sfreq) mask = freqs >= 0 X = X[mask] freqs = freqs[mask] axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16))) axes[1].set(xlim=flim) yscale = 30 yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)', 'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase'] yticks = -np.arange(len(yticklabels)) / yscale plot_signal(x_orig, offset=yticks[0]) plot_signal(x, offset=yticks[1]) plot_signal(x_v16, offset=yticks[2]) plot_signal(x_v14, offset=yticks[3]) plot_signal(x_v13, offset=yticks[4]) plot_signal(x_mne_c, offset=yticks[5]) plot_signal(x_min, offset=yticks[6]) axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks, ylim=[-len(yticks) / yscale, 1. / yscale], yticks=yticks, yticklabels=yticklabels) for text in axes[0].get_yticklabels(): text.set(rotation=45, size=8) axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)', ylabel='Magnitude (dB)') mne.viz.tight_layout() plt.show() ############################################################################### # IIR filters # =========== # # MNE-Python also offers IIR filtering functionality that is based on the # methods from :mod:`scipy.signal`. 
Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few filter orders,
# i.e., a few different numbers of coefficients and therefore different
# steepnesses of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
#           the IIR filters below is not constant. In the FIR case, we can
#           design so-called linear-phase filters that have a constant group
#           delay, and thus compensate for the delay (making the filter
#           non-causal) if necessary. This cannot be done with IIR filters, as
#           they have a non-linear phase (non-constant group delay). As the
#           filter order increases, the phase distortion near and in the
#           transition band worsens. However, if non-causal (forward-backward)
#           filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
#           these phase issues can theoretically be mitigated.

sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim,
            compensate=True)
x_shallow = signal.sosfiltfilt(sos, x)
del sos

###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
#           by using :func:`scipy.signal.sosfiltfilt` and, under the
#           hood, :func:`scipy.signal.zpk2sos` when passing the
#           ``output='sos'`` keyword argument to
#           :func:`scipy.signal.iirfilter`. The filter definitions
#           given :ref:`above <tut_filtering_basics>` use the polynomial
#           numerator/denominator (sometimes called "tf") form ``(b, a)``,
#           which is theoretically equivalent to the SOS form used here.
#           In practice, however, the SOS form can give much better results
#           due to issues with numerical precision (see
#           :func:`scipy.signal.sosfilt` for an example), so SOS should be
#           used whenever possible.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response. Let's also switch to using the MNE filter
# design function, which simplifies a few things and gives us some information
# about the resulting filter:

iir_params = dict(order=8, ftype='butter')
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim,
            compensate=True)
x_steep = signal.sosfiltfilt(filt['sos'], x)

###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`.
Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:

iir_params.update(ftype='cheby1',
                  rp=1.,  # dB of acceptable pass-band ripple
                  )
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Chebychev-1 order=8, ripple=1 dB',
            flim=flim, compensate=True)

###############################################################################
# If we can live with even more ripple, we can get a slightly steeper falloff,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):

iir_params['rp'] = 6.
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Chebychev-1 order=8, ripple=6 dB',
            flim=flim, compensate=True)

###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:

axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()

###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are non-causal (zero-phase), can
# make activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen (2011) :footcite:`VanRullen2011`, low-pass filters
# that were commonly used at the time created artifacts when they were applied
# to simulated data. However, such deleterious effects were minimal in many
# real-world examples in Rousselet (2012) :footcite:`Rousselet2012`.
#
# Perhaps more revealing, it was noted in Widmann & Schröger (2012)
# :footcite:`WidmannSchroger2012` that the problematic low-pass filters from
# VanRullen (2011) :footcite:`VanRullen2011`:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
#    included "do-not-care" transition regions, which can lead to
#    uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
#    which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* (2012) :footcite:`AcunzoEtAl2012` to:
#
#     "... generate a systematic bias easily leading to misinterpretations of
#     neural activity."
#
# In a related paper, Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`
# also came to suggest a 0.1 Hz highpass.
More evidence of such distortions followed in
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015`.
# In data from language ERP studies of semantic and
# syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early when compared
# to the unfiltered data. From this, the authors suggested that the optimal
# high-pass value for language processing is 0.1 Hz.
#
# We can recreate a problematic simulation from
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015`:
#
#     "The simulated component is a single-cycle cosine wave with an amplitude
#     of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The
#     simulated component was embedded in 20 s of zero values to avoid
#     filtering edge effects... Distortions [were] caused by 2 Hz low-pass
#     and high-pass filters... No visible distortion to the original
#     waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
#     Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
#     (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
#           pass-band, but also within the transition and stop-bands -- perhaps
#           most easily understood because the signal has a non-zero DC value,
#           but also because it is a shifted cosine that has been
#           *windowed* (here multiplied by a rectangular window), which
#           makes the cosine and DC frequencies spread to other frequencies
#           (multiplication in time is convolution in frequency, so multiplying
#           by a rectangular window in the time domain means convolving a sinc
#           function with the impulses at DC and the cosine frequency in the
#           frequency domain).
#

x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig

iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)

xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')
    ax.plot(t, x_f, color='k', linestyle='--')
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks, title=title,
           xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()

###############################################################################
# Similarly, in a P300 paradigm, Kappenman & Luck (2010)
# :footcite:`KappenmanLuck2010` found that applying a 1 Hz high-pass decreased
# the probability of finding a significant difference in the N100 response,
# likely because the P300 response was smeared (and inverted) in time by the
# high-pass filter such that it tended to cancel out the increased N100.
# However, they nonetheless note that some high-passing can still be useful
# to deal with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving the :ref:`sample-dataset` dataset,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* (2015) :footcite:`TannerEtAl2015`
# suggest using baseline correction to remove slow drifts in data. However,
# Maess *et al.* (2016) :footcite:`MaessEtAl2016`
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* (2016) :footcite:`TannerEtAl2016`
# responded by arguing that baseline correction can correct for problems
# introduced by filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:


def baseline_plot(x):
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0)
            else:
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    plt.suptitle('High-pass filtering and baseline correction')
    plt.show()


baseline_plot(x)

###############################################################################
# In response, Maess *et al.* (2016) :footcite:`MaessEtAl2016a`
# note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
#           are for a single simulated sensor. In multi-electrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
#           will leak into the post-stimulus period. This will likely create a
#           spatially varying distortion of the time-domain signals, as the
#           averaged pre-stimulus spatial pattern gets subtracted from the
#           sensor time courses.
#
# Putting some activity in the baseline period:

n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)

###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
#    to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
#    in the baseline period.
#
# We thus recommend carefully choosing baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`.
# Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
#     "... providing a transition bandwidth of 25% of the lower passband
#     edge but, where possible, not lower than 2 Hz and otherwise the
#     distance from the passband edge to the critical frequency."
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01             | 0.01              | 2.0               |
# +------------------+-------------------+-------------------+
# | 0.1              | 0.1               | 2.0               |
# +------------------+-------------------+-------------------+
# | 1.0              | 1.0               | 2.0               |
# +------------------+-------------------+-------------------+
# | 2.0              | 2.0               | 2.0               |
# +------------------+-------------------+-------------------+
# | 4.0              | 2.0               | 2.0               |
# +------------------+-------------------+-------------------+
# | 8.0              | 2.0               | 2.0               |
# +------------------+-------------------+-------------------+
# | 10.0             | 2.5               | 2.5               |
# +------------------+-------------------+-------------------+
# | 20.0             | 5.0               | 5.0               |
# +------------------+-------------------+-------------------+
# | 40.0             | 10.0              | 10.0              |
# +------------------+-------------------+-------------------+
# | 50.0             | 12.5              | 12.5              |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (converted to samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann,
# Hamming, or Blackman windows, respectively, as selected by the
# ``fir_window`` argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode. For example, with a Hamming window and a
# 10 Hz transition bandwidth (the default for a 40 Hz low-pass), the filter
# length would be about 3.3 / 10 Hz = 0.33 s, i.e., roughly 330 samples at a
# 1000 Hz sampling rate.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
#           doubled compared to what is given in
#           Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`
#           (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
#           on the frequency response, which we compensate for by
#           increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# Since version 0.14, MNE-Python defaults to using a Hamming window in filter
# design, as it provides up to 53 dB of stop-band attenuation with small
# pass-band ripple.
#
# ..
note:: In band-pass applications, often a low-pass filter can operate # effectively with fewer samples than the high-pass filter, so # it is advisable to apply the high-pass and low-pass separately # when using ``fir_design='firwin2'``. For design mode # ``fir_design='firwin'``, there is no need to separate the # operations, as the lowpass and highpass elements are constructed # separately to meet the transition band requirements. # # For more information on how to use the # MNE-Python filtering functions with real data, consult the preprocessing # tutorial on :ref:`tut-filter-resample`. # # Defaults in MNE-C # ----------------- # MNE-C by default uses: # # 1. 5 Hz transition band for low-pass filters. # 2. 3-sample transition band for high-pass filters. # 3. Filter length of 8197 samples. # # The filter is designed in the frequency domain, creating a linear-phase # filter such that the delay is compensated for as is done with the MNE-Python # ``phase='zero'`` filtering option. # # Squared-cosine ramps are used in the transition regions. Because these # are used in place of more gradual (e.g., linear) transitions, # a given transition width will result in more temporal ringing but also more # rapid attenuation than the same transition width in windowed FIR designs. # # The default filter length will generally have excellent attenuation # but long ringing for the sample rates typically encountered in M/EEG data # (e.g. 500-2000 Hz). # # Defaults in other software # -------------------------- # A good but possibly outdated comparison of filtering in various software # packages is available in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`. # Briefly: # # * EEGLAB # MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB # (see the `EEGLAB filtering FAQ`_ for more information). # * FieldTrip # By default FieldTrip applies a forward-backward Butterworth IIR filter # of order 4 (band-pass and band-stop filters) or 2 (for low-pass and # high-pass filters). Similar filters can be achieved in MNE-Python when # filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>` # (see also :func:`mne.filter.construct_iir_filter` for options). # For more information, see e.g. the # `FieldTrip band-pass documentation <ftbp_>`_. # # Reporting Filters # ================= # On page 45 in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, # there is a convenient list of # important filter parameters that should be reported with each publication: # # 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR) # 2. Cutoff frequency (including definition) # 3. Filter order (or length) # 4. Roll-off or transition bandwidth # 5. Passband ripple and stopband attenuation # 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality # 7. Direction of computation (one-pass forward/reverse, or two-pass forward # and reverse) # # In the following, we will address how to deal with these parameters in MNE: # # # Filter type # ----------- # Depending on the function or method used, the filter type can be specified. # To name an example, in :func:`mne.filter.create_filter`, the relevant # arguments would be ``l_freq``, ``h_freq``, ``method``, and if the method is # FIR ``fir_window`` and ``fir_design``. # # # Cutoff frequency # ---------------- # The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the # middle of the transition band. 
That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the ``h_trans_bandwidth`` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.`` (for example, 45 Hz for ``h_freq=40``
# with a 10 Hz transition bandwidth).
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
#
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:

# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(
    data=None,  # data is only used for sanity checking, not strictly needed
    sfreq=1000.,  # sfreq of your data in Hz
    l_freq=None,
    h_freq=40.,  # assuming a lowpass of 40 Hz
    method='fir',
    fir_window='hamming',
    fir_design='firwin',
    verbose=True)

# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]

###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
#           will not print a filter length and transition bandwidth to the log.
#           Instead, you can specify the roll-off with the ``iir_params``
#           argument or stay with the default, which is a fourth-order
#           (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR
# filters in MNE), the passband ripple and stopband attenuation depend on the
# window used in the design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann                    | 0.0545 dB       | 44 dB                |
# +-------------------------+-----------------+----------------------+
# | Hamming                 | 0.0194 dB       | 53 dB                |
# +-------------------------+-----------------+----------------------+
# | Blackman                | 0.0017 dB       | 74 dB                |
# +-------------------------+-----------------+----------------------+
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example, in the
# docstring of :func:`mne.filter.create_filter`, the ``phase`` parameter is
# described as follows:
#
#     Phase of the filter, only used if ``method='fir'``.
#     By default, a symmetric linear-phase FIR filter is constructed.
#     If ``phase='zero'`` (default), the delay of this filter
#     is compensated for. If ``phase=='zero-double'``, then this filter
#     is applied twice, once forward, and once backward. If 'minimum',
#     then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider the transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
# .. footbibliography::
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter