repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
ContinuumIO/dask | dask/dataframe/io/parquet/arrow.py | 1 | 31580 | from functools import partial
from collections import OrderedDict
import json
import warnings
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from pyarrow.compat import guid
from ....utils import natural_sort_key, getargspec
from ..utils import _get_pyarrow_dtypes, _meta_from_dtypes
from ...utils import clear_known_categories
from .utils import (
_parse_pandas_metadata,
_normalize_index_columns,
Engine,
_analyze_paths,
)
def _get_md_row_groups(pieces):
""" Read file-footer metadata from each individual piece.
Since this operation can be painfully slow in some cases, abort
if any metadata or statistics are missing
"""
row_groups = []
row_groups_per_piece = []
for piece in pieces:
num_row_groups = piece.get_metadata().num_row_groups
for rg in range(num_row_groups):
row_group = piece.get_metadata().row_group(rg)
for c in range(row_group.num_columns):
if not row_group.column(c).statistics:
return (None, None)
row_groups.append(row_group)
row_groups_per_piece.append(num_row_groups)
if len(row_groups) == len(pieces):
row_groups_per_piece = None
# TODO: Skip row_groups_per_piece after ARROW-2801
return row_groups, row_groups_per_piece
def _get_row_groups_per_piece(pieces, metadata, path, fs):
""" Determine number of row groups in each dataset piece.
This function requires access to ParquetDataset.metadata
"""
# TODO: Remove this function after ARROW-2801
if metadata.num_row_groups == len(pieces):
return None # pieces already map to row-groups
result = OrderedDict()
for piece in pieces:
result[piece.path] = 0
for rg in range(metadata.num_row_groups):
filename = metadata.row_group(rg).column(0).file_path
if filename:
result[fs.sep.join([path, filename])] += 1
else:
return None # File path is missing, abort
return tuple(result.values())
def _merge_statistics(stats, s):
""" Update `stats` with vaules in `s`
"""
stats[-1]["total_byte_size"] += s["total_byte_size"]
stats[-1]["num-rows"] += s["num-rows"]
ncols = len(stats[-1]["columns"])
ncols_n = len(s["columns"])
if ncols != ncols_n:
raise ValueError(f"Column count not equal ({ncols} vs {ncols_n})")
for i in range(ncols):
name = stats[-1]["columns"][i]["name"]
j = i
for ii in range(ncols):
if name == s["columns"][j]["name"]:
break
if ii == ncols - 1:
raise KeyError(f"Column statistics missing for {name}")
j = (j + 1) % ncols
min_n = s["columns"][j]["min"]
max_n = s["columns"][j]["max"]
null_count_n = s["columns"][j]["null_count"]
min_i = stats[-1]["columns"][i]["min"]
max_i = stats[-1]["columns"][i]["max"]
stats[-1]["columns"][i]["min"] = min(min_i, min_n)
stats[-1]["columns"][i]["max"] = max(max_i, max_n)
stats[-1]["columns"][i]["null_count"] += null_count_n
return True
class SimplePiece:
    """ SimplePiece
    Surrogate class for PyArrow ParquetDatasetPiece.
    Only used for flat datasets (not partitioned) where
    a "_metadata" file is available.
    """

    def __init__(self, path):
        # Path to the parquet data file this piece represents.
        self.path = path
        # Flat datasets carry no hive-style partition keys.
        self.partition_keys = None
        # presumably None means "no specific row group" when this tuple is
        # forwarded to pq.ParquetDatasetPiece in read_partition -- confirm.
        self.row_group = None
def _determine_dataset_parts(fs, paths, gather_statistics, filters, dataset_kwargs):
    """ Determine how to access metadata and break read into ``parts``
    This logic is mostly to handle `gather_statistics=False` cases,
    because this also means we should avoid scanning every file in the
    dataset.

    Returns a ``(parts, dataset)`` tuple.  When ``parts`` is non-empty it
    is a list of file paths (one part per file); when empty, parts are
    derived later from ``dataset.pieces``.
    """
    parts = []
    if len(paths) > 1:
        # Multiple explicit paths were given.
        base, fns = _analyze_paths(paths, fs)
        if "_metadata" in fns:
            # We have a _metadata file
            # PyArrow cannot handle "_metadata"
            # when `paths` is a list.
            paths.remove(base + fs.sep + "_metadata")
            fns.remove("_metadata")
            if gather_statistics is not False:
                # If we are allowed to gather statistics,
                # lets use "_metadata" instead of opening
                # every file. Note that we don't need to check if
                # the dataset is flat here, because PyArrow cannot
                # properly handle partitioning in this case anyway.
                dataset = pq.ParquetDataset(
                    base + fs.sep + "_metadata",
                    filesystem=fs,
                    filters=filters,
                    **dataset_kwargs,
                )
                dataset.metadata = dataset.pieces[0].get_metadata()
                # Replace the single "_metadata" piece with surrogate
                # pieces for the real data files.
                dataset.pieces = [SimplePiece(path) for path in paths]
                dataset.partitions = None
                return parts, dataset
        if gather_statistics is not False:
            # This scans all the files
            dataset = pq.ParquetDataset(
                paths, filesystem=fs, filters=filters, **dataset_kwargs
            )
            if dataset.schema is None:
                # The dataset may have inconsistent schemas between files.
                # If so, we should try to use a "_common_metadata" file
                proxy_path = (
                    base + fs.sep + "_common_metadata"
                    if "_common_metadata" in fns
                    else paths[0]
                )
                dataset.schema = pq.ParquetDataset(proxy_path, filesystem=fs).schema
        else:
            # Rely on schema for 0th file.
            # Will need to pass a list of paths to read_partition
            dataset = pq.ParquetDataset(paths[0], filesystem=fs, **dataset_kwargs)
            parts = [base + fs.sep + fn for fn in fns]
    elif fs.isdir(paths[0]):
        # This is a directory, check for _metadata, then _common_metadata
        allpaths = fs.glob(paths[0] + fs.sep + "*")
        base, fns = _analyze_paths(allpaths, fs)
        # Check if dataset is "not flat" (partitioned into directories).
        # If so, we will need to let pyarrow generate the `dataset` object.
        not_flat = any([fs.isdir(p) for p in fs.glob(fs.sep.join([base, "*"]))])
        if "_metadata" in fns and "validate_schema" not in dataset_kwargs:
            # Trust "_metadata"; skip the per-file schema validation scan.
            dataset_kwargs["validate_schema"] = False
        if not_flat or "_metadata" in fns or gather_statistics is not False:
            # Let arrow do its thing (use _metadata or scan files)
            dataset = pq.ParquetDataset(
                paths, filesystem=fs, filters=filters, **dataset_kwargs
            )
            if dataset.schema is None:
                # The dataset may have inconsistent schemas between files.
                # If so, we should try to use a "_common_metadata" file
                proxy_path = (
                    base + fs.sep + "_common_metadata"
                    if "_common_metadata" in fns
                    else allpaths[0]
                )
                dataset.schema = pq.ParquetDataset(proxy_path, filesystem=fs).schema
        else:
            # Use _common_metadata file if it is available.
            # Otherwise, just use 0th file
            if "_common_metadata" in fns:
                dataset = pq.ParquetDataset(
                    base + fs.sep + "_common_metadata", filesystem=fs, **dataset_kwargs
                )
            else:
                dataset = pq.ParquetDataset(
                    allpaths[0], filesystem=fs, **dataset_kwargs
                )
            parts = [base + fs.sep + fn for fn in fns if fn != "_common_metadata"]
    else:
        # There is only one file to read
        dataset = pq.ParquetDataset(paths, filesystem=fs, **dataset_kwargs)
    return parts, dataset
def _write_partitioned(
    table, root_path, partition_cols, fs, preserve_index=True, **kwargs
):
    """ Write table to a partitioned dataset with pyarrow.
    Logic copied from pyarrow.parquet.
    (arrow/python/pyarrow/parquet.py::write_to_dataset)
    TODO: Remove this in favor of pyarrow's `write_to_dataset`
    once ARROW-8244 is addressed.

    Returns the list of collected file metadata objects (one per
    partition directory written), with their file paths set relative
    to ``root_path``.
    """
    fs.mkdirs(root_path, exist_ok=True)

    df = table.to_pandas(ignore_metadata=True)
    # Partition-column values select the output directory; the remaining
    # columns are what actually gets written to each file.
    partition_keys = [df[col] for col in partition_cols]
    data_df = df.drop(partition_cols, axis="columns")
    data_cols = df.columns.drop(partition_cols)
    if len(data_cols) == 0 and not preserve_index:
        raise ValueError("No data left to save outside partition columns")

    # Drop the partition columns from the arrow schema as well.
    subschema = table.schema
    for col in table.schema.names:
        if col in partition_cols:
            subschema = subschema.remove(subschema.get_field_index(col))

    md_list = []
    for keys, subgroup in data_df.groupby(partition_keys):
        if not isinstance(keys, tuple):
            # Single partition column: groupby yields a scalar key.
            keys = (keys,)
        # Hive-style directory name, e.g. "col=value/other=value".
        subdir = fs.sep.join(
            [
                "{colname}={value}".format(colname=name, value=val)
                for name, val in zip(partition_cols, keys)
            ]
        )
        subtable = pa.Table.from_pandas(
            subgroup, preserve_index=False, schema=subschema, safe=False
        )
        prefix = fs.sep.join([root_path, subdir])
        # NOTE(review): `exists_ok` here vs `exist_ok` in the mkdirs call
        # above -- looks inconsistent; confirm which keyword this
        # filesystem's `mkdir` actually accepts.
        fs.mkdir(prefix, exists_ok=True)
        outfile = guid() + ".parquet"
        full_path = fs.sep.join([prefix, outfile])
        with fs.open(full_path, "wb") as f:
            pq.write_table(subtable, f, metadata_collector=md_list, **kwargs)
        # Record the file path (relative to the dataset root) in the
        # collected metadata so a global "_metadata" can be written later.
        md_list[-1].set_file_path(fs.sep.join([subdir, outfile]))

    return md_list
class ArrowEngine(Engine):
    @classmethod
    def read_metadata(
        cls,
        fs,
        paths,
        categories=None,
        index=None,
        gather_statistics=None,
        filters=None,
        split_row_groups=True,
        **kwargs,
    ):
        """Gather dataset metadata, statistics, and the read plan.

        Returns a ``(meta, stats, parts)`` tuple: an empty pandas
        DataFrame with the expected output columns/index dtypes,
        optional per-part statistics (or ``None``), and a list of
        part-descriptor dicts consumed by ``read_partition``.
        """
        # Define the dataset object to use for metadata,
        # Also, initialize `parts`. If `parts` is populated here,
        # then each part will correspond to a file. Otherwise, each part will
        # correspond to a row group (populated below)
        parts, dataset = _determine_dataset_parts(
            fs, paths, gather_statistics, filters, kwargs.get("dataset", {})
        )
        # Check if the column-chunk file_path's are set in "_metadata".
        # If available, we can use the path to sort the row-groups
        col_chunk_paths = False
        if dataset.metadata:
            col_chunk_paths = all(
                dataset.metadata.row_group(i).column(0).file_path is not None
                for i in range(dataset.metadata.num_row_groups)
            )

        # TODO: Call to `_determine_dataset_parts` uses `pq.ParquetDataset`
        # to define the `dataset` object. `split_row_groups` should be passed
        # to that constructor once it is supported (see ARROW-2801).
        if dataset.partitions is not None:
            partitions = [
                n for n in dataset.partitions.partition_names if n is not None
            ]
            if partitions and dataset.metadata:
                # Dont use dataset.metadata for partitioned datasets, unless
                # the column-chunk metadata includes the `"file_path"`.
                # The order of dataset.metadata.row_group items is often
                # different than the order of `dataset.pieces`.
                if not col_chunk_paths or (
                    len(dataset.pieces) != dataset.metadata.num_row_groups
                ):
                    dataset.schema = dataset.metadata.schema
                    dataset.metadata = None
        else:
            partitions = []

        # Statistics are currently collected at the row-group level only.
        # Therefore, we cannot perform filtering with split_row_groups=False.
        # For "partitioned" datasets, each file (usually) corresponds to a
        # row-group anyway.
        # TODO: Map row-group statistics onto file pieces for filtering.
        # This shouldn't be difficult if `col_chunk_paths==True`
        if not split_row_groups and not col_chunk_paths:
            if gather_statistics is None and not partitions:
                gather_statistics = False
            if filters:
                raise ValueError(
                    "Filters not supported with split_row_groups=False "
                    "(unless proper _metadata is available)."
                )
            if gather_statistics and not partitions:
                raise ValueError(
                    "Statistics not supported with split_row_groups=False."
                    "(unless proper _metadata is available)."
                )

        if dataset.metadata:
            schema = dataset.metadata.schema.to_arrow_schema()
        else:
            schema = dataset.schema.to_arrow_schema()
        columns = None

        has_pandas_metadata = (
            schema.metadata is not None and b"pandas" in schema.metadata
        )

        if has_pandas_metadata:
            pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
            (
                index_names,
                column_names,
                storage_name_mapping,
                column_index_names,
            ) = _parse_pandas_metadata(pandas_metadata)
            if categories is None:
                # Infer categorical columns from the pandas metadata.
                categories = []
                for col in pandas_metadata["columns"]:
                    if (col["pandas_type"] == "categorical") and (
                        col["name"] not in categories
                    ):
                        categories.append(col["name"])
        else:
            # No pandas metadata: every schema field is a plain column.
            index_names = []
            column_names = schema.names
            storage_name_mapping = {k: k for k in column_names}
            column_index_names = [None]

        if index is None and index_names:
            # Default to the index recorded in the pandas metadata.
            index = index_names

        if set(column_names).intersection(partitions):
            raise ValueError(
                "partition(s) should not exist in columns.\n"
                "categories: {} | partitions: {}".format(column_names, partitions)
            )

        column_names, index_names = _normalize_index_columns(
            columns, column_names + partitions, index, index_names
        )

        all_columns = index_names + column_names

        # Deterministic part ordering by file path.
        pieces = sorted(dataset.pieces, key=lambda piece: natural_sort_key(piece.path))

        # Check that categories are included in columns
        if categories and not set(categories).intersection(all_columns):
            raise ValueError(
                "categories not in available columns.\n"
                "categories: {} | columns: {}".format(categories, list(all_columns))
            )

        dtypes = _get_pyarrow_dtypes(schema, categories)
        dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()}

        index_cols = index or ()
        meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names)
        meta = clear_known_categories(meta, cols=categories)

        if (
            gather_statistics is None
            and dataset.metadata
            and dataset.metadata.num_row_groups >= len(pieces)
        ):
            # "_metadata" covers all pieces -- statistics are cheap.
            gather_statistics = True
        if not pieces:
            gather_statistics = False

        if filters:
            # Filters may require us to gather statistics
            if gather_statistics is False and partitions:
                warnings.warn(
                    "Filtering with gather_statistics=False. "
                    "Only partition columns will be filtered correctly."
                )
            elif gather_statistics is False:
                raise ValueError("Cannot apply filters with gather_statistics=False")
            elif not gather_statistics:
                gather_statistics = True

        row_groups_per_piece = None
        if gather_statistics:
            # Read from _metadata file
            if dataset.metadata and dataset.metadata.num_row_groups >= len(pieces):
                row_groups = [
                    dataset.metadata.row_group(i)
                    for i in range(dataset.metadata.num_row_groups)
                ]
                # Re-order row-groups by path name if known
                if col_chunk_paths:
                    row_groups = sorted(
                        row_groups,
                        key=lambda row_group: natural_sort_key(
                            row_group.column(0).file_path
                        ),
                    )
                if split_row_groups and len(dataset.paths) == 1:
                    row_groups_per_piece = _get_row_groups_per_piece(
                        pieces, dataset.metadata, dataset.paths[0], fs
                    )
                names = dataset.metadata.schema.names
            else:
                # Read from each individual piece (quite possibly slow).
                row_groups, row_groups_per_piece = _get_md_row_groups(pieces)
                if row_groups:
                    piece = pieces[0]
                    md = piece.get_metadata()
                    names = md.schema.names
                else:
                    # Statistics were missing somewhere -- give up.
                    gather_statistics = False

        if gather_statistics:
            stats = []
            skip_cols = set()  # Columns with min/max = None detected
            path_last = None
            for ri, row_group in enumerate(row_groups):
                s = {"num-rows": row_group.num_rows, "columns": []}
                for i, name in enumerate(names):
                    if name not in skip_cols:
                        column = row_group.column(i)
                        d = {"name": name}
                        if column.statistics:
                            cs_min = column.statistics.min
                            cs_max = column.statistics.max
                            if not column.statistics.has_min_max:
                                cs_min, cs_max = None, None
                            if None in [cs_min, cs_max] and ri == 0:
                                # Unusable statistics for this column;
                                # ignore it for every row group.
                                skip_cols.add(name)
                                continue
                            cs_vals = pd.Series([cs_min, cs_max])
                            d.update(
                                {
                                    "min": cs_vals[0],
                                    "max": cs_vals[1],
                                    "null_count": column.statistics.null_count,
                                }
                            )
                        s["columns"].append(d)
                s["total_byte_size"] = row_group.total_byte_size
                if col_chunk_paths:
                    s["file_path_0"] = row_group.column(0).file_path
                    if not split_row_groups and (s["file_path_0"] == path_last):
                        # Rather than appending a new "row-group", just merge
                        # new `s` statistics into last element of `stats`.
                        # Note that each stats element will now correspond to an
                        # entire file (rather than actual "row-groups")
                        _merge_statistics(stats, s)
                        continue
                    else:
                        path_last = s["file_path_0"]
                stats.append(s)
        else:
            stats = None

        if dataset.partitions:
            # Make sure partition columns show up as (empty) categoricals
            # in the metadata frame.
            for partition in dataset.partitions:
                if isinstance(index, list) and partition.name == index[0]:
                    meta.index = pd.CategoricalIndex(
                        categories=partition.keys, name=index[0]
                    )
                elif partition.name == meta.index.name:
                    meta.index = pd.CategoricalIndex(
                        categories=partition.keys, name=meta.index.name
                    )
                elif partition.name in meta.columns:
                    meta[partition.name] = pd.Categorical(
                        categories=partition.keys, values=[]
                    )

        # Create `parts`
        # This is a list of row-group-descriptor dicts, or file-paths
        # if we have a list of files and gather_statistics=False
        if not parts:
            if split_row_groups and row_groups_per_piece:
                # TODO: This block can be removed after ARROW-2801
                parts = []
                rg_tot = 0
                for i, piece in enumerate(pieces):
                    num_row_groups = row_groups_per_piece[i]
                    for rg in range(num_row_groups):
                        parts.append((piece.path, rg, piece.partition_keys))
                        # Setting file_path here, because it may be
                        # missing from the row-group/column-chunk stats
                        if "file_path_0" not in stats[rg_tot]:
                            stats[rg_tot]["file_path_0"] = piece.path
                        rg_tot += 1
            else:
                parts = [
                    (piece.path, piece.row_group, piece.partition_keys)
                    for piece in pieces
                ]
        parts = [
            {
                "piece": piece,
                "kwargs": {"partitions": dataset.partitions, "categories": categories},
            }
            for piece in parts
        ]

        return (meta, stats, parts)
    @classmethod
    def read_partition(
        cls, fs, piece, columns, index, categories=(), partitions=(), **kwargs
    ):
        """Read a single part (file or row group) into a pandas DataFrame.

        ``piece`` is either a file-path string or a
        ``(path, row_group, partition_keys)`` tuple produced by
        ``read_metadata``.
        """
        if isinstance(index, list):
            for level in index:
                # unclear if we can use set ops here. I think the order matters.
                # Need the membership test to avoid duplicating index when
                # we slice with `columns` later on.
                if level not in columns:
                    columns.append(level)
        if isinstance(piece, str):
            # `piece` is a file-path string
            piece = pq.ParquetDatasetPiece(
                piece, open_file_func=partial(fs.open, mode="rb")
            )
        else:
            # `piece` contains (path, row_group, partition_keys)
            (path, row_group, partition_keys) = piece
            piece = pq.ParquetDatasetPiece(
                path,
                row_group=row_group,
                partition_keys=partition_keys,
                open_file_func=partial(fs.open, mode="rb"),
            )

        # Ensure `columns` and `partitions` do not overlap
        columns_and_parts = columns.copy()
        if columns_and_parts and partitions:
            for part_name in partitions.partition_names:
                if part_name in columns:
                    # Partition values come from the directory names, not
                    # the file contents -- do not read them as data columns.
                    columns.remove(part_name)
                else:
                    columns_and_parts.append(part_name)
            columns = columns or None

        arrow_table = cls._parquet_piece_as_arrow(piece, columns, partitions, **kwargs)
        df = cls._arrow_table_to_pandas(arrow_table, categories, **kwargs)

        # Note that `to_pandas(ignore_metadata=False)` means
        # pyarrow will use the pandas metadata to set the index.
        index_in_columns_and_parts = set(df.index.names).issubset(
            set(columns_and_parts)
        )
        if not index:
            if index_in_columns_and_parts:
                # User does not want to set index and a desired
                # column/partition has been set to the index
                df.reset_index(drop=False, inplace=True)
            else:
                # User does not want to set index and an
                # "unwanted" column has been set to the index
                df.reset_index(drop=True, inplace=True)
        else:
            if set(df.index.names) != set(index) and index_in_columns_and_parts:
                # The wrong index has been set and it contains
                # one or more desired columns/partitions
                df.reset_index(drop=False, inplace=True)
            elif index_in_columns_and_parts:
                # The correct index has already been set
                index = False
                columns_and_parts = list(
                    set(columns_and_parts).difference(set(df.index.names))
                )
        # Slice to the requested columns (plus partition columns).
        df = df[list(columns_and_parts)]

        if index:
            df = df.set_index(index)
        return df
@classmethod
def _arrow_table_to_pandas(
cls, arrow_table: pa.Table, categories, **kwargs
) -> pd.DataFrame:
_kwargs = kwargs.get("arrow_to_pandas", {})
_kwargs.update({"use_threads": False, "ignore_metadata": False})
return arrow_table.to_pandas(categories=categories, **_kwargs)
@classmethod
def _parquet_piece_as_arrow(
cls, piece: pq.ParquetDatasetPiece, columns, partitions, **kwargs
) -> pa.Table:
arrow_table = piece.read(
columns=columns,
partitions=partitions,
use_pandas_metadata=True,
use_threads=False,
**kwargs.get("read", {}),
)
return arrow_table
    @staticmethod
    def initialize_write(
        df,
        fs,
        path,
        append=False,
        partition_on=None,
        ignore_divisions=False,
        division_info=None,
        **kwargs,
    ):
        """Prepare the output directory for writing, validating appends.

        Returns ``(fmd, i_offset)``: the existing dataset footer metadata
        (or ``None``) and the number of existing pieces, used to offset
        new part filenames when appending.
        """
        dataset = fmd = None
        i_offset = 0
        if append and division_info is None:
            ignore_divisions = True
        fs.mkdirs(path, exist_ok=True)

        if append:
            try:
                # Allow append if the dataset exists.
                # Also need dataset.metadata object if
                # ignore_divisions is False (to check divisions)
                dataset = pq.ParquetDataset(path, filesystem=fs)
                if not dataset.metadata and not ignore_divisions:
                    # TODO: Be more flexible about existing metadata.
                    raise NotImplementedError(
                        "_metadata file needed to `append` "
                        "with `engine='pyarrow'` "
                        "unless `ignore_divisions` is `True`"
                    )
                fmd = dataset.metadata
            except (IOError, ValueError, IndexError):
                # Original dataset does not exist - cannot append
                append = False
        if append:
            names = dataset.metadata.schema.names
            has_pandas_metadata = (
                dataset.schema.to_arrow_schema().metadata is not None
                and b"pandas" in dataset.schema.to_arrow_schema().metadata
            )
            if has_pandas_metadata:
                pandas_metadata = json.loads(
                    dataset.schema.to_arrow_schema().metadata[b"pandas"].decode("utf8")
                )
                categories = [
                    c["name"]
                    for c in pandas_metadata["columns"]
                    if c["pandas_type"] == "categorical"
                ]
            else:
                categories = None
            dtypes = _get_pyarrow_dtypes(dataset.schema.to_arrow_schema(), categories)
            # New data must match the existing column set and dtypes.
            if set(names) != set(df.columns) - set(partition_on):
                raise ValueError(
                    "Appended columns not the same.\n"
                    "Previous: {} | New: {}".format(names, list(df.columns))
                )
            elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():
                # TODO Coerce values for compatible but different dtypes
                raise ValueError(
                    "Appended dtypes differ.\n{}".format(
                        set(dtypes.items()) ^ set(df.dtypes.iteritems())
                    )
                )
            i_offset = len(dataset.pieces)

            # NOTE(review): if `append` succeeded while division_info is
            # None, this subscript raises TypeError (ignore_divisions was
            # forced True above, but division_info is still None here) --
            # confirm callers always pass a dict when appending.
            if division_info["name"] not in names:
                ignore_divisions = True
            if not ignore_divisions:
                old_end = None
                row_groups = [
                    dataset.metadata.row_group(i)
                    for i in range(dataset.metadata.num_row_groups)
                ]
                # Track the maximum value of the division column across
                # all existing row groups.
                for row_group in row_groups:
                    for i, name in enumerate(names):
                        if name != division_info["name"]:
                            continue
                        column = row_group.column(i)
                        if column.statistics:
                            if not old_end:
                                old_end = column.statistics.max
                            else:
                                old_end = max(old_end, column.statistics.max)
                        break

                divisions = division_info["divisions"]
                if divisions[0] < old_end:
                    raise ValueError(
                        "Appended divisions overlapping with the previous ones"
                        " (set ignore_divisions=True to append anyway).\n"
                        "Previous: {} | New: {}".format(old_end, divisions[0])
                    )
        return fmd, i_offset
    @staticmethod
    def write_partition(
        df,
        path,
        fs,
        filename,
        partition_on,
        return_metadata,
        fmd=None,
        compression=None,
        index_cols=None,
        schema=None,
        **kwargs,
    ):
        """Write one pandas partition to parquet.

        When ``partition_on`` is given, the partition is split into a
        hive-partitioned directory layout; otherwise a single file named
        ``filename`` is written under ``path``.  Returns collected footer
        metadata (for a later ``write_metadata`` call) if
        ``return_metadata`` is True, else an empty list.
        """
        _meta = None
        preserve_index = False
        if index_cols:
            # Move requested index columns into the index so pyarrow
            # records them in the pandas metadata.
            df = df.set_index(index_cols)
            preserve_index = True
        t = pa.Table.from_pandas(df, preserve_index=preserve_index, schema=schema)
        if partition_on:
            md_list = _write_partitioned(
                t, path, partition_on, fs, preserve_index=preserve_index, **kwargs
            )
            if md_list:
                # Collapse the per-directory metadata into a single object.
                _meta = md_list[0]
                for i in range(1, len(md_list)):
                    _meta.append_row_groups(md_list[i])
        else:
            md_list = []
            with fs.open(fs.sep.join([path, filename]), "wb") as fil:
                pq.write_table(
                    t,
                    fil,
                    compression=compression,
                    metadata_collector=md_list,
                    **kwargs,
                )
            if md_list:
                _meta = md_list[0]
                _meta.set_file_path(filename)
        # Return the schema needed to write the metadata
        if return_metadata:
            return [{"schema": t.schema, "meta": _meta}]
        else:
            return []
    @staticmethod
    def write_metadata(parts, fmd, fs, path, append=False, **kwargs):
        """Aggregate per-partition footer metadata and write the
        dataset-level ``_metadata`` (and ``_common_metadata``) files.

        ``parts`` is the list of ``[{"schema": ..., "meta": ...}]``
        entries returned by ``write_partition``.
        """
        if parts:
            if not append:
                # Get only arguments specified in the function
                common_metadata_path = fs.sep.join([path, "_common_metadata"])
                keywords = getargspec(pq.write_metadata).args
                kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}
                with fs.open(common_metadata_path, "wb") as fil:
                    pq.write_metadata(parts[0][0]["schema"], fil, **kwargs_meta)

            # Aggregate metadata and write to _metadata file
            metadata_path = fs.sep.join([path, "_metadata"])
            if append and fmd is not None:
                # Start from the pre-existing footer metadata and append
                # every new part's row groups to it.
                _meta = fmd
                i_start = 0
            else:
                _meta = parts[0][0]["meta"]
                i_start = 1
            for i in range(i_start, len(parts)):
                _meta.append_row_groups(parts[i][0]["meta"])
            with fs.open(metadata_path, "wb") as fil:
                _meta.write_metadata_file(fil)
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/tools/tests/test_util.py | 6 | 3094 | import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
    # Tests for pandas.tools.util.cartesian_product.

    def test_simple(self):
        # Each element of `x` is repeated len(y) times and `y` cycles.
        x, y = list('ABC'), [1, 22]
        result = cartesian_product([x, y])
        expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
                    np.array([ 1, 22, 1, 22, 1, 22])]
        assert_equal(result, expected)

    def test_datetimeindex(self):
        # regression test for GitHub issue #6439
        # make sure that the ordering on datetimeindex is consistent
        x = date_range('2000-01-01', periods=2)
        result = [Index(y).day for y in cartesian_product([x, x])]
        expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
        assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
    # Tests for the locale helpers in pandas.util.testing.

    @classmethod
    def setUpClass(cls):
        super(TestLocaleUtils, cls).setUpClass()
        cls.locales = tm.get_locales()

        if not cls.locales:
            raise nose.SkipTest("No locales found")

        if os.name == 'nt':  # we're on windows
            raise nose.SkipTest("Running on Windows")

    @classmethod
    def tearDownClass(cls):
        super(TestLocaleUtils, cls).tearDownClass()
        del cls.locales

    def test_get_locales(self):
        # all systems should have at least a single locale
        assert len(tm.get_locales()) > 0

    def test_get_locales_prefix(self):
        if len(self.locales) == 1:
            raise nose.SkipTest("Only a single locale found, no point in "
                                "trying to test filtering locale prefixes")
        first_locale = self.locales[0]
        # Filtering by the language prefix must return at least one match.
        assert len(tm.get_locales(prefix=first_locale[:2])) > 0

    def test_set_locale(self):
        if len(self.locales) == 1:
            raise nose.SkipTest("Only a single locale found, no point in "
                                "trying to test setting another locale")

        if LOCALE_OVERRIDE is not None:
            lang, enc = LOCALE_OVERRIDE.split('.')
        else:
            lang, enc = 'it_CH', 'UTF-8'

        # Normalize the encoding name (e.g. "UTF-8" -> "utf-8").
        enc = codecs.lookup(enc).name
        new_locale = lang, enc

        if not tm._can_set_locale(new_locale):
            # Setting an unavailable locale must raise locale.Error.
            with tm.assertRaises(locale.Error):
                with tm.set_locale(new_locale):
                    pass
        else:
            with tm.set_locale(new_locale) as normalized_locale:
                new_lang, new_enc = normalized_locale.split('.')
                new_enc = codecs.lookup(enc).name
                normalized_locale = new_lang, new_enc
                self.assertEqual(normalized_locale, new_locale)

            # Leaving the context manager must restore the original locale.
            current_locale = locale.getlocale()
            self.assertEqual(current_locale, CURRENT_LOCALE)
if __name__ == '__main__':
    # Run this module's tests with nose, dropping into pdb on failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
roxyboy/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    # Each entry is (cost_matrix, expected_total_cost) for the Hungarian
    # (Kuhn-Munkres) linear-assignment algorithm.
    matrices = [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850  # expected cost
         ),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452  # expected cost
         ),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18
         ),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15
         ),

        # n == 2, m == 0 matrix
        ([[], []],
         0
         ),
    ]

    for cost_matrix, expected_total in matrices:
        cost_matrix = np.array(cost_matrix)
        indexes = _hungarian(cost_matrix)
        total_cost = 0
        for r, c in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost

        # The optimal total cost is invariant under transposition of the
        # cost matrix (rows/columns swap roles).
        indexes = _hungarian(cost_matrix.T)
        total_cost = 0
        for c, r in indexes:
            x = cost_matrix[r, c]
            total_cost += x
        assert expected_total == total_cost
| bsd-3-clause |
KaiSzuttor/espresso | samples/lb_profile.py | 1 | 2744 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simulate the flow of a lattice-Boltzmann fluid past a cylinder,
obtain the velocity profile in polar coordinates and compare it
to the analytical solution.
"""
import numpy as np
import matplotlib.pyplot as plt
import espressomd
required_features = ["CUDA", "LB_BOUNDARIES_GPU"]
espressomd.assert_features(required_features)
import espressomd.lb
import espressomd.observables
import espressomd.shapes
import espressomd.lbboundaries
import espressomd.accumulators
system = espressomd.System(box_l=[10.0, 10.0, 5.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
n_steps = 500

# Thermalized GPU LB fluid, driven along z by a constant body force.
lb_fluid = espressomd.lb.LBFluidGPU(
    agrid=1.0, dens=1.0, visc=1.0, tau=0.01, ext_force_density=[0, 0, 0.15], kT=1.0, seed=32)
system.actors.add(lb_fluid)
system.thermostat.set_lb(LB_fluid=lb_fluid, seed=23)

# Observable: z-velocity profile binned radially around the channel axis
# (single phi and z bin, 100 radial bins up to r=4).
fluid_obs = espressomd.observables.CylindricalLBVelocityProfile(
    center=[5.0, 5.0, 0.0],
    axis=[0, 0, 1],
    n_r_bins=100,
    n_phi_bins=1,
    n_z_bins=1,
    min_r=0.0,
    max_r=4.0,
    min_phi=-np.pi,
    max_phi=np.pi,
    min_z=0.0,
    max_z=10.0,
    sampling_density=0.1)
# Cylindrical boundary of radius 4 along z; direction=-1 presumably keeps
# the fluid domain inside the cylinder -- confirm against espressomd docs.
cylinder_shape = espressomd.shapes.Cylinder(
    center=[5.0, 5.0, 5.0],
    axis=[0, 0, 1],
    direction=-1,
    radius=4.0,
    length=20.0)
cylinder_boundary = espressomd.lbboundaries.LBBoundary(shape=cylinder_shape)
system.lbboundaries.add(cylinder_boundary)

# Run to (approach) steady state, then accumulate the mean profile over
# a second run of the same length.
system.integrator.run(n_steps)
accumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs)
system.auto_update_accumulators.add(accumulator)
system.integrator.run(n_steps)
lb_fluid_profile = accumulator.get_mean()
def poiseuille_flow(r, R, ext_force_density):
    """Analytical steady-state Poiseuille profile for a cylindrical channel.

    Velocity at radius ``r`` inside a channel of radius ``R`` driven by a
    constant force density ``ext_force_density`` (works element-wise for
    numpy arrays as well as scalars).
    """
    radial_term = R**2.0 - r**2.0
    return ext_force_density * 1. / 4 * radial_term
# Please note that due to symmetry and interpolation, a plateau is seen
# near r=0.

# Component index 2 is the z-velocity (the direction of the driving force).
n_bins = len(lb_fluid_profile[:, 0, 0, 2])
r_max = 4.0
r = np.linspace(0.0, r_max, n_bins)
plt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile')
# Compare against the analytical Poiseuille solution for the same
# channel radius and force density.
plt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution')
plt.legend()
plt.show()
| gpl-3.0 |
btabibian/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 5 | 2294 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significative a technique
in repeating the classification procedure after randomizing, permuting,
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size

# Some noisy data not correlated with the target
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))

# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]

svm = SVC(kernel='linear')
cv = StratifiedKFold(2)

# Permutation test: re-fit with shuffled labels n_permutations times; the
# p-value is the fraction of permuted scores >= the unpermuted score.
score, permutation_scores, pvalue = permutation_test_score(
    svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)

print("Classification score %s (pvalue : %s)" % (score, pvalue))

###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores',
         edgecolor='black')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
# plt.vlines(score, ylim[0], ylim[1], linestyle='--',
#            color='g', linewidth=3, label='Classification Score'
#            ' (pvalue %s)' % pvalue)
# plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
#            color='k', linewidth=3, label='Luck')
# Vertical markers drawn with plot() as a workaround for the vlines bug.
plt.plot(2 * [score], ylim, '--g', linewidth=3,
         label='Classification Score'
         ' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV

    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))

    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])

    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
    }
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)

    # TASK: print the cross-validated scores for each parameter set
    # explored by the grid search
    # NOTE(review): grid_scores_ is the pre-0.18 sklearn attribute (the file
    # also imports sklearn.grid_search); newer sklearn uses cv_results_.
    print(grid_search.grid_scores_)

    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    y_predicted = grid_search.predict(docs_test)

    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))

    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)

    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
ominux/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 2 | 1318 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties supported by `sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
# Python 2 print statement (this example predates Python 3 support);
# under Python 3 this would be written print(__doc__).
print __doc__
import numpy as np
import pylab as pl
def l1(xs):
    """First-quadrant contour of the L1 unit ball: w1 = |1 - |w0||."""
    points = []
    for v in xs:
        magnitude = np.sqrt(v ** 2.0)                 # |v|
        points.append(np.sqrt((1 - magnitude) ** 2.0))  # |1 - |v||
    return np.array(points)
def l2(xs):
    """First-quadrant contour of the L2 unit ball: w1 = sqrt(1 - w0**2)."""
    points = []
    for v in xs:
        points.append(np.sqrt(1.0 - v ** 2.0))
    return np.array(points)
def el(xs, z):
    """Elastic-net penalty contour for mixing parameter ``z``: for each x,
    the closed-form w1 coordinate giving a unit elastic-net penalty.
    Undefined at z == 0.5, where the denominator (2 - 4*z) vanishes."""
    def _contour_point(x):
        # discriminant of the quadratic solved for the w1 coordinate
        root = (4*z**2 - 8*x*z**2 + 8*x**2*z**2 -
                16*x**2*z**3 + 8*x*z**3 + 4*x**2*z**4)**(1/2)
        return (2 - 2*x - 2*z + 4*x*z - root - 2*x*z**2) / (2 - 4*z)
    return np.array([_contour_point(x) for x in xs])
def cross(ext):
    """Draw the coordinate axes through the origin, spanning [-ext, ext]."""
    # horizontal axis first, then vertical axis, both in black
    for xseg, yseg in (([-ext, ext], [0, 0]), ([0, 0], [-ext, ext])):
        pl.plot(xseg, yseg, "k-")
# Sample the first quadrant; the other quadrants are drawn by symmetry below.
xs = np.linspace(0, 1, 100)
# Slightly above 0.5: el() divides by (2 - 4*z), which is zero at z == 0.5.
alpha = 0.501  # 0.5 division through zero
cross(1.2)
# L1 unit ball (diamond), mirrored into all four quadrants.
pl.plot(xs, l1(xs), "r-", label="L1")
pl.plot(xs, -1.0*l1(xs), "r-")
pl.plot(-1*xs, l1(xs), "r-")
pl.plot(-1*xs, -1.0*l1(xs), "r-")
# L2 unit ball (circle).
pl.plot(xs, l2(xs), "b-", label="L2")
pl.plot(xs, -1.0 * l2(xs), "b-")
pl.plot(-1*xs, l2(xs), "b-")
pl.plot(-1*xs, -1.0 * l2(xs), "b-")
# Elastic-net contour: intermediate between the diamond and the circle.
pl.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
pl.plot(xs, -1.0 * el(xs, alpha), "y-")
pl.plot(-1*xs, el(xs, alpha), "y-")
pl.plot(-1*xs, -1.0 * el(xs, alpha), "y-")
pl.xlabel(r"$w_0$")
pl.ylabel(r"$w_1$")
pl.legend()
pl.axis("equal")
pl.show()
| bsd-3-clause |
vinodkc/spark | python/pyspark/pandas/tests/data_type_ops/test_categorical_ops.py | 7 | 7205 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class CategoricalOpsTest(PandasOnSparkTestCase, TestCasesUtils):
    """Operator semantics for categorical-dtype pandas-on-Spark Series.

    Arithmetic and bitwise operators are expected to raise ``TypeError`` on
    categorical data (mirroring pandas); conversions and ``astype`` must
    round-trip against plain pandas.
    """

    @property
    def pser(self):
        # Reference pandas Series with a mixed-value categorical dtype.
        return pd.Series([1, "x", "y"], dtype="category")

    @property
    def psser(self):
        # pandas-on-Spark counterpart of ``pser``.
        return ps.from_pandas(self.pser)

    def test_add(self):
        self.assertRaises(TypeError, lambda: self.psser + "x")
        self.assertRaises(TypeError, lambda: self.psser + 1)
        # self.pssers presumably comes from TestCasesUtils and spans other
        # dtypes — verify against testing_utils.
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser + psser)

    def test_sub(self):
        self.assertRaises(TypeError, lambda: self.psser - "x")
        self.assertRaises(TypeError, lambda: self.psser - 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser - psser)

    def test_mul(self):
        self.assertRaises(TypeError, lambda: self.psser * "x")
        self.assertRaises(TypeError, lambda: self.psser * 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser * psser)

    def test_truediv(self):
        self.assertRaises(TypeError, lambda: self.psser / "x")
        self.assertRaises(TypeError, lambda: self.psser / 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser / psser)

    def test_floordiv(self):
        self.assertRaises(TypeError, lambda: self.psser // "x")
        self.assertRaises(TypeError, lambda: self.psser // 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser // psser)

    def test_mod(self):
        self.assertRaises(TypeError, lambda: self.psser % "x")
        self.assertRaises(TypeError, lambda: self.psser % 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser % psser)

    def test_pow(self):
        self.assertRaises(TypeError, lambda: self.psser ** "x")
        self.assertRaises(TypeError, lambda: self.psser ** 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser in self.pssers:
                self.assertRaises(TypeError, lambda: self.psser ** psser)

    # Reflected (r*) variants: the scalar appears on the left-hand side.
    def test_radd(self):
        self.assertRaises(TypeError, lambda: "x" + self.psser)
        self.assertRaises(TypeError, lambda: 1 + self.psser)

    def test_rsub(self):
        self.assertRaises(TypeError, lambda: "x" - self.psser)
        self.assertRaises(TypeError, lambda: 1 - self.psser)

    def test_rmul(self):
        self.assertRaises(TypeError, lambda: "x" * self.psser)
        self.assertRaises(TypeError, lambda: 2 * self.psser)

    def test_rtruediv(self):
        self.assertRaises(TypeError, lambda: "x" / self.psser)
        self.assertRaises(TypeError, lambda: 1 / self.psser)

    def test_rfloordiv(self):
        self.assertRaises(TypeError, lambda: "x" // self.psser)
        self.assertRaises(TypeError, lambda: 1 // self.psser)

    def test_rmod(self):
        self.assertRaises(TypeError, lambda: 1 % self.psser)

    def test_rpow(self):
        self.assertRaises(TypeError, lambda: "x" ** self.psser)
        self.assertRaises(TypeError, lambda: 1 ** self.psser)

    # Bitwise/boolean operators are likewise unsupported on categoricals.
    def test_and(self):
        self.assertRaises(TypeError, lambda: self.psser & True)
        self.assertRaises(TypeError, lambda: self.psser & False)
        self.assertRaises(TypeError, lambda: self.psser & self.psser)

    def test_rand(self):
        self.assertRaises(TypeError, lambda: True & self.psser)
        self.assertRaises(TypeError, lambda: False & self.psser)

    def test_or(self):
        self.assertRaises(TypeError, lambda: self.psser | True)
        self.assertRaises(TypeError, lambda: self.psser | False)
        self.assertRaises(TypeError, lambda: self.psser | self.psser)

    def test_ror(self):
        self.assertRaises(TypeError, lambda: True | self.psser)
        self.assertRaises(TypeError, lambda: False | self.psser)

    def test_from_to_pandas(self):
        # Round-trip: pandas -> pandas-on-Spark -> pandas preserves dtype/values.
        data = [1, "x", "y"]
        pser = pd.Series(data, dtype="category")
        psser = ps.Series(data, dtype="category")
        self.assert_eq(pser, psser.to_pandas())
        self.assert_eq(ps.from_pandas(pser), psser)

    def test_isnull(self):
        self.assert_eq(self.pser.isnull(), self.psser.isnull())

    def test_astype(self):
        data = [1, 2, 3]
        pser = pd.Series(data, dtype="category")
        psser = ps.from_pandas(pser)
        self.assert_eq(pser.astype(int), psser.astype(int))
        self.assert_eq(pser.astype(float), psser.astype(float))
        self.assert_eq(pser.astype(np.float32), psser.astype(np.float32))
        self.assert_eq(pser.astype(np.int32), psser.astype(np.int32))
        self.assert_eq(pser.astype(np.int16), psser.astype(np.int16))
        self.assert_eq(pser.astype(np.int8), psser.astype(np.int8))
        self.assert_eq(pser.astype(str), psser.astype(str))
        self.assert_eq(pser.astype(bool), psser.astype(bool))
        self.assert_eq(pser.astype("category"), psser.astype("category"))
        cat_type = CategoricalDtype(categories=[3, 1, 2])
        # pandas < 1.2 kept the original category order when re-casting an
        # existing categorical, so compare against a fresh Series there.
        if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
            self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
        else:
            self.assert_eq(pd.Series(data).astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
    import unittest
    # Re-import the module's names so unittest discovery sees the test class
    # at module level when this file is executed directly.
    from pyspark.pandas.tests.data_type_ops.test_categorical_ops import *  # noqa: F401

    try:
        # Optional dependency: emits JUnit-style XML reports for CI.
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to the default text test runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Winand/pandas | pandas/core/ops.py | 1 | 55389 | """
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas._libs import (lib, index as libindex,
tslib as libts, algos as libalgos, iNaT)
from pandas import compat
from pandas.util._decorators import Appender
import pandas.core.computation.expressions as expressions
from pandas.compat import bind_method
import pandas.core.missing as missing
from pandas.errors import PerformanceWarning
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.core.dtypes.missing import notna, isna
from pandas.core.dtypes.common import (
needs_i8_conversion,
is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype, is_datetimetz,
is_list_like,
is_scalar,
_ensure_object)
from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type
from pandas.core.dtypes.generic import (
ABCSeries,
ABCIndex,
ABCPeriodIndex,
ABCDateOffset)
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, comp_method, bool_method,
                    use_numexpr, special=False, default_axis='columns',
                    have_divmod=False):
    """
    Build a name -> method dict from the supplied operator factories.

    Parameters
    ----------
    arith_method, comp_method, bool_method : callable or None
        Factories producing arithmetic / comparison / boolean methods.
        A falsy ``comp_method`` or ``bool_method`` skips that family.
    use_numexpr : bool
        If False, ``None`` is passed instead of the operator string so
        evaluation cannot be routed through numexpr.
    special : bool
        If True, names are dunder-mangled (``add`` -> ``__add__``).
    default_axis : str or None
        Only DataFrame cares: special methods use None, flex 'columns'.
    have_divmod : bool
        Also build ``divmod`` (needs a tuple-returning result constructor).
    """
    # creates actual methods based upon arithmetic, comp and bool method
    # constructors.

    # NOTE: Only frame cares about default_axis, specifically: special methods
    # have default axis None, whereas flex methods have default axis 'columns'
    # if we're not using numexpr, then don't pass a str_rep
    if use_numexpr:
        op = lambda x: x
    else:
        op = lambda x: None
    if special:

        def names(x):
            # names already ending in "_" (e.g. "and_") get only one trailing
            # underscore appended: "and_" -> "__and_" ... -> "__and__" overall.
            if x[-1] == "_":
                return "__{name}_".format(name=x)
            else:
                return "__{name}__".format(name=x)
    else:
        names = lambda x: x

    # Inframe, all special methods have default_axis=None, flex methods have
    # default_axis set to the default (columns)
    # yapf: disable
    new_methods = dict(
        add=arith_method(operator.add, names('add'), op('+'),
                         default_axis=default_axis),
        radd=arith_method(lambda x, y: y + x, names('radd'), op('+'),
                          default_axis=default_axis),
        sub=arith_method(operator.sub, names('sub'), op('-'),
                         default_axis=default_axis),
        mul=arith_method(operator.mul, names('mul'), op('*'),
                         default_axis=default_axis),
        truediv=arith_method(operator.truediv, names('truediv'), op('/'),
                             truediv=True, fill_zeros=np.inf,
                             default_axis=default_axis),
        floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
                              default_axis=default_axis, fill_zeros=np.inf),
        # Causes a floating point exception in the tests when numexpr enabled,
        # so for now no speedup
        mod=arith_method(operator.mod, names('mod'), None,
                         default_axis=default_axis, fill_zeros=np.nan),
        pow=arith_method(operator.pow, names('pow'), op('**'),
                         default_axis=default_axis),
        # not entirely sure why this is necessary, but previously was included
        # so it's here to maintain compatibility
        rmul=arith_method(operator.mul, names('rmul'), op('*'),
                          default_axis=default_axis, reversed=True),
        rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
                          default_axis=default_axis, reversed=True),
        rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
                              names('rtruediv'), op('/'), truediv=True,
                              fill_zeros=np.inf, default_axis=default_axis,
                              reversed=True),
        rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
                               names('rfloordiv'), op('//'),
                               default_axis=default_axis, fill_zeros=np.inf,
                               reversed=True),
        rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
                          default_axis=default_axis, reversed=True),
        rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
                          default_axis=default_axis, fill_zeros=np.nan,
                          reversed=True),)
    # yapf: enable
    # div/rdiv are aliases of truediv/rtruediv (Python 2 compatibility).
    new_methods['div'] = new_methods['truediv']
    new_methods['rdiv'] = new_methods['rtruediv']

    # Comp methods never had a default axis set
    if comp_method:
        new_methods.update(dict(
            eq=comp_method(operator.eq, names('eq'), op('==')),
            ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
            lt=comp_method(operator.lt, names('lt'), op('<')),
            gt=comp_method(operator.gt, names('gt'), op('>')),
            le=comp_method(operator.le, names('le'), op('<=')),
            ge=comp_method(operator.ge, names('ge'), op('>=')), ))
    if bool_method:
        new_methods.update(
            dict(and_=bool_method(operator.and_, names('and_'), op('&')),
                 or_=bool_method(operator.or_, names('or_'), op('|')),
                 # For some reason ``^`` wasn't used in original.
                 xor=bool_method(operator.xor, names('xor'), op('^')),
                 rand_=bool_method(lambda x, y: operator.and_(y, x),
                                   names('rand_'), op('&')),
                 ror_=bool_method(lambda x, y: operator.or_(y, x),
                                  names('ror_'), op('|')),
                 rxor=bool_method(lambda x, y: operator.xor(y, x),
                                  names('rxor'), op('^'))))
    if have_divmod:
        # divmod doesn't have an op that is supported by numexpr
        new_methods['divmod'] = arith_method(
            divmod,
            names('divmod'),
            None,
            default_axis=default_axis,
            construct_result=_construct_divmod_result,
        )

    # Apply the (possibly dunder-mangling) name transform to the final keys.
    new_methods = dict((names(k), v) for k, v in new_methods.items())
    return new_methods
def add_methods(cls, new_methods, force, select, exclude):
    """Attach ``new_methods`` to ``cls``, honoring the select/exclude filters.

    Parameters
    ----------
    cls : type
        Class to receive the methods.
    new_methods : dict
        Mapping of method name -> callable.
    force : bool
        If True, overwrite methods already present on ``cls.__dict__``.
    select : iterable of str or None
        If given, only names in this set are attached.
    exclude : iterable of str or None
        If given, these names are dropped. Mutually exclusive with ``select``.

    Raises
    ------
    TypeError
        If both ``select`` and ``exclude`` are provided.
    """
    if select and exclude:
        raise TypeError("May only pass either select or exclude")

    if select:
        keep = set(select)
        new_methods = {key: meth for key, meth in new_methods.items()
                       if key in keep}

    if exclude:
        for unwanted in exclude:
            new_methods.pop(unwanted, None)

    for method_name, method in new_methods.items():
        # respect existing definitions unless force-overwriting
        if force or method_name not in cls.__dict__:
            bind_method(cls, method_name, method)
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
                                   comp_method=None, bool_method=None,
                                   use_numexpr=True, force=False, select=None,
                                   exclude=None, have_divmod=False):
    """
    Adds the full suite of special arithmetic methods (``__add__``,
    ``__sub__``, etc.) to the class.

    Parameters
    ----------
    arith_method : function (optional)
        factory for special arithmetic methods, with op string:
        f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
    comp_method : function (optional)
        factory for rich comparison - signature: f(op, name, str_rep)
    bool_method : function (optional)
        factory for boolean methods - signature: f(op, name, str_rep)
    use_numexpr : bool, default True
        whether to accelerate with numexpr, defaults to True
    force : bool, default False
        if False, checks whether function is defined **on ``cls.__dict__``**
        before defining if True, always defines functions on class base
    select : iterable of strings (optional)
        if passed, only sets functions with names in select
    exclude : iterable of strings (optional)
        if passed, will not set functions with names in exclude
    have_divmod : bool, (optional)
        should a divmod method be added? this method is special because it
        returns a tuple of cls instead of a single element of type cls
    """

    # in frame, special methods have default_axis = None, comp methods use
    # 'columns'
    new_methods = _create_methods(arith_method, comp_method,
                                  bool_method, use_numexpr, default_axis=None,
                                  special=True, have_divmod=have_divmod)

    # inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed
    def _wrap_inplace_method(method):
        """
        return an inplace wrapper for this method
        """

        def f(self, other):
            # evaluate the non-inplace op first, then write back into self
            result = method(self, other)

            # this makes sure that we are aligned like the input
            # we are updating inplace so we want to ignore is_copy
            self._update_inplace(result.reindex_like(self, copy=False)._data,
                                 verify_is_copy=False)

            # inplace dunders must return self for chained statements
            return self

        return f

    new_methods.update(
        dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]),
             __isub__=_wrap_inplace_method(new_methods["__sub__"]),
             __imul__=_wrap_inplace_method(new_methods["__mul__"]),
             __itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
             __ifloordiv__=_wrap_inplace_method(new_methods["__floordiv__"]),
             __imod__=_wrap_inplace_method(new_methods["__mod__"]),
             __ipow__=_wrap_inplace_method(new_methods["__pow__"])))
    if not compat.PY3:
        # Python 2 only: classic division dunder
        new_methods["__idiv__"] = _wrap_inplace_method(new_methods["__div__"])
    if bool_method:
        new_methods.update(
            dict(__iand__=_wrap_inplace_method(new_methods["__and__"]),
                 __ior__=_wrap_inplace_method(new_methods["__or__"]),
                 __ixor__=_wrap_inplace_method(new_methods["__xor__"])))

    add_methods(cls, new_methods=new_methods, force=force, select=select,
                exclude=exclude)
def add_flex_arithmetic_methods(cls, flex_arith_method,
                                flex_comp_method=None, flex_bool_method=None,
                                use_numexpr=True, force=False, select=None,
                                exclude=None):
    """
    Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
    to the class.

    Parameters
    ----------
    flex_arith_method : function
        factory for special arithmetic methods, with op string:
        f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
    flex_comp_method : function, optional
        factory for rich comparison - signature: f(op, name, str_rep)
    flex_bool_method : function, optional
        factory for boolean flex methods - signature: f(op, name, str_rep)
    use_numexpr : bool, default True
        whether to accelerate with numexpr
    force : bool, default False
        if False, only define a method when it is absent from
        ``cls.__dict__``; if True, always define it
    select : iterable of strings, optional
        if passed, only sets functions with names in select
    exclude : iterable of strings, optional
        if passed, will not set functions with names in exclude
    """
    # Flex methods use the default axis ('columns'); this only matters for
    # DataFrame, not for Series/Panel.
    new_methods = _create_methods(flex_arith_method,
                                  flex_comp_method, flex_bool_method,
                                  use_numexpr, default_axis='columns',
                                  special=False)

    # long-form aliases for the short operator names
    new_methods.update({'multiply': new_methods['mul'],
                        'subtract': new_methods['sub'],
                        'divide': new_methods['div']})

    # opt out of bool flex methods for now
    for unwanted in ('ror_', 'rxor', 'rand_'):
        new_methods.pop(unwanted, None)

    add_methods(cls, new_methods=new_methods, force=force, select=select,
                exclude=exclude)
class _Op(object):
"""
Wrapper around Series arithmetic operations.
Generally, you should use classmethod ``_Op.get_op`` as an entry point.
This validates and coerces lhs and rhs depending on its dtype and
based on op. See _TimeOp also.
Parameters
----------
left : Series
lhs of op
right : object
rhs of op
name : str
name of op
na_op : callable
a function which wraps op
"""
fill_value = np.nan
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name, na_op):
self.left = left
self.right = right
self.name = name
self.na_op = na_op
self.lvalues = left
self.rvalues = right
@classmethod
def get_op(cls, left, right, name, na_op):
"""
Get op dispatcher, returns _Op or _TimeOp.
If ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
that stores all the required values. Otherwise, it will generate
either a ``_Op``, indicating that the operation is performed via
normal numpy path.
"""
is_timedelta_lhs = is_timedelta64_dtype(left)
is_datetime_lhs = (is_datetime64_dtype(left) or
is_datetime64tz_dtype(left))
if not (is_datetime_lhs or is_timedelta_lhs):
return _Op(left, right, name, na_op)
else:
return _TimeOp(left, right, name, na_op)
class _TimeOp(_Op):
    """
    Wrapper around Series datetime/time/timedelta arithmetic operations.
    Generally, you should use classmethod ``_Op.get_op`` as an entry point.
    """
    # datetime64/timedelta64 use iNaT (not NaN) as the missing-value marker
    fill_value = iNaT

    def __init__(self, left, right, name, na_op):
        super(_TimeOp, self).__init__(left, right, name, na_op)

        lvalues = self._convert_to_array(left, name=name)
        rvalues = self._convert_to_array(right, name=name, other=lvalues)

        # left
        self.is_offset_lhs = self._is_offset(left)
        self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
        self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
        self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
        self.is_datetime_lhs = (self.is_datetime64_lhs or
                                self.is_datetime64tz_lhs)
        self.is_integer_lhs = left.dtype.kind in ['i', 'u']
        self.is_floating_lhs = left.dtype.kind == 'f'

        # right
        self.is_offset_rhs = self._is_offset(right)
        self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
        self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
        self.is_datetime_rhs = (self.is_datetime64_rhs or
                                self.is_datetime64tz_rhs)
        self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
        self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
        self.is_floating_rhs = rvalues.dtype.kind == 'f'

        self._validate(lvalues, rvalues, name)
        self.lvalues, self.rvalues = self._convert_for_datetime(lvalues,
                                                                rvalues)

    def _validate(self, lvalues, rvalues, name):
        # Reject operand/operator combinations that make no sense for
        # datetime-like arithmetic; raises TypeError/ValueError otherwise.

        # timedelta and integer mul/div
        if ((self.is_timedelta_lhs and
                (self.is_integer_rhs or self.is_floating_rhs)) or
            (self.is_timedelta_rhs and
                (self.is_integer_lhs or self.is_floating_lhs))):

            if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
                raise TypeError("can only operate on a timedelta and an "
                                "integer or a float for division and "
                                "multiplication, but the operator [{name}] "
                                "was passed".format(name=name))

        # 2 timedeltas
        elif ((self.is_timedelta_lhs and
               (self.is_timedelta_rhs or self.is_offset_rhs)) or
              (self.is_timedelta_rhs and
               (self.is_timedelta_lhs or self.is_offset_lhs))):

            if name not in ('__div__', '__rdiv__', '__truediv__',
                            '__rtruediv__', '__add__', '__radd__', '__sub__',
                            '__rsub__'):
                raise TypeError("can only operate on a timedeltas for addition"
                                ", subtraction, and division, but the operator"
                                " [{name}] was passed".format(name=name))

        # datetime and timedelta/DateOffset
        elif (self.is_datetime_lhs and
              (self.is_timedelta_rhs or self.is_offset_rhs)):

            if name not in ('__add__', '__radd__', '__sub__'):
                raise TypeError("can only operate on a datetime with a rhs of "
                                "a timedelta/DateOffset for addition and "
                                "subtraction, but the operator [{name}] was "
                                "passed".format(name=name))

        elif (self.is_datetime_rhs and
              (self.is_timedelta_lhs or self.is_offset_lhs)):
            if name not in ('__add__', '__radd__', '__rsub__'):
                raise TypeError("can only operate on a timedelta/DateOffset "
                                "with a rhs of a datetime for addition, "
                                "but the operator [{name}] was passed"
                                .format(name=name))

        # 2 datetimes
        elif self.is_datetime_lhs and self.is_datetime_rhs:

            if name not in ('__sub__', '__rsub__'):
                raise TypeError("can only operate on a datetimes for"
                                " subtraction, but the operator [{name}] was"
                                " passed".format(name=name))

            # if tz's must be equal (same or None)
            if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
                raise ValueError("Incompatible tz's on datetime subtraction "
                                 "ops")

        elif ((self.is_timedelta_lhs or self.is_offset_lhs) and
              self.is_datetime_rhs):

            if name not in ('__add__', '__radd__'):
                raise TypeError("can only operate on a timedelta/DateOffset "
                                "and a datetime for addition, but the operator"
                                " [{name}] was passed".format(name=name))
        else:
            raise TypeError('cannot operate on a series without a rhs '
                            'of a series/ndarray of type datetime64[ns] '
                            'or a timedelta')

    def _convert_to_array(self, values, name=None, other=None):
        """converts values to ndarray"""
        from pandas.core.tools.timedeltas import to_timedelta

        ovalues = values
        supplied_dtype = None
        if not is_list_like(values):
            # scalar -> 1-element array so dtype inference below applies
            values = np.array([values])

        # if this is a Series that contains relevant dtype info, then use this
        # instead of the inferred type; this avoids coercing Series([NaT],
        # dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
        elif (isinstance(values, pd.Series) and
              (is_timedelta64_dtype(values) or is_datetime64_dtype(values))):
            supplied_dtype = values.dtype

        inferred_type = lib.infer_dtype(values)
        if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or
                is_datetimetz(inferred_type)):
            # if we have a other of timedelta, but use pd.NaT here we
            # we are in the wrong path
            if (supplied_dtype is None and other is not None and
                    (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and
                    isna(values).all()):
                values = np.empty(values.shape, dtype='timedelta64[ns]')
                values[:] = iNaT

            # a datelike
            elif isinstance(values, pd.DatetimeIndex):
                values = values.to_series()
            # datetime with tz
            elif (isinstance(ovalues, datetime.datetime) and
                  hasattr(ovalues, 'tzinfo')):
                values = pd.DatetimeIndex(values)
            # datetime array with tz
            elif is_datetimetz(values):
                if isinstance(values, ABCSeries):
                    values = values._values
            elif not (isinstance(values, (np.ndarray, ABCSeries)) and
                      is_datetime64_dtype(values)):
                values = libts.array_to_datetime(values)
        elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to to ns here
            values = to_timedelta(values, errors='coerce', box=False)
        elif inferred_type == 'integer':
            # py3 compat where dtype is 'm' but is an integer
            if values.dtype.kind == 'm':
                values = values.astype('timedelta64[ns]')
            elif isinstance(values, pd.PeriodIndex):
                values = values.to_timestamp().to_series()
            elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
                raise TypeError("incompatible type for a datetime/timedelta "
                                "operation [{name}]".format(name=name))
        elif inferred_type == 'floating':
            # all-NaN float input stands in for NaT in add/sub
            if (isna(values).all() and
                    name in ('__add__', '__radd__', '__sub__', '__rsub__')):
                values = np.empty(values.shape, dtype=other.dtype)
                values[:] = iNaT
            return values
        elif self._is_offset(values):
            # DateOffset(s) pass through untouched
            return values
        else:
            raise TypeError("incompatible type [{dtype}] for a "
                            "datetime/timedelta operation"
                            .format(dtype=np.array(values).dtype))

        return values

    def _convert_for_datetime(self, lvalues, rvalues):
        # Decide the result dtype and coerce both operands into a form the
        # numeric op can consume (i8 views for datetimes/timedeltas);
        # also installs a wrap_results masker when NaT values are present.
        from pandas.core.tools.timedeltas import to_timedelta

        mask = isna(lvalues) | isna(rvalues)

        # datetimes require views
        if self.is_datetime_lhs or self.is_datetime_rhs:

            # datetime subtraction means timedelta
            if self.is_datetime_lhs and self.is_datetime_rhs:
                if self.name in ('__sub__', '__rsub__'):
                    self.dtype = 'timedelta64[ns]'
                else:
                    self.dtype = 'datetime64[ns]'
            elif self.is_datetime64tz_lhs:
                self.dtype = lvalues.dtype
            elif self.is_datetime64tz_rhs:
                self.dtype = rvalues.dtype
            else:
                self.dtype = 'datetime64[ns]'

            # if adding single offset try vectorized path
            # in DatetimeIndex; otherwise elementwise apply
            def _offset(lvalues, rvalues):
                if len(lvalues) == 1:
                    rvalues = pd.DatetimeIndex(rvalues)
                    lvalues = lvalues[0]
                else:
                    warnings.warn("Adding/subtracting array of DateOffsets to "
                                  "Series not vectorized", PerformanceWarning)
                    rvalues = rvalues.astype('O')

                # pass thru on the na_op
                self.na_op = lambda x, y: getattr(x, self.name)(y)
                return lvalues, rvalues

            if self.is_offset_lhs:
                lvalues, rvalues = _offset(lvalues, rvalues)
            elif self.is_offset_rhs:
                rvalues, lvalues = _offset(rvalues, lvalues)
            else:

                # with tz, convert to UTC
                if self.is_datetime64tz_lhs:
                    lvalues = lvalues.tz_convert('UTC').tz_localize(None)
                if self.is_datetime64tz_rhs:
                    rvalues = rvalues.tz_convert('UTC').tz_localize(None)

                # operate on the underlying nanosecond integers
                lvalues = lvalues.view(np.int64)
                rvalues = rvalues.view(np.int64)

        # otherwise it's a timedelta
        else:

            self.dtype = 'timedelta64[ns]'

            # convert Tick DateOffset to underlying delta
            if self.is_offset_lhs:
                lvalues = to_timedelta(lvalues, box=False)
            if self.is_offset_rhs:
                rvalues = to_timedelta(rvalues, box=False)

            lvalues = lvalues.astype(np.int64)
            if not self.is_floating_rhs:
                rvalues = rvalues.astype(np.int64)

            # time delta division -> unit less
            # integer gets converted to timedelta in np < 1.6
            if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and
                    not self.is_integer_rhs and not self.is_integer_lhs and
                    self.name in ('__div__', '__truediv__')):
                self.dtype = 'float64'
                self.fill_value = np.nan
                lvalues = lvalues.astype(np.float64)
                rvalues = rvalues.astype(np.float64)

        # if we need to mask the results
        if mask.any():

            def f(x):

                # datetime64[ns]/timedelta64[ns] masking
                try:
                    x = np.array(x, dtype=self.dtype)
                except TypeError:
                    x = np.array(x, dtype='datetime64[ns]')

                np.putmask(x, mask, self.fill_value)
                return x

            self.wrap_results = f

        return lvalues, rvalues

    def _is_offset(self, arr_or_obj):
        """ check if obj or all elements of list-like is DateOffset """
        if isinstance(arr_or_obj, ABCDateOffset):
            return True
        elif is_list_like(arr_or_obj) and len(arr_or_obj):
            return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)
        return False
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
# ToDo: Different from _align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
def _construct_result(left, result, index, name, dtype):
return left._constructor(result, index=index, name=name, dtype=dtype)
def _construct_divmod_result(left, result, index, name, dtype):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
constructor = left._constructor
return (
constructor(result[0], index=index, name=name, dtype=dtype),
constructor(result[1], index=index, name=name, dtype=dtype),
)
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
                         construct_result=_construct_result, **eval_kwargs):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Returns the actual bound method ``wrapper(left, right)`` that will be
    attached to Series; ``na_op``/``safe_na_op`` implement progressively
    more defensive evaluation fallbacks.
    """

    def na_op(x, y):
        # Fast path via expressions.evaluate (numexpr when available);
        # on TypeError fall back to a masked elementwise evaluation that
        # skips missing values.
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
                dtype = find_common_type([x.dtype, y.dtype])
                result = np.empty(x.size, dtype=dtype)
                mask = notna(x) & notna(y)
                result[mask] = op(x[mask], _values_from_object(y[mask]))
            elif isinstance(x, np.ndarray):
                result = np.empty(len(x), dtype=x.dtype)
                mask = notna(x)
                result[mask] = op(x[mask], y)
            else:
                raise TypeError("{typ} cannot perform the operation "
                                "{op}".format(typ=type(x).__name__,
                                              op=str_rep))

            result, changed = maybe_upcast_putmask(result, ~mask, np.nan)

        # e.g. divide by zero -> inf/nan per fill_zeros
        result = missing.fill_zeros(result, x, y, name, fill_zeros)
        return result

    def safe_na_op(lvalues, rvalues):
        # Last-resort fallback: for object dtype, apply the op one element
        # at a time via the cython arrmap helper.
        try:
            with np.errstate(all='ignore'):
                return na_op(lvalues, rvalues)
        except Exception:
            if isinstance(rvalues, ABCSeries):
                if is_object_dtype(rvalues):
                    # if dtype is object, try elementwise op
                    return libalgos.arrmap_object(rvalues,
                                                  lambda x: op(lvalues, x))
            else:
                if is_object_dtype(lvalues):
                    return libalgos.arrmap_object(lvalues,
                                                  lambda x: op(x, rvalues))
            raise

    def wrapper(left, right, name=name, na_op=na_op):
        if isinstance(right, pd.DataFrame):
            # let DataFrame handle Series <op> DataFrame
            return NotImplemented

        left, right = _align_method_SERIES(left, right)

        # dispatch to _Op or _TimeOp depending on lhs dtype
        converted = _Op.get_op(left, right, name, na_op)

        left, right = converted.left, converted.right
        lvalues, rvalues = converted.lvalues, converted.rvalues
        dtype = converted.dtype
        wrap_results = converted.wrap_results
        na_op = converted.na_op

        if isinstance(rvalues, ABCSeries):
            name = _maybe_match_name(left, rvalues)
            lvalues = getattr(lvalues, 'values', lvalues)
            rvalues = getattr(rvalues, 'values', rvalues)
            # _Op aligns left and right
        else:
            name = left.name
            if (hasattr(lvalues, 'values') and
                    not isinstance(lvalues, pd.DatetimeIndex)):
                lvalues = lvalues.values

        result = wrap_results(safe_na_op(lvalues, rvalues))
        return construct_result(
            left,
            result,
            index=left.index,
            name=name,
            dtype=dtype,
        )

    return wrapper
def _comp_method_OBJECT_ARRAY(op, x, y):
    """Elementwise comparison for object-dtype array ``x`` against ``y``.

    Dispatches to the cython vectorized compare when ``y`` is array-like,
    otherwise to the scalar compare.
    """
    if isinstance(y, list):
        y = lib.list_to_object_array(y)
    if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
        # comparisons on object arrays require both sides as object dtype
        if not is_object_dtype(y.dtype):
            y = y.astype(np.object_)
        if isinstance(y, (ABCSeries, ABCIndex)):
            y = y.values
        result = lib.vec_compare(x, y, op)
    else:
        result = lib.scalar_compare(x, y, op)
    return result
def _comp_method_SERIES(op, name, str_rep, masker=False):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """

    def na_op(x, y):
        # dispatch to the categorical if we have a categorical
        # in either operand
        if is_categorical_dtype(x):
            return op(x, y)
        elif is_categorical_dtype(y) and not is_scalar(y):
            # reverse the operands so the Categorical handles the comparison
            return op(y, x)

        if is_object_dtype(x.dtype):
            result = _comp_method_OBJECT_ARRAY(op, x, y)
        else:
            # we want to compare like types
            # we only want to convert to integer like if
            # we are not NotImplemented, otherwise
            # we would allow datetime64 (but viewed as i8) against
            # integer comparisons
            if is_datetimelike_v_numeric(x, y):
                raise TypeError("invalid type comparison")

            # numpy does not like comparisons vs None
            if is_scalar(y) and isna(y):
                if name == '__ne__':
                    return np.ones(len(x), dtype=bool)
                else:
                    return np.zeros(len(x), dtype=bool)

            # we have a datetime/timedelta and may need to convert
            mask = None
            if (needs_i8_conversion(x) or
                    (not is_scalar(y) and needs_i8_conversion(y))):
                if is_scalar(y):
                    mask = isna(x)
                    y = libindex.convert_scalar(x, _values_from_object(y))
                else:
                    mask = isna(x) | isna(y)
                    y = y.view('i8')
                # compare datetimelikes through their i8 representation
                x = x.view('i8')

            try:
                with np.errstate(all='ignore'):
                    result = getattr(x, name)(y)
                if result is NotImplemented:
                    raise TypeError("invalid type comparison")
            except AttributeError:
                result = op(x, y)

            # positions that were NA get the ``masker`` fill value
            if mask is not None and mask.any():
                result[mask] = masker

        return result

    def wrapper(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            self._get_axis_number(axis)

        if isinstance(other, ABCSeries):
            name = _maybe_match_name(self, other)
            if not self._indexed_same(other):
                msg = 'Can only compare identically-labeled Series objects'
                raise ValueError(msg)
            return self._constructor(na_op(self.values, other.values),
                                     index=self.index, name=name)
        elif isinstance(other, pd.DataFrame):  # pragma: no cover
            return NotImplemented
        elif isinstance(other, (np.ndarray, pd.Index)):
            # do not check length of zerodim array
            # as it will broadcast
            if (not is_scalar(lib.item_from_zerodim(other)) and
                    len(self) != len(other)):
                raise ValueError('Lengths must match to compare')

            if isinstance(other, ABCPeriodIndex):
                # temp workaround until fixing GH 13637
                # tested in test_nat_comparisons
                # (pandas.tests.series.test_operators.TestSeriesOperators)
                return self._constructor(na_op(self.values,
                                               other.asobject.values),
                                         index=self.index)

            return self._constructor(na_op(self.values, np.asarray(other)),
                                     index=self.index).__finalize__(self)
        elif isinstance(other, pd.Categorical):
            if not is_categorical_dtype(self):
                msg = ("Cannot compare a Categorical for op {op} with Series "
                       "of dtype {typ}.\nIf you want to compare values, use "
                       "'series <op> np.asarray(other)'.")
                raise TypeError(msg.format(op=op, typ=self.dtype))

        if is_categorical_dtype(self):
            # cats are a special case as get_values() would return an ndarray,
            # which would then not take categories ordering into account
            # we can go directly to op, as the na_op would just test again and
            # dispatch to it.
            with np.errstate(all='ignore'):
                res = op(self.values, other)
        else:
            values = self.get_values()
            if isinstance(other, (list, np.ndarray)):
                other = np.asarray(other)

            with np.errstate(all='ignore'):
                res = na_op(values, other)
            if is_scalar(res):
                raise TypeError('Could not compare {typ} type with Series'
                                .format(typ=type(other)))

            # always return a full value series here
            res = _values_from_object(res)

        res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
        return res

    return wrapper
def _bool_method_SERIES(op, name, str_rep):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Builds the logical (&, |, ^) operator wrapper for Series.  NA slots are
    filled with 0 (integer operands) or False (everything else) before the
    result is returned.
    """

    def na_op(x, y):
        # Try the raw numpy op first; on TypeError fall back to object-level
        # evaluation.
        try:
            result = op(x, y)
        except TypeError:
            if isinstance(y, list):
                y = lib.list_to_object_array(y)

            if isinstance(y, (np.ndarray, ABCSeries)):
                if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
                    result = op(x, y)  # when would this be hit?
                else:
                    x = _ensure_object(x)
                    y = _ensure_object(y)
                    result = lib.vec_binop(x, y, op)
            else:
                try:
                    # let null fall thru
                    if not isna(y):
                        y = bool(y)
                    result = lib.scalar_binop(x, y, op)
                # BUG FIX: previously a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                except Exception:
                    msg = ("cannot compare a dtyped [{dtype}] array "
                           "with a scalar of type [{type}]"
                           ).format(dtype=x.dtype, type=type(y).__name__)
                    raise TypeError(msg)

        return result

    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)

        # fillers restore NA positions after the boolean op
        fill_int = lambda x: x.fillna(0)
        fill_bool = lambda x: x.fillna(False).astype(bool)

        self, other = _align_method_SERIES(self, other, align_asobject=True)

        if isinstance(other, ABCSeries):
            name = _maybe_match_name(self, other)
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)

            # result stays integer only when both operands are integer
            filler = (fill_int if is_self_int_dtype and is_other_int_dtype
                      else fill_bool)
            return filler(self._constructor(na_op(self.values, other.values),
                                            index=self.index, name=name))
        elif isinstance(other, pd.DataFrame):
            return NotImplemented
        else:
            # scalars, list, tuple, np.array
            filler = (fill_int if is_self_int_dtype and
                      is_integer_dtype(np.asarray(other)) else fill_bool)
            return filler(self._constructor(
                na_op(self.values, other),
                index=self.index)).__finalize__(self)

    return wrapper
# Metadata for each flexible binary op: printable symbol, human-readable
# description, whether the entry is the reflected ("r") variant, and the name
# of its reflected counterpart (None when no reflected variant exists, e.g.
# comparisons and divmod).
_op_descriptions = {'add': {'op': '+',
                            'desc': 'Addition',
                            'reversed': False,
                            'reverse': 'radd'},
                    'sub': {'op': '-',
                            'desc': 'Subtraction',
                            'reversed': False,
                            'reverse': 'rsub'},
                    'mul': {'op': '*',
                            'desc': 'Multiplication',
                            'reversed': False,
                            'reverse': 'rmul'},
                    'mod': {'op': '%',
                            'desc': 'Modulo',
                            'reversed': False,
                            'reverse': 'rmod'},
                    'pow': {'op': '**',
                            'desc': 'Exponential power',
                            'reversed': False,
                            'reverse': 'rpow'},
                    'truediv': {'op': '/',
                                'desc': 'Floating division',
                                'reversed': False,
                                'reverse': 'rtruediv'},
                    'floordiv': {'op': '//',
                                 'desc': 'Integer division',
                                 'reversed': False,
                                 'reverse': 'rfloordiv'},
                    'divmod': {'op': 'divmod',
                               'desc': 'Integer division and modulo',
                               'reversed': False,
                               'reverse': None},
                    'eq': {'op': '==',
                           'desc': 'Equal to',
                           'reversed': False,
                           'reverse': None},
                    'ne': {'op': '!=',
                           'desc': 'Not equal to',
                           'reversed': False,
                           'reverse': None},
                    'lt': {'op': '<',
                           'desc': 'Less than',
                           'reversed': False,
                           'reverse': None},
                    'le': {'op': '<=',
                           'desc': 'Less than or equal to',
                           'reversed': False,
                           'reverse': None},
                    'gt': {'op': '>',
                           'desc': 'Greater than',
                           'reversed': False,
                           'reverse': None},
                    'ge': {'op': '>=',
                           'desc': 'Greater than or equal to',
                           'reversed': False,
                           'reverse': None}}

# Derive the reflected-op entries from the forward ones.
_op_names = list(_op_descriptions.keys())
for k in _op_names:
    reverse_op = _op_descriptions[k]['reverse']
    if reverse_op is None:
        # BUG FIX: ops without a reflected variant previously fell through
        # and inserted a spurious ``None`` key into _op_descriptions.
        continue
    _op_descriptions[reverse_op] = _op_descriptions[k].copy()
    _op_descriptions[reverse_op]['reversed'] = True
    _op_descriptions[reverse_op]['reverse'] = k
_flex_doc_SERIES = """
%s of series and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
See also
--------
Series.%s
"""
def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None,
                        **eval_kwargs):
    """Build a named flex method (e.g. ``Series.add``) around operator ``op``.

    The generated wrapper accepts ``level`` and ``fill_value`` and gets a
    docstring rendered from _flex_doc_SERIES.
    """
    op_name = name.replace('__', '')
    op_desc = _op_descriptions[op_name]
    if op_desc['reversed']:
        equiv = 'other ' + op_desc['op'] + ' series'
    else:
        equiv = 'series ' + op_desc['op'] + ' other'

    doc = _flex_doc_SERIES % (op_desc['desc'], op_name, equiv,
                              op_desc['reverse'])

    @Appender(doc)
    def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
        # validate axis
        if axis is not None:
            self._get_axis_number(axis)
        if isinstance(other, ABCSeries):
            return self._binop(other, op, level=level, fill_value=fill_value)
        elif isinstance(other, (np.ndarray, list, tuple)):
            if len(other) != len(self):
                raise ValueError('Lengths must be equal')
            # wrap array-likes in a Series aligned on our index, then binop
            return self._binop(self._constructor(other, self.index), op,
                               level=level, fill_value=fill_value)
        else:
            # scalar other: fill first, then apply the raw op
            if fill_value is not None:
                self = self.fillna(fill_value)
            return self._constructor(op(self, other),
                                     self.index).__finalize__(self)

    flex_wrapper.__name__ = name
    return flex_wrapper
# Factory maps consumed by pandas' method-injection machinery: which wrapper
# builds each kind of Series operator.
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
                         flex_comp_method=_flex_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
                            comp_method=_comp_method_SERIES,
                            bool_method=_bool_method_SERIES,
                            have_divmod=True)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
_flex_doc_FRAME = """
%s of dataframe and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame
locations are missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
See also
--------
DataFrame.%s
"""
def _align_method_FRAME(left, right, axis):
    """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """

    def to_series(right):
        # 1-D right-hand side: coerce into a Series aligned on the axis the
        # op will match against (index vs columns), with strict length check.
        msg = ('Unable to coerce to Series, length must be {req_len}: '
               'given {given_len}')
        if axis is not None and left._get_axis_name(axis) == 'index':
            if len(left.index) != len(right):
                raise ValueError(msg.format(req_len=len(left.index),
                                            given_len=len(right)))
            right = left._constructor_sliced(right, index=left.index)
        else:
            if len(left.columns) != len(right):
                raise ValueError(msg.format(req_len=len(left.columns),
                                            given_len=len(right)))
            right = left._constructor_sliced(right, index=left.columns)
        return right

    if isinstance(right, (list, tuple)):
        right = to_series(right)

    elif isinstance(right, np.ndarray) and right.ndim:  # skips np scalar

        if right.ndim == 1:
            right = to_series(right)

        elif right.ndim == 2:
            # 2-D right-hand side must match the frame's shape exactly
            if left.shape != right.shape:
                msg = ("Unable to coerce to DataFrame, shape "
                       "must be {req_shape}: given {given_shape}"
                       ).format(req_shape=left.shape, given_shape=right.shape)
                raise ValueError(msg)

            right = left._constructor(right, index=left.index,
                                      columns=left.columns)
        else:
            raise ValueError('Unable to coerce to Series/DataFrame, dim '
                             'must be <= 2: {dim}'.format(dim=right.shape))

    return right
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
                        fill_zeros=None, **eval_kwargs):
    """Build a DataFrame arithmetic method around operator ``op``."""

    def na_op(x, y):
        # Fast path via numexpr/numpy; masked fallback on TypeError.
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            xrav = x.ravel()
            if isinstance(y, (np.ndarray, ABCSeries)):
                dtype = np.find_common_type([x.dtype, y.dtype], [])
                result = np.empty(x.size, dtype=dtype)
                yrav = y.ravel()
                mask = notna(xrav) & notna(yrav)
                xrav = xrav[mask]

                # we may need to manually
                # broadcast a 1 element array
                if yrav.shape != mask.shape:
                    # NOTE(review): this reads ``item()`` from the freshly
                    # allocated (uninitialized) array rather than the original
                    # 1-element value — looks suspect; confirm intended
                    # broadcast semantics before relying on this branch.
                    yrav = np.empty(mask.shape, dtype=yrav.dtype)
                    yrav.fill(yrav.item())

                yrav = yrav[mask]
                if np.prod(xrav.shape) and np.prod(yrav.shape):
                    with np.errstate(all='ignore'):
                        result[mask] = op(xrav, yrav)
            elif hasattr(x, 'size'):
                # scalar right-hand side
                result = np.empty(x.size, dtype=x.dtype)
                mask = notna(xrav)
                xrav = xrav[mask]
                if np.prod(xrav.shape):
                    with np.errstate(all='ignore'):
                        result[mask] = op(xrav, y)
            else:
                raise TypeError("cannot perform operation {op} between "
                                "objects of type {x} and {y}".format(
                                    op=name, x=type(x), y=type(y)))

            # restore NaN in masked-out slots and the original 2-D shape
            result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
            result = result.reshape(x.shape)

        result = missing.fill_zeros(result, x, y, name, fill_zeros)

        return result

    # pick the docstring template: flex ops get the rich template
    if name in _op_descriptions:
        op_name = name.replace('__', '')
        op_desc = _op_descriptions[op_name]
        if op_desc['reversed']:
            equiv = 'other ' + op_desc['op'] + ' dataframe'
        else:
            equiv = 'dataframe ' + op_desc['op'] + ' other'

        doc = _flex_doc_FRAME % (op_desc['desc'], op_name, equiv,
                                 op_desc['reverse'])
    else:
        doc = _arith_doc_FRAME % name

    @Appender(doc)
    def f(self, other, axis=default_axis, level=None, fill_value=None):
        other = _align_method_FRAME(self, other, axis)

        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._combine_frame(other, na_op, fill_value, level)
        elif isinstance(other, ABCSeries):
            return self._combine_series(other, na_op, fill_value, axis, level)
        else:
            if fill_value is not None:
                self = self.fillna(fill_value)

            return self._combine_const(other, na_op)

    f.__name__ = name

    return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
                            masker=False):
    """Build a flex comparison method (``DataFrame.eq`` etc.)."""

    def na_op(x, y):
        try:
            with np.errstate(invalid='ignore'):
                result = op(x, y)
        except TypeError:
            # object/mixed dtypes: compare elementwise on non-NA positions
            xrav = x.ravel()
            result = np.empty(x.size, dtype=bool)
            if isinstance(y, (np.ndarray, ABCSeries)):
                yrav = y.ravel()
                mask = notna(xrav) & notna(yrav)
                result[mask] = op(np.array(list(xrav[mask])),
                                  np.array(list(yrav[mask])))
            else:
                mask = notna(xrav)
                result[mask] = op(np.array(list(xrav[mask])), y)

            # NA positions: ne -> True, every other comparison -> False
            if op == operator.ne:  # pragma: no cover
                np.putmask(result, ~mask, True)
            else:
                np.putmask(result, ~mask, False)
            result = result.reshape(x.shape)

        return result

    @Appender('Wrapper for flexible comparison methods {name}'
              .format(name=name))
    def f(self, other, axis=default_axis, level=None):
        other = _align_method_FRAME(self, other, axis)

        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._flex_compare_frame(other, na_op, str_rep, level,
                                            try_cast=False)

        elif isinstance(other, ABCSeries):
            return self._combine_series(other, na_op, None, axis, level,
                                        try_cast=False)
        else:
            return self._combine_const(other, na_op, try_cast=False)

    f.__name__ = name

    return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
    """Build a special (dunder) comparison method for DataFrame."""

    @Appender('Wrapper for comparison method {name}'.format(name=name))
    def f(self, other):
        if isinstance(other, pd.DataFrame):  # Another DataFrame
            return self._compare_frame(other, func, str_rep)
        elif isinstance(other, ABCSeries):
            return self._combine_series_infer(other, func, try_cast=False)
        else:

            # straight boolean comparisions we want to allow all columns
            # (regardless of dtype to pass thru) See #4537 for discussion.
            res = self._combine_const(other, func,
                                      raise_on_error=False,
                                      try_cast=False)
            return res.fillna(True).astype(bool)

    f.__name__ = name

    return f
# Factory maps for DataFrame operator construction (note: logical ops reuse
# the arithmetic wrapper).
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
                        flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
                           comp_method=_comp_method_FRAME,
                           bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
                        default_axis=None, **eval_kwargs):
    """Build a Panel arithmetic method; only scalar right-hand sides work."""

    # copied from Series na_op above, but without unnecessary branch for
    # non-scalar
    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:

            # TODO: might need to find_common_type here?
            result = np.empty(len(x), dtype=x.dtype)
            mask = notna(x)
            result[mask] = op(x[mask], y)
            result, changed = maybe_upcast_putmask(result, ~mask, np.nan)

        result = missing.fill_zeros(result, x, y, name, fill_zeros)
        return result

    # work only for scalars
    def f(self, other):
        if not is_scalar(other):
            raise ValueError('Simple arithmetic with {name} can only be '
                             'done with scalar values'
                             .format(name=self._constructor.__name__))

        return self._combine(other, op)

    f.__name__ = name
    return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
    """Build a Panel comparison method."""

    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True)
        except TypeError:
            # object/mixed dtypes: compare elementwise on non-NA positions
            xrav = x.ravel()
            result = np.empty(x.size, dtype=bool)
            if isinstance(y, np.ndarray):
                yrav = y.ravel()
                mask = notna(xrav) & notna(yrav)
                result[mask] = op(np.array(list(xrav[mask])),
                                  np.array(list(yrav[mask])))
            else:
                mask = notna(xrav)
                result[mask] = op(np.array(list(xrav[mask])), y)

            # NA positions: ne -> True, every other comparison -> False
            if op == operator.ne:  # pragma: no cover
                np.putmask(result, ~mask, True)
            else:
                np.putmask(result, ~mask, False)
            result = result.reshape(x.shape)

        return result

    @Appender('Wrapper for comparison method {name}'.format(name=name))
    def f(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            axis = self._get_axis_number(axis)

        if isinstance(other, self._constructor):
            return self._compare_constructor(other, na_op, try_cast=False)
        elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
                                ABCSeries)):
            raise Exception("input needs alignment for this object [{object}]"
                            .format(object=self._constructor))
        else:
            return self._combine_const(other, na_op, try_cast=False)

    f.__name__ = name

    return f
# Factory map for Panel operator construction (logical ops reuse the
# arithmetic wrapper).
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
                           comp_method=_comp_method_PANEL,
                           bool_method=_arith_method_PANEL)
| bsd-3-clause |
elmadjian/mac0499 | programas/plot_some_graph.py | 1 | 7762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import matplotlib.pyplot as plt
import numpy as np
import re
import sys
import time
import math
# Reading detector (Elmadjian, 2015)
# ------------------------------------
class Detector (threading.Thread):
    """Thread that classifies a gaze-sample stream as reading vs. skimming.

    A producer pushes (x, y, t) samples via storeValues() and signals the
    shared Condition ``cv``; detect() consumes one sample per notification
    and appends diagnostics to the module-level plotting lists.
    ``thresh`` is the state level above which a sample counts as "reading
    behaviour detected".
    """

    def __init__(self, thresh, cv):
        threading.Thread.__init__(self)
        # current/next sample slots written by the producer thread
        self.curr_x = None
        self.curr_y = None
        self.curr_t = None
        self.next_x = None
        self.next_y = None
        self.next_t = None
        self.thresh = thresh
        self.state = 0          # evidence accumulator, clamped to [-40, 100]
        self.ant_saccade = False
        self.stop = False       # set by the producer to end detect()
        self.cv = cv
        self.elapsed = time.time()
        self.cnt_yes = 0        # samples at/above thresh
        self.cnt_no = 0
        self.cnt_total = 0      # samples processed

    def run(self):
        self.detect(self.cv)

    def storeValues(self, next_point):
        """Publish the next (x, y, t) sample for the detector to consume."""
        self.next_x = next_point[0]
        self.next_y = next_point[1]
        self.next_t = next_point[2]

    def changeState(self, changeValue):
        """Add ``changeValue`` to the evidence state, clamped to [-40, 100]."""
        self.state += changeValue
        if self.state > 100:
            self.state = 100
        elif self.state < -40:
            self.state = -40

    def changePercentage(self, var, value):
        """Return ``var + value`` clamped to [15, 100] (does not mutate)."""
        var += value
        if var > 100:
            var = 100
        elif var < 15:
            var = 15
        return var

    def checkCombination(self, buff):
        """True when at least two of the three buffered flags are set."""
        if buff[0] and buff[1] and buff[2]:
            return True
        if buff[0] and buff[1] and not buff[2]:
            return True
        if buff[0] and not buff[1] and buff[2]:
            return True
        if not buff[0] and buff[1] and buff[2]:
            return True
        return False

    def detect(self, cv):
        """Main loop: wait for each sample, classify its velocity, update
        state, and record results into the module-level global lists."""
        prev_x = 0
        prev_y = 0
        prev_t = 0.000000001
        diff_cnt = 0
        reading_buff = []                 # sliding window of 3 booleans
        yaxs_mov = np.array([])
        xaxs_mov = np.array([])
        global dxlist
        global dylist
        global timelist
        global xlist
        global ylist
        global xdetected
        global tdetected
        global xnot_related
        global tnot_related
        global read_on
        global skim_on
        short_mov = 0
        reading = 50                      # percentage confidence "reading"
        skimming = 50                     # percentage confidence "skimming"
        finite_diff = 0.1                 # fixed dt for finite differences
        finite_cnt = 0
        fix_cnt = 0
        while True:
            # NOTE(review): indentation lost in this dump; the lock is assumed
            # to guard only the wait() — confirm against the original source.
            with cv:
                cv.wait()
            if self.stop:
                break
            # finite-difference velocities w.r.t. the previous sample
            dx = (self.next_x - prev_x) / finite_diff
            dy = (self.next_y - prev_y) / finite_diff
            yaxs_mov = np.append(yaxs_mov, [prev_y])
            xaxs_mov = np.append(xaxs_mov, [prev_x])
            finite_cnt += 0.1
            timelist.append(finite_cnt)
            xlist.append(self.next_x)
            ylist.append(self.next_y)
            dxlist.append(dx)
            dylist.append(dy)
            #read forward
            if 0.15 < dx < 1.5 and -0.5 < dy < 0.5:
                short_mov += 1
                fix_cnt = 0
                reading_buff.append(True)
                if short_mov >= 1:
                    xdetected.append(self.next_x)
                    tdetected.append(finite_cnt)
            #regression
            elif dx < -2.0 and -1.0 < dy < 0.0:
                fix_cnt = 0
                reading_buff.append(False)
                xdetected.append(self.next_x)
                tdetected.append(finite_cnt)
                if len(yaxs_mov) > 1 and short_mov >= 1:
                    # horizontal span covered before this regression, scaled
                    # by how many forward moves happened
                    criteria = np.ptp(xaxs_mov) * short_mov
                    xaxs_mov = np.array([])
                    yaxs_mov = np.array([])
                    short_mov = 0
                    if criteria < 2:
                        skimming = self.changePercentage(skimming, 20)
                        reading = self.changePercentage(reading, -20)
                    else:
                        skimming = self.changePercentage(skimming, -5)
                        reading = self.changePercentage(reading, 5)
            #fixations
            elif -0.15 < dx < 0.15 and -0.2 < dy < 0.2:
                fix_cnt += 1
                if fix_cnt % 10 == 0:
                    self.changeState(-5)
            #unrelated pattern
            else:
                fix_cnt = 0
                self.changeState(-15)
                xnot_related.append(self.next_x)
                tnot_related.append(finite_cnt)
            #validating window
            if len(reading_buff) == 3:
                if self.checkCombination(reading_buff):
                    self.changeState(20)
                else:
                    pass
                    #self.changeState(-5)
                reading_buff.pop(0)
            #record state
            if self.state >= self.thresh:
                self.cnt_yes += 1
                if reading > skimming:
                    read_on.append(finite_cnt)
                elif reading < skimming:
                    skim_on.append(finite_cnt)
            #print("time:", finite_cnt, "state:", self.state)
            prev_x = self.next_x
            prev_y = self.next_y
            prev_t = self.next_t
            self.cnt_total += 1
# Reads the input file
# -----------------------
class FileReader:
    """Parse a gaze-sample file into parallel x, y and time lists.

    Each line holding at least three numeric tokens contributes one
    (x, y, t) sample; other lines are ignored.
    """

    def __init__(self, filename):
        self.x = []
        self.y = []
        self.time = []
        self.values = []
        self.readFile(filename)

    def readFile(self, filename):
        """Scan ``filename`` and append every numeric sample found."""
        number = re.compile("\d+.?\d+")
        with open(filename, 'r') as sample:
            for line in sample:
                found = number.findall(line)
                if not found:
                    continue
                sx, sy, st = float(found[0]), float(found[1]), float(found[2])
                self.x.append(sx)
                self.y.append(sy)
                self.time.append(st)
                self.values.append([sx, sy, st])
#------------------------
if __name__ == '__main__':
    # Load the sample file given on the command line and start the detector
    # with a detection threshold of 30.
    fr = FileReader(sys.argv[1])
    cv = threading.Condition()
    detector = Detector(30, cv)
    detector.start()
    # Module-level lists the Detector thread fills in (declared global there).
    timelist = []
    dxlist = []
    dylist = []
    xlist = []
    ylist = []
    xdetected = []
    tdetected = []
    read_on = []
    skim_on = []
    xnot_related = []
    tnot_related = []
    # Feed one sample at a time; the short sleep gives the detector thread a
    # chance to wake up and consume the sample before the next notify.
    for i in range(len(fr.values) - 1):
        #detector.storeValues(fr.x.pop(0), fr.y.pop(0), fr.time.pop(0))
        detector.storeValues(fr.values.pop(0))#, fr.values[0])
        with cv:
            cv.notify_all()
        time.sleep(0.0001)
    #fr.plota()
    # Signal shutdown, report counters and wake the detector so it can exit.
    detector.stop = True
    print("total:", detector.cnt_total)
    print("found:", detector.cnt_yes)
    print("percent:", 100*(detector.cnt_yes/detector.cnt_total))
    with cv:
        cv.notify_all()
    detector.join()
    #plot everything:
    # plt.plot(timelist, dxlist, 'bo-')
    # plt.grid()
    # plt.show()
    # plt.plot(timelist, dylist, 'ro-')
    # plt.grid()
    # plt.show()
    # plt.plot(timelist, dxlist, 'bo-')
    # plt.plot(timelist, dylist, 'ro-')
    # plt.show()
    plt.plot(timelist, xlist, 'bo-')
    plt.plot(timelist, ylist, 'ro-')
    # plt.plot(tdetected, xdetected, 'yo-')
    # plt.plot(tnot_related, xnot_related, 'gs')
    # last = timelist[-1]
    # ceil = max(max(xlist), max(ylist))
    # #print(read_on)
    # for i in range(len(read_on) - 1):
    #     if read_on[i+1] - read_on[i] <= 0.15:
    #         plt.axhspan(0.0, ceil, read_on[i]/last, read_on[i+1]/last, edgecolor='none', facecolor='y', alpha=0.5)
    # for i in range(len(skim_on) - 1):
    #     if skim_on[i+1] - skim_on[i] <= 0.15:
    #         plt.axhspan(0.0, ceil, skim_on[i]/last, skim_on[i+1]/last, edgecolor='none', facecolor='g', alpha=0.5)
    # plt.ylim(0, max(ylist))
    # plt.xlim(0, max(timelist))
    # plt.grid()
plt.show() | mit |
khkaminska/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
# Compiled once at import time so the per-document tokenizer calls do not pay
# the pattern lookup/compile cost on every invocation.
_TOKEN_PATTERN = re.compile(u'(?u)\\b\\w\\w+\\b')


def number_aware_tokenizer(doc):
    """ Tokenizer that maps all numeric tokens to a placeholder.

    For many applications, tokens that begin with a number are not directly
    useful, but the fact that such a token exists can be relevant. By applying
    this form of dimensionality reduction, some methods may perform better.

    Tokens of two or more word characters are kept; any token starting with a
    digit or underscore is replaced by the literal "#NUMBER".
    """
    tokens = _TOKEN_PATTERN.findall(doc)
    tokens = ["#NUMBER" if token[0] in "0123456789_" else token
              for token in tokens]
    return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
              'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
              'comp.windows.x', 'misc.forsale', 'rec.autos',
              'rec.motorcycles', 'rec.sport.baseball',
              'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
              'sci.med', 'sci.space', 'soc.religion.christian',
              'talk.politics.guns', 'talk.politics.mideast',
              'talk.politics.misc', 'talk.religion.misc']
# Downloads the corpus on first use.
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target

vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
                             tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
                                 svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
                         random_state=0)

print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)

# Cocluster documents and words, then score the document clustering against
# the true newsgroup labels.
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_cocluster, y_true)))

# Baseline: plain k-means on the same TF-IDF matrix.
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_kmeans, y_true)))

feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Normalized cut of bicluster ``i`` (lower is better).

    Reads the module-level fitted ``cocluster`` model and TF-IDF matrix ``X``.
    """
    rows, cols = cocluster.get_indices(i)
    if not (np.any(rows) and np.any(cols)):
        import sys
        # empty bicluster: rank it last
        return sys.float_info.max
    row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
    # much faster in scipy <= 0.16
    weight = X[rows][:, cols].sum()
    cut = (X[row_complement][:, cols].sum() +
           X[rows][:, col_complement].sum())
    return cut / weight
def most_common(d):
    """Items of a defaultdict(int) with the highest values.

    Like Counter.most_common in Python >=2.7.
    """
    # ``dict.items`` behaves identically here on Python 2 and 3, so the
    # deprecated sklearn.externals.six ``iteritems`` shim is unnecessary.
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
# Rank all biclusters by normalized cut and report the five best.
bicluster_ncuts = list(bicluster_ncut(i)
                       for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]

print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
    n_rows, n_cols = cocluster.get_shape(cluster)
    cluster_docs, cluster_words = cocluster.get_indices(cluster)
    if not len(cluster_docs) or not len(cluster_words):
        continue

    # categories: share of the bicluster's documents per newsgroup (top 3)
    counter = defaultdict(int)
    for i in cluster_docs:
        counter[document_names[i]] += 1
    cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
                           for name, c in most_common(counter)[:3])

    # words: score by in-cluster minus out-of-cluster TF-IDF mass, take top 10
    out_of_cluster_docs = cocluster.row_labels_ != cluster
    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
    word_col = X[:, cluster_words]
    word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
                           word_col[out_of_cluster_docs, :].sum(axis=0))
    word_scores = word_scores.ravel()
    important_words = list(feature_names[cluster_words[i]]
                           for i in word_scores.argsort()[:-11:-1])

    print("bicluster {} : {} documents, {} words".format(
        idx, n_rows, n_cols))
    print("categories : {}".format(cat_string))
    print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
vvinuv/pyvinu | mytools.py | 1 | 10543 | import pylab as pl
import pyfits
import numpy as np
import os
import healpy as hp
from numpy import ma
from astropy.io import fits
import astropy.coordinates as co
from astropy import units as u
from astropy import wcs
import time
from matplotlib import ticker
def matrc1():
    """Apply a publication-style matplotlib rc configuration (larger fonts,
    thicker axes, bigger ticks) to the global ``pl.rcParams``."""
    MatPlotParams = {'axes.titlesize': 15, 'axes.linewidth' : 2.5, 'axes.labelsize': 22, 'xtick.labelsize': 20, 'ytick.labelsize': 20, 'xtick.major.size': 22, 'ytick.major.size' : 22, 'xtick.minor.size': 14, 'ytick.minor.size': 14, 'figure.figsize' : [6.0, 6.0], 'xtick.major.pad' : 8, 'ytick.major.pad' : 6}
    pl.rcParams.update(MatPlotParams)
def my_formatter(x, p):
    '''Matplotlib tick formatter rendering *x* as "m x 10^e" in LaTeX.
    *x* is the tick value and *p* its position (unused by this formatter).'''
    if x == 0:
        mantissa, exponent = 0.0, 0
    else:
        exponent = np.floor(np.log10(abs(x)))
        mantissa = x / 10 ** exponent
    return r'$%.1f \times 10^{%.0f}$' % (mantissa, exponent)
def tick_format(ax, axis='x'):
    '''Attach the scientific-notation tick formatter to one axis of *ax*.

    In a script call as
    >>> import mytools as my
    >>> ax = pl.subplot(111)
    >>> my.tick_format(ax, axis='y')
    >>> my.tick_format(ax, axis='x')
    >>> pl.plot(a,a)
    '''
    getters = {'x': ax.get_xaxis, 'y': ax.get_yaxis}
    getter = getters.get(axis)
    if getter is not None:
        getter().set_major_formatter(ticker.FuncFormatter(my_formatter))
def whiskerplot(shear,dRA=1.,dDEC=1.,scale=5, combine=1,offset=(0,0) ):
    """Quiver ("whisker") plot of a complex shear field on a regular grid.

    shear   : 2D complex array sampled on an RA/DEC grid.
    dRA,dDEC: grid spacing along each axis.
    scale   : forwarded to pylab.quiver to set whisker length scaling.
    combine : if > 1, average blocks of neighbouring cells before plotting.
    offset  : (RA, DEC) of the first grid point.
    """
    if combine>1:
        # Trim the grid to a multiple of `combine`, then average
        # neighbouring cells.
        # NOTE(review): only the four (0,1)x(0,1) sub-offsets are summed,
        # so for combine > 2 this averages 4 of combine**2 cells per block
        # -- confirm that combine is only ever meant to be 2.
        s = (combine*int(shear.shape[0]/combine),
             combine*int(shear.shape[1]/combine))
        shear = shear[0:s[0]:combine, 0:s[1]:combine] \
                + shear[1:s[0]:combine, 0:s[1]:combine] \
                + shear[0:s[0]:combine, 1:s[1]:combine] \
                + shear[1:s[0]:combine, 1:s[1]:combine]
        shear *= 0.25
        dRA *= combine
        dDEC *= combine
    # Complex square root halves the phase angle -- presumably converting
    # the spin-2 shear into a headless whisker direction; confirm.
    theta = shear**0.5
    RA = offset[0] + np.arange(shear.shape[0])*dRA
    DEC = offset[1] + np.arange(shear.shape[1])*dDEC
    # Headless arrows (head sizes zeroed) pivoting about the grid point.
    pl.quiver(RA,DEC,
              theta.real.T,theta.imag.T,
              pivot = 'middle',
              headwidth = 0,
              headlength = 0,
              headaxislength = 0,
              scale=scale)
    pl.xlim(0,shear.shape[0]*dRA)
    pl.ylim(0,shear.shape[1]*dDEC)
    #pl.xlabel('RA (arcmin)')
    #pl.ylabel('DEC (arcmin)')
def write_fits_table(outfile, keys, data, formats=None):
    '''Write parallel column arrays to a new FITS binary table.

    outfile : path of the FITS file to (over)write.
    keys    : column names.
    data    : sequence of arrays, one per key.
    formats : FITS column format codes; defaults to 'E' (float32) for all.
    '''
    # Remove a pre-existing file portably instead of shelling out to
    # `rm -f` (os.system was unix-only and unsafe for paths containing
    # spaces or shell metacharacters).
    if os.path.exists(outfile):
        os.remove(outfile)
    if formats is None:
        formats = ['E'] * len(keys)
    cols = []
    # `fmt` instead of `format` to avoid shadowing the builtin.
    for key, d, fmt in zip(keys, data, formats):
        cols.append(pyfits.Column(name=key, format=fmt, array=d))
    cols = pyfits.ColDefs(cols)
    tbhdu = pyfits.new_table(cols)
    tbhdu.writeto(outfile)
def return_healang(pixel, nside=8, nest=False):
    """Given healpix pixel number(s) return (RA, DEC) in degrees.

    Bug fix: the `nest` argument used to be ignored -- a literal
    ``nest=False`` was passed to healpy -- so NESTED-ordering callers got
    wrong coordinates. It is now forwarded.
    """
    theta, phi = hp.pix2ang(nside, pixel, nest=nest)
    dec = 90.0 - (theta * 180.0 / np.pi)
    ra = phi * 180.0 / np.pi
    return ra, dec
def return_healpixel(ra, dec, nside=8, nest=False):
    """Given RA and DEC in degrees return the array of healpix pixel numbers.

    Bug fix: the `nest` argument used to be ignored (a literal
    ``nest=False`` was passed to healpy); it is now forwarded.
    """
    theta = 90.0 - dec          # colatitude in degrees
    ra = ra * np.pi / 180.
    theta = theta * np.pi / 180.
    pixels = hp.ang2pix(nside, theta, ra, nest=nest)
    return pixels
def get_random_cata_heal(ipath, ifile, mfile, nside, ofile='rand_points.fits', mode='grow', opath='./', do_plot=True):
    """Given an input catalog (ifile) and mask (mfile, in healpix format,
       0-masked, 1-unmasked), this function generates a random catalog.
       The input catalog should contain ra and dec in the equatorial
       coordinate system; the output is in galactic coordinates.
       mode = grow -> grows the mask by one healpix pixel
       mode = shrink -> shrink the mask by one pixel (not yet implemented)
       mode = None -> nothing will be done to the mask
    """
    ofile = os.path.join(opath, ofile)
    ifile = os.path.join(ipath, ifile)
    # Read input RA and DEC
    f = fits.open(ifile)[1].data
    ra = f['ra']
    dec = f['dec']
    # Convert RA DEC to l and b
    coords = co.ICRS(ra=ra, dec=dec, unit=(u.degree, u.degree))
    ra = coords.galactic.l.degree
    dec = coords.galactic.b.degree
    # Generating the corresponding healpix pixels
    healpixels = return_healpixel(ra, dec, nside=nside, nest=False)
    uniquepixels = np.unique(healpixels)
    # Generating angular mask from input file
    mask = np.zeros(hp.nside2npix(nside)).astype(np.int)
    mask[uniquepixels] = 1
    if mode == 'grow':
        # Generating neighbour pixels. This will grow the mask
        neigh_pixels = hp.get_all_neighbours(nside, uniquepixels).ravel()
        neigh_pixels = np.unique(neigh_pixels)
        mask[neigh_pixels] = 1
    elif mode == 'shrink':
        # NOTE(review): 'shrink' is documented above but not implemented.
        pass
        #mask = hp.smoothing(mask, 0.01)
        #mask = np.where(mask > 0.1, 1, 0)
    # The given mask
    imask = hp.read_map(mfile)
    imask = hp.ud_grade(imask, nside)
    # Union of input catalog mask and input mask
    # (element-wise product = intersection of the unmasked regions)
    mask = (imask*mask).astype('bool')
    # Unique unmasked pixels
    uniquepixels = np.arange(hp.nside2npix(nside))[mask]
    # generating random points and their corresponding healpix pixels
    # NOTE(review): 100000 uniform trials are hard-coded, and a uniform
    # draw in dec is not area-uniform on the sphere (poles oversampled)
    # -- confirm whether this bias matters for the downstream statistics.
    rra = np.random.uniform(0, 360, 100000)
    rdec = np.random.uniform(-90, 90, 100000)
    healpixels_rand = return_healpixel(rra, rdec, nside=nside, nest=False)
    # Random pixels found in unmasked pixels
    i = np.in1d(healpixels_rand, uniquepixels)
    rra = rra[i]
    rdec = rdec[i]
    write_fits_table(ofile, ['RA', 'DEC'], [rra, rdec])
    if do_plot:
        # NOTE(review): both mollview figures display the same combined
        # mask; was one of them meant to show `imask`?
        hp.mollview(mask, coord='G', fig=1)
        hp.mollview(mask, coord='G', fig=2)
        pl.figure(3)
        pl.scatter(rra, rdec, c='k', label='Random')
        pl.scatter(ra, dec, c='r', s=0.2, edgecolor='r', label='Groups')
        pl.show()
def sigma_clipping(d, sigma, iter=10, mask=None):
    '''Iteratively sigma-clip *d* at sigma * std_dev.

    d     : numpy array of samples.
    sigma : clip threshold in units of the current standard deviation.
    iter  : maximum number of clipping iterations (name kept for
            backward compatibility although it shadows the builtin).
    mask  : optional boolean array, 0/False for points excluded up front;
            note it is modified in place.

    Returns (masked_d, avg, std_dev): a masked array hiding the clipped
    points, plus the mean and standard deviation of the final pass.
    '''
    i = 0
    tavg = -1e11  # previous-pass mean; sentinel forces at least one pass
    if mask is None:
        mask = np.ones(d.shape).astype('bool')
    while i < iter:
        std_dev = d[mask].std()
        avg = d[mask].mean()
        # Drop everything further than sigma * std_dev from the mean.
        mask *= np.where(abs(d - avg) > sigma * std_dev, 0, 1).astype('bool')
        # Fractional change of the mean between passes. abs() in the
        # denominator fixes the original sign bug: for a negative mean
        # frac_avg was negative, so the loop stopped after one pass.
        frac_avg = abs(avg - tavg) / abs(avg)
        #print '%d %2.2f %2.2f %2.2f \n'%(i, avg, frac_avg, std_dev)
        if frac_avg < 0.10:
            i = iter  # converged: make this the last iteration
        tavg = avg * 1.0
        i += 1
    masked_d = ma.masked_array(d, np.logical_not(mask))
    return masked_d, avg, std_dev
def get_ra_dec(type, ifile, use_wcs=False, do_plot=False, field='KAPPA'):
    '''Given an image or table (ifile) this function returns the ra, dec of
       the pixels and the value in the pixel. If the input is image then it
       should have WCS and the ra, dec are generated based WCS.
       If use_wcs=False, then a simple convertion from pixel to WCS will be
       performed based on a few parameters in the header. For table, this
       just return the ra, dec columns and the field name is KAPPA by
       default. Input and output coordinates must be equitorial
    '''
    if type == 'image':
        f = fits.open(ifile)
        kappa = f[0].data
        h = f[0].header
        f.close()
        #generate pixel coordinates
        pix_ra, pix_dec = np.meshgrid(np.arange(kappa.shape[1]),
                                      np.arange(kappa.shape[0]))
        if use_wcs:
            w = wcs.WCS(h)
            #corresponding equitorial coordinates
            ra, dec = w.wcs_pix2world(pix_ra, pix_dec, 0)
        else:
            # Linear approximation from header keywords only.
            # NOTE(review): FITS CRPIX is 1-based while pix_ra/pix_dec are
            # 0-based -- confirm the one-pixel offset is acceptable here.
            ra = h['CRVAL1'] + h['CDELT1'] * (pix_ra - h['CRPIX1'])
            dec = h['CRVAL2'] + h['CDELT2'] * (pix_dec - h['CRPIX2'])
        if do_plot:
            # Quick-look sanity images of the generated coordinate grids.
            pl.subplot(121)
            pl.imshow(ra, origin='lower')
            pl.title('RA')
            pl.colorbar()
            pl.subplot(122)
            pl.imshow(dec, origin='lower')
            pl.title('DEC')
            pl.colorbar()
            pl.show()
    elif type == 'table':
        f = fits.open(ifile)[1].data
        ra = f['ra']
        dec = f['dec']
        kappa = f[field]
    else:
        raise ValueError("Input should be either image or table")
    # Flattened so image grids and table columns look the same to callers.
    return ra.ravel(), dec.ravel(), kappa.ravel()
def get_healpix_map(type, ipath, ifiles, nside, ofile='test.fits',
                    use_wcs=False, do_plot=False, field='KAPPA',
                    ocoord='galactic', opath='./'):
    '''Generate a healpix map from given image(s) with WCS or table(s).

    type   : 'image' or 'table', forwarded to get_ra_dec(); input WCS
             should be equitorial.
    ifiles : list/array of input file names; their points are concatenated.
    nside  : healpix resolution of the output map.
    ocoord : output coordinates, 'equitorial' or 'galactic' (default).
    NOTE(review): `opath` is accepted but never joined onto `ofile`
    (unlike get_random_cata_heal) -- confirm whether that is intended.

    Fix: the Python-2-only ``print`` statements made this module
    unimportable under Python 3; single-argument ``print(...)`` calls
    behave identically under both interpreters.
    '''
    ti = time.time()
    # Gather (ra, dec, value) points from every input file.
    for i, f in enumerate(ifiles):
        ifile = os.path.join(ipath, f)
        r, d, k = get_ra_dec(type, ifile, use_wcs=use_wcs, do_plot=False,
                             field=field)
        if i == 0:
            ra = r.copy()
            dec = d.copy()
            kappa = k.copy()
        else:
            ra = np.concatenate((ra, r))
            dec = np.concatenate((dec, d))
            kappa = np.concatenate((kappa, k))
    tf = time.time()
    print('Read files in %2.2f sec'%(tf-ti))
    ti = time.time()
    if ocoord == 'equitorial':
        l = ra.copy()
        b = dec.copy()
        coord = 'C'
    elif ocoord == 'galactic':
        #equitorial to galactic
        coords = co.ICRS(ra=ra, dec=dec, unit=(u.degree, u.degree))
        l = coords.galactic.l.degree
        b = coords.galactic.b.degree
        coord = 'G'
    else:
        # Fail fast instead of the NameError the fall-through used to hit.
        raise ValueError("ocoord should be either equitorial or galactic")
    tf = time.time()
    print('Converted coordinates in %2.2f sec'%(tf-ti))
    #Generate healpix pixel
    healpixels = return_healpixel(l, b, nside=nside, nest=False)
    uniquepixels = np.unique(healpixels)
    ti = time.time()
    print('Unique pixels in %2.2f sec'%(ti-tf))
    #Averaging pixels values belongs to same heal pixel
    kappa_map = [np.average(kappa[healpixels == i]) for i in uniquepixels]
    kappa_map = np.array(kappa_map)
    tf = time.time()
    print('Averaged kappa in %2.2f sec'%(tf-ti))
    #Generating healpix base
    full_kappa_map = np.zeros(hp.nside2npix(nside)).astype(np.float64)
    full_kappa_map[uniquepixels] = kappa_map
    hp.write_map(ofile, full_kappa_map, coord=coord)
    if do_plot:
        hp.mollview(full_kappa_map, fig=1)
        pl.show()
# Library of plotting/healpix helpers; nothing to run when executed directly.
if __name__=='__main__':
    pass
| gpl-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/ipykernel/inprocess/tests/test_kernel.py | 8 | 2417 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import sys
import unittest
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
from ipython_genutils import py3compat
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
class InProcessKernelTestCase(unittest.TestCase):
    # End-to-end tests of the in-process kernel; a fresh kernel
    # manager/client pair is created for every test in setUp.
    def setUp(self):
        self.km = InProcessKernelManager()
        self.km.start_kernel()
        self.kc = self.km.client()
        self.kc.start_channels()
        self.kc.wait_for_ready()
    @skipif_not_matplotlib
    def test_pylab(self):
        """Does %pylab work in the in-process kernel?"""
        kc = self.kc
        kc.execute('%pylab')
        out, err = assemble_output(kc.iopub_channel)
        self.assertIn('matplotlib', out)
    def test_raw_input(self):
        """ Does the in-process kernel handle raw_input correctly?
        """
        io = StringIO('foobar\n')
        sys_stdin = sys.stdin
        # Temporarily replace stdin so (raw_)input reads 'foobar';
        # restored in the finally clause even if execute() raises.
        sys.stdin = io
        try:
            if py3compat.PY3:
                self.kc.execute('x = input()')
            else:
                self.kc.execute('x = raw_input()')
        finally:
            sys.stdin = sys_stdin
        self.assertEqual(self.km.kernel.shell.user_ns.get('x'), 'foobar')
    def test_stdout(self):
        """ Does the in-process kernel correctly capture IO?
        """
        kernel = InProcessKernel()
        with capture_output() as io:
            kernel.shell.run_cell('print("foo")')
        self.assertEqual(io.stdout, 'foo\n')
        # With a frontend attached, output goes to the iopub channel instead.
        kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
        kernel.frontends.append(kc)
        kc.execute('print("bar")')
        out, err = assemble_output(kc.iopub_channel)
        self.assertEqual(out, 'bar\n')
    def test_getpass_stream(self):
        "Tests that kernel getpass accept the stream parameter"
        kernel = InProcessKernel()
        kernel._allow_stdin = True
        # Stub the input request so getpass doesn't block on a frontend.
        kernel._input_request = lambda *args, **kwargs : None
        kernel.getpass(stream='non empty')
| gpl-3.0 |
LohithBlaze/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
# Train on the first 2000 samples, evaluate on the remaining 10000.
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}
plt.figure()
# One curve per regularization strategy; each setting overrides a subset
# of the shared original_params.
for label, color, setting in [('No shrinkage', 'orange',
                               {'learning_rate': 1.0, 'subsample': 1.0}),
                              ('learning_rate=0.1', 'turquoise',
                               {'learning_rate': 0.1, 'subsample': 1.0}),
                              ('subsample=0.5', 'blue',
                               {'learning_rate': 1.0, 'subsample': 0.5}),
                              ('learning_rate=0.1, subsample=0.5', 'gray',
                               {'learning_rate': 0.1, 'subsample': 0.5}),
                              ('learning_rate=0.1, max_features=2', 'magenta',
                               {'learning_rate': 0.1, 'max_features': 2})]:
    params = dict(original_params)
    params.update(setting)
    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)
    # compute test set deviance at every boosting stage
    test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        # clf.loss_ assumes that y_test[i] in {0, 1}
        test_deviance[i] = clf.loss_(y_test, y_pred)
    # Plot every 5th stage to keep the figure light.
    plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
            '-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
# Only the first n_relevant_features columns carry signal; the rest stay 0.
for k in range(n_relevant_features):
    coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
# Fit one independent Lasso per task vs a single joint MultiTaskLasso.
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
# Time course of a single relevant feature under each estimator.
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
         'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/extension/test_boolean.py | 1 | 12167 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.boolean import BooleanDtype
from pandas.tests.extension import base
def make_data():
    """Return 100 sample values: alternating booleans with NA markers
    (np.nan) at indices 8 and 97."""
    values = []
    values.extend([True, False] * 4)
    values.append(np.nan)
    values.extend([True, False] * 44)
    values.append(np.nan)
    values.extend([True, False])
    return values
# Fixtures required by the shared extension-array base test suite.
@pytest.fixture
def dtype():
    # The extension dtype under test.
    return BooleanDtype()
@pytest.fixture
def data(dtype):
    # A length-100 array with a couple of missing values (see make_data).
    return pd.array(make_data(), dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
    # All-ones array (truthy) used by arithmetic base tests.
    return pd.array(np.ones(100), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
    # [NA, valid] pair expected by the missing-data base tests.
    return pd.array([np.nan, True], dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
    # Sorted-order reference: B, B, A with True > False.
    return pd.array([True, True, False], dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
    # Same as above but with a missing value in the middle.
    return pd.array([True, np.nan, False], dtype=dtype)
@pytest.fixture
def na_cmp():
    # we are pd.NA
    return lambda x, y: x is pd.NA and y is pd.NA
@pytest.fixture
def na_value():
    return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
    # Layout [b, b, na, na, a, a, b] required by the groupby base tests.
    b = True
    a = False
    na = np.nan
    return pd.array([b, b, na, na, a, a, b], dtype=dtype)
# These suites run the shared extension-array base tests unchanged:
# BooleanArray needs no dtype/interface/constructor/getitem/setitem/missing
# specific overrides.
class TestDtype(base.BaseDtypeTests):
    pass
class TestInterface(base.BaseInterfaceTests):
    pass
class TestConstructors(base.BaseConstructorsTests):
    pass
class TestGetitem(base.BaseGetitemTests):
    pass
class TestSetitem(base.BaseSetitemTests):
    pass
class TestMissing(base.BaseMissingTests):
    pass
# Arithmetic base tests, adjusted for BooleanArray: most ops succeed
# (unlike numpy bool arrays), except subtraction which numpy forbids.
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    # Ops for which numpy raises on boolean operands.
    implements = {"__sub__", "__rsub__"}
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)
    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            if op_name in self.implements:
                msg = r"numpy boolean subtract"
                with pytest.raises(TypeError, match=msg):
                    op(s, other)
                return
            result = op(s, other)
            expected = s.combine(other, op)
            if op_name in (
                "__floordiv__",
                "__rfloordiv__",
                "__pow__",
                "__rpow__",
                "__mod__",
                "__rmod__",
            ):
                # combine keeps boolean type
                expected = expected.astype("Int8")
            elif op_name in ("__truediv__", "__rtruediv__"):
                # combine with bools does not generate the correct result
                # (numpy behaviour for div is to regard the bools as numeric)
                expected = s.astype(float).combine(other, op)
            if op_name == "__rpow__":
                # for rpow, combine does not propagate NaN
                expected[result.isna()] = np.nan
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)
    def _check_divmod_op(self, s, op, other, exc=None):
        # override to not raise an error
        super()._check_divmod_op(s, op, other, None)
    @pytest.mark.skip(reason="BooleanArray does not error on ops")
    def test_error(self, data, all_arithmetic_operators):
        # other specific errors tested in the boolean array specific tests
        pass
    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
        # frame & scalar
        op_name = all_arithmetic_operators
        if op_name not in self.implements:
            mark = pytest.mark.xfail(reason="_reduce needs implementation")
            request.node.add_marker(mark)
        super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
class TestComparisonOps(base.BaseComparisonOpsTests):
    # Comparison coverage lives in tests/arrays/test_boolean.py; here we
    # only keep the base suite from expecting errors.
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)
    def _compare_other(self, s, data, op_name, other):
        self.check_opname(s, op_name, other)
    @pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
    def test_compare_scalar(self, data, all_compare_operators):
        pass
    @pytest.mark.skip(reason="Tested in tests/arrays/test_boolean.py")
    def test_compare_array(self, data, all_compare_operators):
        pass
# Reshaping base tests run unchanged.
class TestReshaping(base.BaseReshapingTests):
    pass
# Method base tests; several are overridden because a boolean array can
# only ever hold two distinct non-NA values, while the base suite
# assumes three.
class TestMethods(base.BaseMethodsTests):
    @pytest.mark.parametrize("na_sentinel", [-1, -2])
    def test_factorize(self, data_for_grouping, na_sentinel):
        # override because we only have 2 unique values
        labels, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
        expected_labels = np.array(
            [0, 0, na_sentinel, na_sentinel, 1, 1, 0], dtype=np.intp
        )
        expected_uniques = data_for_grouping.take([0, 4])
        tm.assert_numpy_array_equal(labels, expected_labels)
        self.assert_extension_array_equal(uniques, expected_uniques)
    def test_combine_le(self, data_repeated):
        # override because expected needs to be boolean instead of bool dtype
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
            dtype="boolean",
        )
        self.assert_series_equal(result, expected)
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 <= x2)
        expected = pd.Series([a <= val for a in list(orig_data1)], dtype="boolean")
        self.assert_series_equal(result, expected)
    def test_searchsorted(self, data_for_sorting, as_series):
        # override because we only have 2 unique values
        data_for_sorting = pd.array([True, False], dtype="boolean")
        b, a = data_for_sorting
        arr = type(data_for_sorting)._from_sequence([a, b])
        if as_series:
            arr = pd.Series(arr)
        assert arr.searchsorted(a) == 0
        assert arr.searchsorted(a, side="right") == 1
        assert arr.searchsorted(b) == 1
        assert arr.searchsorted(b, side="right") == 2
        result = arr.searchsorted(arr.take([0, 1]))
        expected = np.array([0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)
        # sorter
        sorter = np.array([1, 0])
        assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
    @pytest.mark.skip(reason="uses nullable integer")
    def test_value_counts(self, all_data, dropna):
        return super().test_value_counts(all_data, dropna)
# Casting base tests run unchanged.
class TestCasting(base.BaseCastingTests):
    pass
class TestGroupby(base.BaseGroupbyTests):
    """
    Groupby-specific tests are overridden because boolean only has 2
    unique values, base tests uses 3 groups.
    """
    def test_grouping_grouper(self, data_for_grouping):
        df = pd.DataFrame(
            {"A": ["B", "B", None, None, "A", "A", "B"], "B": data_for_grouping}
        )
        gr1 = df.groupby("A").grouper.groupings[0]
        gr2 = df.groupby("B").grouper.groupings[0]
        tm.assert_numpy_array_equal(gr1.grouper, df.A.values)
        tm.assert_extension_array_equal(gr2.grouper, data_for_grouping)
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        # data_for_grouping is [True, True, NA, NA, False, False, True].
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("B", as_index=as_index).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=True)
        index = pd.Index(index, name="B")
        expected = pd.Series([3, 1], index=index, name="A")
        if as_index:
            self.assert_series_equal(result, expected)
        else:
            expected = expected.reset_index()
            self.assert_frame_equal(result, expected)
    def test_groupby_extension_no_sort(self, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("B", sort=False).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=False)
        index = pd.Index(index, name="B")
        expected = pd.Series([1, 3], index=index, name="A")
        self.assert_series_equal(result, expected)
    def test_groupby_extension_transform(self, data_for_grouping):
        # Drop the NA rows before grouping so transform sees only valid keys.
        valid = data_for_grouping[~data_for_grouping.isna()]
        df = pd.DataFrame({"A": [1, 1, 3, 3, 1], "B": valid})
        result = df.groupby("B").A.transform(len)
        expected = pd.Series([3, 3, 2, 2, 3], name="A")
        self.assert_series_equal(result, expected)
    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
        # Smoke test: apply must not raise for any of the op flavours.
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        df.groupby("B").apply(groupby_apply_op)
        df.groupby("B").A.apply(groupby_apply_op)
        df.groupby("A").apply(groupby_apply_op)
        df.groupby("A").B.apply(groupby_apply_op)
    def test_groupby_apply_identity(self, data_for_grouping):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("A").B.apply(lambda x: x.array)
        expected = pd.Series(
            [
                df.B.iloc[[0, 1, 6]].array,
                df.B.iloc[[2, 3]].array,
                df.B.iloc[[4, 5]].array,
            ],
            index=pd.Index([1, 2, 3], name="A"),
            name="B",
        )
        self.assert_series_equal(result, expected)
    def test_in_numeric_groupby(self, data_for_grouping):
        df = pd.DataFrame(
            {
                "A": [1, 1, 2, 2, 3, 3, 1],
                "B": data_for_grouping,
                "C": [1, 1, 1, 1, 1, 1, 1],
            }
        )
        result = df.groupby("A").sum().columns
        if data_for_grouping.dtype._is_numeric:
            expected = pd.Index(["B", "C"])
        else:
            expected = pd.Index(["C"])
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize("min_count", [0, 10])
    def test_groupby_sum_mincount(self, data_for_grouping, min_count):
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1], "B": data_for_grouping})
        result = df.groupby("A").sum(min_count=min_count)
        if min_count == 0:
            expected = pd.DataFrame(
                {"B": pd.array([3, 0, 0], dtype="Int64")},
                index=pd.Index([1, 2, 3], name="A"),
            )
            tm.assert_frame_equal(result, expected)
        else:
            # min_count larger than any group size forces all-NA sums.
            expected = pd.DataFrame(
                {"B": pd.array([pd.NA] * 3, dtype="Int64")},
                index=pd.Index([1, 2, 3], name="A"),
            )
            tm.assert_frame_equal(result, expected)
class TestNumericReduce(base.BaseNumericReduceTests):
    def check_reduce(self, s, op_name, skipna):
        # Compare the masked-boolean reduction against the float64
        # reference implementation of the same reduction.
        result = getattr(s, op_name)(skipna=skipna)
        expected = getattr(s.astype("float64"), op_name)(skipna=skipna)
        # override parent function to cast to bool for min/max
        if np.isnan(expected):
            expected = pd.NA
        elif op_name in ("min", "max"):
            expected = bool(expected)
        tm.assert_almost_equal(result, expected)
# Remaining suites run the shared base tests without overrides.
class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass
class TestPrinting(base.BasePrintingTests):
    pass
class TestUnaryOps(base.BaseUnaryOpsTests):
    pass
class TestParsing(base.BaseParsingTests):
    pass
| bsd-3-clause |
nomadcube/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
                                             n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
    # compatibility matplotlib < 1.0
    # NOTE(review): the bare except below is only meant as a fallback for
    # matplotlib versions without 3d projections; it also hides other errors.
    ax = fig.add_subplot(211, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
    # 2D fallback: project onto the (x, z) plane.
    ax = fig.add_subplot(211)
    ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
tongwang01/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 13 | 4470 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
    """2 layer ConvNet to predict from sequence of words to a class.

    features : integer word-index tensor; target : integer class labels.
    Returns (predictions dict, loss, train_op) as expected by
    learn.Estimator. Written against the TensorFlow 1.x tf.contrib APIs.
    Uses the module globals n_words (set by main) and the layer-size
    constants defined at the top of the file.
    """
    # Convert indexes of words into embeddings.
    # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    target = tf.one_hot(target, 15, 1, 0)  # 15 output classes
    word_vectors = tf.contrib.layers.embed_sequence(
        features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
    # Add a channel dimension so conv2d treats the sequence as an image.
    word_vectors = tf.expand_dims(word_vectors, 3)
    with tf.variable_scope('CNN_Layer1'):
        # Apply Convolution filtering on input sequence.
        conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,
                                                FILTER_SHAPE1, padding='VALID')
        # Add a RELU for non linearity.
        conv1 = tf.nn.relu(conv1)
        # Max pooling across output of Convolution+Relu.
        pool1 = tf.nn.max_pool(
            conv1, ksize=[1, POOLING_WINDOW, 1, 1],
            strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
        # Transpose matrix so that n_filters from convolution becomes width.
        pool1 = tf.transpose(pool1, [0, 1, 3, 2])
    with tf.variable_scope('CNN_Layer2'):
        # Second level of convolution filtering.
        conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
                                                FILTER_SHAPE2, padding='VALID')
        # Max across each filter to get useful features for classification.
        pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
    # Apply regular WX + B and classification.
    logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
    loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        optimizer='Adam', learning_rate=0.01)
    return (
        {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
        loss, train_op)
def main(unused_argv):
    """Load DBpedia, build the vocabulary, train the CNN and print accuracy."""
    # n_words is read by cnn_model at graph-construction time, hence the
    # module-level global.
    global n_words
    # Prepare training and testing data
    dbpedia = learn.datasets.load_dataset(
        'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
    # Column 1 holds the document text; .target the integer labels.
    x_train = pandas.DataFrame(dbpedia.train.data)[1]
    y_train = pandas.Series(dbpedia.train.target)
    x_test = pandas.DataFrame(dbpedia.test.data)[1]
    y_test = pandas.Series(dbpedia.test.target)
    # Process vocabulary
    vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
    x_train = np.array(list(vocab_processor.fit_transform(x_train)))
    x_test = np.array(list(vocab_processor.transform(x_test)))
    n_words = len(vocab_processor.vocabulary_)
    print('Total words: %d' % n_words)
    # Build model
    classifier = learn.Estimator(model_fn=cnn_model)
    # Train and predict
    classifier.fit(x_train, y_train, steps=100)
    y_predicted = [
        p['class'] for p in classifier.predict(x_test, as_iterable=True)]
    score = metrics.accuracy_score(y_test, y_predicted)
    print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
    # Parse the single demo flag; remaining argv is handed to tf.app.run,
    # which invokes main() with it.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--test_with_fake_data',
        default=False,
        help='Test the example code with fake data.',
        action='store_true'
    )
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/magfit.py | 6 | 5105 | #!/usr/bin/env python
'''
fit best estimate of magnetometer offsets
'''
import sys, time, os, math
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--noise", type=float, default=0, help="noise to add")
parser.add_argument("--mag2", action='store_true', help="use 2nd mag from DF log")
parser.add_argument("--radius", default=None, type=float, help="target radius")
parser.add_argument("--plot", action='store_true', help="plot points in 3D")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.rotmat import Vector3
def noise():
    '''Return a random unit vector scaled by the --noise option magnitude.'''
    from random import gauss
    direction = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    direction.normalize()
    return direction * args.noise
def select_data(data):
    '''Thin out *data*, keeping at most 2 samples per 20-unit cell.

    Points are bucketed by their (x, y, z)/20 cell; any point whose cell
    already holds two kept samples is dropped. Prints the before/after
    sizes and returns the kept list.
    '''
    kept = []
    cell_counts = {}
    for mag in data:
        cell = "%u:%u:%u" % (mag.x/20, mag.y/20, mag.z/20)
        cell_counts[cell] = cell_counts.get(cell, 0) + 1
        if cell_counts[cell] < 3:
            kept.append(mag)
    print(len(data), len(kept))
    return kept
def radius(mag, offsets):
    '''Field strength implied by data point *mag* under the given offsets.'''
    corrected = mag + offsets
    return corrected.length()
def radius_cmp(a, b, offsets):
    '''three-way comparison (+1/0/-1) of two points by radius about offsets'''
    ra = radius(a, offsets)
    rb = radius(b, offsets)
    if ra > rb:
        return 1
    if ra < rb:
        return -1
    return 0
def sphere_error(p, data):
    '''residuals between each data point's radius and the sphere radius.

    p is (x, y, z, r); when --radius was given on the command line the
    fitted r is overridden by that target radius. Returns one residual
    per data point, as required by scipy.optimize.leastsq.
    '''
    # NOTE: the original imported scipy's `sqrt` here but never used it;
    # scipy.sqrt has since been removed entirely, so the import is dropped.
    x, y, z, r = p
    if args.radius is not None:
        r = args.radius
    ofs = Vector3(x, y, z)
    return [r - radius(d, ofs) for d in data]
def fit_data(data):
    '''least-squares fit of a sphere (centre + radius) to the data points.

    Returns (offset_vector, field_strength). Raises RuntimeError when the
    optimizer does not converge (leastsq ier codes 1-4 mean success).
    '''
    from scipy import optimize
    p0 = [0.0, 0.0, 0.0, 0.0]
    # args must be a tuple; the original passed `(data)` (i.e. the bare
    # list) and only worked because leastsq wraps non-tuple args itself.
    p1, ier = optimize.leastsq(sphere_error, p0[:], args=(data,))
    if ier not in [1, 2, 3, 4]:
        raise RuntimeError("Unable to find solution")
    if args.radius is not None:
        r = args.radius
    else:
        r = p1[3]
    return (Vector3(p1[0], p1[1], p1[2]), r)
def magfit(logfile):
    '''find best magnetometer offset fit to a log file'''
    # sort() needs a key function on py3; cmp_to_key adapts radius_cmp
    from functools import cmp_to_key

    # BUG FIX: the original read the module-level loop variable `filename`
    # instead of its own `logfile` parameter.
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile, notimestamps=args.notimestamps)

    data = []
    offsets = Vector3(0, 0, 0)

    # now gather all the data, tracking the most recent reported offsets so
    # each sample can be stored as a raw (offset-free) measurement
    while True:
        m = mlog.recv_match(condition=args.condition)
        if m is None:
            break
        if m.get_type() == "SENSOR_OFFSETS":
            # update current offsets
            offsets = Vector3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)
        if m.get_type() == "RAW_IMU":
            mag = Vector3(m.xmag, m.ymag, m.zmag)
            # add data point after subtracting the current offsets
            data.append(mag - offsets + noise())
        if m.get_type() == "MAG" and not args.mag2:
            offsets = Vector3(m.OfsX, m.OfsY, m.OfsZ)
            mag = Vector3(m.MagX, m.MagY, m.MagZ)
            data.append(mag - offsets + noise())
        if m.get_type() == "MAG2" and args.mag2:
            offsets = Vector3(m.OfsX, m.OfsY, m.OfsZ)
            mag = Vector3(m.MagX, m.MagY, m.MagZ)
            data.append(mag - offsets + noise())

    print("Extracted %u data points" % len(data))
    print("Current offsets: %s" % offsets)

    orig_data = data
    data = select_data(data)

    # remove initial outliers: keep the middle 14/16 by radius.
    # (integer // and key= keep this working on both py2 and py3; the
    # original used the py2-only cmp form of sort and float slice indices)
    data.sort(key=cmp_to_key(lambda a, b: radius_cmp(a, b, offsets)))
    data = data[len(data)//16:-len(data)//16]

    # do an initial fit
    (offsets, field_strength) = fit_data(data)

    for count in range(3):
        # sort the data by radius about the current offset estimate
        data.sort(key=cmp_to_key(lambda a, b: radius_cmp(a, b, offsets)))
        print("Fit %u : %s field_strength=%6.1f to %6.1f" % (
            count, offsets,
            radius(data[0], offsets), radius(data[-1], offsets)))
        # discard outliers, keep the middle 3/4
        data = data[len(data)//8:-len(data)//8]
        # fit again
        (offsets, field_strength) = fit_data(data)

    print("Final : %s field_strength=%6.1f to %6.1f" % (
        offsets,
        radius(data[0], offsets), radius(data[-1], offsets)))
    if args.plot:
        plot_data(orig_data, data)
def plot_data(orig_data, data):
    '''show the raw (red) and filtered (blue) point clouds as 3D scatters'''
    import numpy as np
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt

    for cloud, colour in [(orig_data, 'r'), (data, 'b')]:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter([p.x for p in cloud],
                   [p.y for p in cloud],
                   [p.z for p in cloud],
                   c=colour, marker='o')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        plt.show()
total = 0.0  # NOTE(review): never used anywhere — candidate for removal
# fit offsets for every log file given on the command line
for filename in args.logs:
    magfit(filename)
| gpl-3.0 |
kevinhikali/ml_kevin | bottom/gradient_descent.py | 1 | 1449 | # -*- coding: utf-8 -*-
"""
@author: kevinhikali
@email: hmingwei@gmail.com
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# global variable
SampleTh = np.array([[2],
[5]])
# function
def h(th, x):
    """Linear hypothesis: inner product of parameter vector th with features x.

    The original declared `global SampleTh` here, but SampleTh is never read
    or assigned in this function, so the declaration has been removed.
    """
    return np.dot(th.transpose(), x)
nSample = 40
nParam = 2
SampleX = abs(np.random.rand(nParam, nSample))
# if bias required
SampleX[0, :] = np.ones(nSample)
SampleY = np.dot(SampleTh.transpose(), SampleX)
SampleY[0, :] += 0.5*np.random.rand(nSample)
a = 0.4
th = np.zeros(nParam)
x = np.zeros(nParam)
y = np.zeros(1)
# temp = 0
# # batch gradient descent
# for _ in range(10000):
# for j in range(nParam):
# for i in range(nSample):
# x = SampleX[:, i]
# y = SampleY[:, i]
# temp += (y - h(th, x))*x[j]
# th[j] += a*temp
# temp = 0
# stochastic gradient descent
for _ in range(10):
for i in range(nSample):
for j in range(nParam):
x = SampleX[:, i]
y = SampleY[:, i]
th[j] += a*(y-h(th, x))*x[j]
fig = plt.figure()
plt_x = 0
for i in range(nSample):
global SampleTh
plt_x = SampleX[1, i]
x = SampleX[:, i]
# origin
y = h(SampleTh, x)
plot, = plt.plot(plt_x, y, 'go')
# sample
y = SampleY[0, i]
plot, = plt.plot(plt_x, y, 'bo')
# trained
y = h(th, x)
plot, = plt.plot(plt_x, y, 'ro')
plt.show()
| gpl-3.0 |
lbsantos/psycholinguistic-regression | mlp.py | 1 | 2990 | from multiview_regression import W2VTransformer
from keras.layers import Input, Dense, Dropout, merge
from scipy.stats import spearmanr, pearsonr
from keras.callbacks import EarlyStopping
from sklearn.model_selection import KFold
from keras.models import Sequential, Model
import numpy as np
import pandas
import gensim
import json
import keras
import os
seed = 42
np.random.seed(seed)
def build_model(neurons):
    """Build the regression MLP: two dropout-regularized ReLU hidden layers
    of width `neurons` and a single linear output, compiled with Adam on MSE.
    """
    net = Sequential()
    for _ in range(2):
        net.add(Dense(neurons,
                      input_shape=(neurons,),
                      activation='relu',
                      init='glorot_normal'))
        net.add(Dropout(0.5))
    net.add(Dense(1, init='glorot_normal'))
    net.compile(optimizer='adam',
                loss='mean_squared_error')
    return net
def mse_spearman_pearson(ground_truth, predictions):
    """Return (MSE, Spearman rho, Pearson r) between two 1-D score arrays.

    Both inputs are flattened to the length of ground_truth first, so
    (n, 1)-shaped model outputs are accepted too.
    """
    n = len(ground_truth)
    ground_truth = ground_truth.reshape(n)
    predictions = predictions.reshape(n)
    squared_err = (ground_truth - predictions) ** 2
    return (np.mean(squared_err),
            spearmanr(ground_truth, predictions)[0],
            pearsonr(ground_truth, predictions)[0])
def my_cross_val_score(X, y, cv):
    """Cross-validate the MLP over the folds of `cv`.

    A fresh 300-unit network is trained per fold with early stopping on
    validation loss; each fold is scored with (mse, spearman, pearson).
    Returns an array of shape (n_folds, 3).
    """
    scores = []
    for train_idx, test_idx in cv.split(X, y):
        net = build_model(300)
        stopper = EarlyStopping(monitor='val_loss',
                                patience=2)
        net.fit(X[train_idx],
                y[train_idx],
                validation_split=0.1,
                batch_size=2,
                nb_epoch=100,
                callbacks=[stopper])
        fold_preds = net.predict(X[test_idx]).reshape(len(test_idx))
        fold_truth = y[test_idx].reshape(len(test_idx))
        scores.append(mse_spearman_pearson(fold_truth, fold_preds))
    return np.array(scores)
model_embedding = gensim.models.Word2Vec.load_word2vec_format(
'./models_nathan/tokenized/skip_s300_w5_m5',
binary=True,
unicode_errors='ignore'
)
w2v_transform = W2VTransformer(model_embedding)
path_root = './data/'
files = ['word_lists_concretenes.csv',
'word_lists_familiarity.csv',
'word_lists_imagery.csv',
'word_lists_oao.csv',
'word_lists_aoa_final_1717.csv']
cv = KFold(5)
for file in files:
data_path = os.path.join(path_root, file)
data = pandas.read_csv(data_path)
data = np.asarray(data)
data_X = data[:, 0].reshape((len(data), -1))
data_y = data[:, 1].reshape((len(data), -1))
data_new2 = w2v_transform.fit_transform(data_X)
print(data_new2.shape)
scores = my_cross_val_score(data_new2,
data_y,
cv)
print("ReLu + Linear")
print(file)
print(scores.mean(axis=0))
print("\n======\n") | gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.py | 1 | 22801 | import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (timedelta_range, date_range, Series, Timedelta,
DatetimeIndex, TimedeltaIndex, Index, DataFrame,
Int64Index)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_index_equal)
from ..datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_shift(self):
# test shift for TimedeltaIndex
# err8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=pd.Timedelta(0)) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=np.timedelta64(0, 's')) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=timedelta(0)) == 1
with tm.assert_raises_regex(ValueError,
'unit abbreviation w/o a number'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
with pytest.raises(
ValueError,
match='tolerance size must match'):
idx.get_loc(idx[1], method='nearest',
tolerance=[Timedelta(0).to_timedelta64(),
Timedelta(0).to_timedelta64()])
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc('1 day 1 hour', method) == loc
# GH 16909
assert idx.get_loc(idx[1].to_timedelta64()) == 1
# GH 16896
assert idx.get_loc('0 days') == 0
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
assert tidx.get_loc(pd.NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float('nan')) == 1
assert tidx.get_loc(np.nan) == 1
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
pytest.raises(TypeError, lambda: idx * idx)
pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
idx.fillna(pd.Timedelta('3 hour'))
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
def test_difference_freq(self):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_take(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
assert dr[1:].name == dr.name
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_index_equal(cols, joined)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),
check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),
check_dtype=False)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
assert not idx.equals(list(idx))
non_td = Index(list('abc'))
assert not idx.equals(list(non_td))
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, exp)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = tm.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
assert (result['B'] == td).all()
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
tm.assert_index_equal(
rng.microseconds,
Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
tm.assert_index_equal(rng.nanoseconds,
Index([456, 456], dtype='int64'))
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
# preserve name (GH15589)
rng.name = 'name'
assert rng.days.name == 'name'
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
class TestSlicing(object):
    """Tests for timedelta arithmetic against DatetimeIndex objects."""
    @pytest.mark.parametrize('freq', ['B', 'D'])
    def test_timedelta(self, freq):
        # adding then subtracting the same timedelta must round-trip exactly
        index = date_range('1/1/2000', periods=50, freq=freq)
        shifted = index + timedelta(1)
        back = shifted + timedelta(-1)
        tm.assert_index_equal(index, back)
        if freq == 'D':
            # calendar-day freq survives a plain timedelta shift
            expected = pd.tseries.offsets.Day(1)
            assert index.freq == expected
            assert shifted.freq == expected
            assert back.freq == expected
        else:  # freq == 'B'
            # business-day freq is lost on the shifted index (a 1-day shift
            # can land on weekends), but restored on the round-trip result
            assert index.freq == pd.tseries.offsets.BusinessDay(1)
            assert shifted.freq is None
            assert back.freq == pd.tseries.offsets.BusinessDay(1)
        # subtraction and adding the negation must agree
        result = index - timedelta(1)
        expected = index + timedelta(-1)
        tm.assert_index_equal(result, expected)
        # GH4134, buggy with timedeltas
        rng = date_range('2013', '2014')
        s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
        result2 = DatetimeIndex(s - np.timedelta64(100000000))
        result3 = rng - np.timedelta64(100000000)
        result4 = DatetimeIndex(s - pd.offsets.Hour(1))
        # offset and raw-timedelta paths must give identical results
        tm.assert_index_equal(result1, result4)
        tm.assert_index_equal(result2, result3)
class TestTimeSeries(object):
    """Boxing behaviour of timedelta-dtype Series."""
    # legacy pytest-xdist hint allowing this class to be split across workers
    _multiprocess_can_split_ = True
    def test_series_box_timedelta(self):
        # scalar access must box raw timedelta64 values as pandas Timedelta
        rng = timedelta_range('1 day 1 s', periods=5, freq='h')
        s = Series(rng)
        assert isinstance(s[1], Timedelta)
        assert isinstance(s.iat[2], Timedelta)
| apache-2.0 |
paineliu/tflearn | han_cnn.py | 1 | 5666 | # encoding: UTF-8
# neural network structure:
#
# · · · · · · · · · · (input data, 1-deep) X [batch, 28, 28, 1]
# @ @ @ @ @ @ @ @ @ @ -- conv. layer 5x5x1=>4 stride 1 W1 [5, 5, 1, 4] B1 [4]
# ∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶ Y1 [batch, 28, 28, 4]
# @ @ @ @ @ @ @ @ -- conv. layer 5x5x4=>8 stride 2 W2 [5, 5, 4, 8] B2 [8]
# ∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶ Y2 [batch, 14, 14, 8]
# @ @ @ @ @ @ -- conv. layer 4x4x8=>12 stride 2 W3 [4, 4, 8, 12] B3 [12]
# ∶∶∶∶∶∶∶∶∶∶∶ Y3 [batch, 7, 7, 12] => reshaped to YY [batch, 7*7*12]
# \x/x\x\x/ -- fully connected layer (relu) W4 [7*7*12, 200] B4 [200]
# · · · · Y4 [batch, 200]
# \x/x\x/ -- fully connected layer (softmax) W5 [200, 10] B5 [10]
# · · · Y [batch, 10]
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
tf.logging.set_verbosity(tf.logging.ERROR)
def read_queue_data(filenames):
    """Build a TF1 queue-based input pipeline over TFRecord files.

    Each example holds a 64x64 single-channel image stored as raw uint8
    bytes plus an int64 class label; returns (float32 image vector,
    one-hot label over 100 classes) tensors.
    """
    filename_queue = tf.train.string_input_producer(filenames, shuffle=False)  # build a queue of input files
    reader = tf.TFRecordReader()
    _, example = reader.read(filename_queue)  # returns (filename, serialized example)
    features = tf.parse_single_example(example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'image': tf.FixedLenFeature([], tf.string),
                                       })  # extract the image bytes and the label from the example
    img = tf.decode_raw(features['image'], tf.uint8)
    img = tf.reshape(img, [64 * 64 * 1])
    img = tf.cast(img, tf.float32)  # * (1. / 255)  -- scaling is applied later, at the placeholder
    label = tf.one_hot(features['label'], 100)
    return img, label
xx, yy = read_queue_data(['../records/han_train.tfrecords'])
xx_batch, yy_batch = tf.train.shuffle_batch([xx, yy], batch_size=1000, capacity=5000, min_after_dequeue=1000, num_threads=2)
xx_test, yy_test = read_queue_data(['../records/han_test.tfrecords'])
xx_test_batch, yy_test_batch = tf.train.shuffle_batch([xx_test, yy_test], batch_size=1000, capacity=2000, min_after_dequeue=1000, num_threads=2)
tf.set_random_seed(1)
np.random.seed(1)
LR = 0.001 # learning rate
tf_x = tf.placeholder(tf.float32, [None, 64 * 64]) / 255.
image = tf.reshape(tf_x, [-1, 64, 64, 1]) # (batch, height, width, channel)
tf_y = tf.placeholder(tf.int32, [None, 100]) # input y
# CNN
conv1 = tf.layers.conv2d( # shape (64, 64, 1)
inputs=image,
filters=16,
kernel_size=5,
strides=1,
padding='same',
activation=tf.nn.relu
) # -> (64, 64, 16)
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=2,
strides=2,
) # -> (32, 32, 16)
conv2 = tf.layers.conv2d(pool1, 32, 5, 1, 'same', activation=tf.nn.relu) # -> (32, 32, 32)
pool2 = tf.layers.max_pooling2d(conv2, 2, 2) # -> (16, 16, 32)
flat = tf.reshape(pool2, [-1, 16*16*32]) # -> (7*7*32, )
output = tf.layers.dense(flat, 100) # output layer
loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output) # compute cost
train_op = tf.train.AdamOptimizer(LR).minimize(loss)
accuracy = tf.metrics.accuracy( # return (acc, update_op), and create 2 local variables
labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1),)[1]
sess = tf.Session()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) # the local var is for accuracy_op
sess.run(init_op) # initialize var in graph
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# following function (plot_with_labels) is for visualization, can be ignored if not interested
from matplotlib import cm
try: from sklearn.manifold import TSNE; HAS_SK = True
except: HAS_SK = False; print('\nPlease install sklearn for layer visualization\n')
def plot_with_labels(lowDWeights, labels):
    """Scatter the 2-D embedding, drawing each point as its class label text."""
    plt.cla()
    X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
    for x, y, s in zip(X, Y, labels):
        c = cm.rainbow(int(255 * s / 9))
        plt.text(x, y, s, backgroundcolor=c, fontsize=9)
    plt.xlim(X.min(), X.max())
    plt.ylim(Y.min(), Y.max())
    plt.title('Visualize last layer')
    plt.show()
    plt.pause(0.01)
plt.ion()
test_x, test_y = sess.run([xx_test_batch, yy_test_batch])
for step in range(600):
b_x, b_y = sess.run([xx_batch, yy_batch])
_, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})
if step % 50 == 0:
accuracy_, flat_representation = sess.run([accuracy, flat], {tf_x: test_x, tf_y: test_y})
print('Step:', step, '| train loss: %.4f' % loss_, '| test accuracy: %.2f' % accuracy_)
if HAS_SK:
# Visualization of trained flatten layer (T-SNE)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000); plot_only = 500
low_dim_embs = tsne.fit_transform(flat_representation[:plot_only, :])
labels = np.argmax(test_y, axis=1)[:plot_only]; plot_with_labels(low_dim_embs, labels)
plt.ioff()
# print 10 predictions from test data
test_output = sess.run(output, {tf_x: test_x[:10]})
pred_y = np.argmax(test_output, 1)
print(pred_y, 'prediction number')
print(np.argmax(test_y[:10], 1), 'real number')
coord.request_stop()
coord.join(threads)
| apache-2.0 |
ecatkins/telegram_chatbot | main_chatbot.py | 1 | 3683 | import json
import requests
import time
import urllib
from gensim import utils, matutils, models
from sklearn.externals import joblib
from secret import *
URL = "https://api.telegram.org/bot{}/".format(TOKEN)
def get_url(url):
    ''' Gets url that returns content '''
    resp = requests.get(url)
    return resp.content.decode("utf8")
def get_json_from_url(url):
    '''Gets json from url '''
    return json.loads(get_url(url))
def get_updates(offset=None):
    ''' Gets the updates from the Telegram server (long-poll up to 100s) '''
    url = URL + "getUpdates?timeout=100"
    if offset:
        # only fetch updates at or after this ID
        url += "&offset={}".format(offset)
    return get_json_from_url(url)
def get_last_update_id(updates):
    ''' Gets the ID of the last update '''
    return max(int(update["update_id"]) for update in updates["result"])
def get_last_chat_id_and_text(updates):
    ''' Gets the last chat ID and the text '''
    last_message = updates["result"][-1]["message"]
    return (last_message["text"], last_message["chat"]["id"])
def send_message(text, chat_id):
    ''' Sends message to client.

    The text is percent-encoded before being put in the query string.
    '''
    # BUG FIX: the file only does `import urllib`, which does not expose
    # urllib.parse on py3 — the original worked solely because `requests`
    # imports urllib.parse as a side effect. Import it explicitly here.
    from urllib.parse import quote_plus
    text = quote_plus(text)
    url = URL + "sendMessage?text={}&chat_id={}".format(text, chat_id)
    get_url(url)
def echo_all(updates):
    ''' Tests echoing messages received '''
    for update in updates["result"]:
        message = update["message"]
        send_message(message["text"], message["chat"]["id"])
def basic_responses(updates):
    ''' Tests basic rule-set for sending messages '''
    # canned replies keyed by the lower-cased incoming text
    canned = {
        'hello': 'Hello there',
        'how are you?': "Very well thankyou",
    }
    for update in updates["result"]:
        chat = update["message"]["chat"]["id"]
        text = update["message"]["text"]
        response = canned.get(text.lower())
        if response is None:
            # unrecognized input gets no reply
            continue
        send_message(response, chat)
def doc_response(updates):
    ''' Generates responses to messages received '''
    for update in updates["result"]:
        chat = update["message"]["chat"]["id"]
        text = update["message"]["text"]
        # Embed the incoming text with doc2vec and reshape for sklearn
        vector = doc_model.infer_vector(utils.simple_preprocess(text), steps=40)
        vector = vector.reshape(1, -1)
        # Classify the vector to pick the index of the best canned response
        label = logistic_model.predict(vector)[0]
        # Look up the response text and send it back to the client
        send_message(possible_responses[label], chat)
def main():
    ''' Runs the chatbot functionality in a loop '''
    last_update_id = None
    while True:
        # Poll Telegram for anything newer than what we've already handled.
        updates = get_updates(last_update_id)
        if len(updates["result"]) > 0:
            # Advance the offset so handled updates aren't redelivered.
            last_update_id = get_last_update_id(updates) + 1
            doc_response(updates)
        # Don't hammer the API between polls.
        time.sleep(0.5)
if __name__ == '__main__':
    # Loads saved models from disk before entering the polling loop.
    # (Fix: the final line carried stray dataset residue that made it
    # syntactically invalid.)
    doc_model = models.Doc2Vec.load(doc2vec_location)
    model_details = joblib.load('model.pkl')
    logistic_model = model_details['model']
    possible_responses = model_details['response_vector']
    main()
amboar/fpos | lib/fpos/predict.py | 1 | 14065 | #!/usr/bin/python3
#
# Forecasts future spending based on cycles in historical data
# Copyright (C) 2013 Andrew Jeffery <andrew@aj.id.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pystrgrp
import csv
import numpy as np
from collections import namedtuple
from datetime import datetime, timedelta
from itertools import chain, cycle, islice
from .core import money
import matplotlib.pyplot as plt
# Subcommand metadata consumed by the argparse wiring in parse_args().
cmd_description = \
        """Predict spending based on past habits"""
cmd_help = cmd_description
def pd(datestr):
    """Parse a DD/MM/YYYY string into a datetime; pass datetimes through."""
    if isinstance(datestr, datetime):
        return datestr
    return datetime.strptime(datestr, "%d/%m/%Y")
def group_deltas(group):
    """ group_deltas(group) -> list(int)

    Day gaps between consecutive entries of a spend group, taken after
    sorting the entries into ascending date order. The result has one fewer
    element than the input (empty for empty input).
    """
    ordered = sorted(pd(entry[0]) for entry in group)
    return [ (late - early).days for early, late in zip(ordered, ordered[1:]) ]
def group_delta_bins(deltas):
    """ group_delta_bins(deltas) -> list(int)

    Histogram of the day deltas: index i of the result counts occurrences of
    the delta (i + 1) days in the input. The zero-day bin is dropped because
    same-day repeats are below the one-day resolution of the predictor.
    """
    counts = np.bincount(deltas)
    return counts[1:]
def period(bins):
    """ period(bins) -> int

    Day count up to and including the last non-empty bin, i.e. the longest
    gap observed for the spend group. Returns None when every bin is empty;
    raises ValueError when no bins are supplied.
    """
    if 0 == len(bins):
        raise ValueError("No bins provided")
    for index in range(len(bins) - 1, -1, -1):
        if bins[index] > 0:
            return index + 1
def pmf(bins):
    """ pmf(bins) -> list(float)

    Normalise a histogram into a probability mass function; the returned
    values sum to 1.0.
    """
    total = sum(bins)
    return [ count / total for count in bins ]
def icmf(bins, threshold=0.75):
    """Inverse cumulative probability mass function.

    Return the index of the first bin at which the cumulative probability
    mass of `bins` exceeds `threshold` -- used as an estimate of a spend
    group's periodicity in days.

    Raises ValueError if the cumulative mass never exceeds `threshold`
    (only possible when `threshold` >= 1, or through float rounding), with
    a meaningful message instead of the previous "Whut?".
    """
    # Normalisation is inlined (equivalent to iterating pmf(bins)) so the
    # function stands alone; the accumulation order is unchanged.
    total = sum(bins)
    cumulative = 0.0
    for i, count in enumerate(bins):
        cumulative += count / total
        if cumulative > threshold:
            return i
    raise ValueError(
        "cumulative mass never exceeded threshold %r" % threshold)
def probable_spend(mf, mean):
    """ probable_spend(mf, mean) -> list(float)

    Distribute `mean` across the bins of a reified mass function `mf`; the
    returned values sum to `mean`.
    """
    return [ weight * mean for weight in mf ]
def last(members):
    """ last(members) -> datetime

    Most recent date appearing in a spend group.
    """
    dates = ( pd(member[0]) for member in members )
    return max(dates)
def align(bins, delta):
    """ align(bins, delta) -> list(float)

    Trim the first `delta` predicted bins -- spends that were forecast but
    did not eventuate -- and redistribute their value evenly over the
    non-zero bins that remain, preserving the property that the predictions
    still sum to the group mean. When no non-zero bins remain the prediction
    collapses to all zeros.
    """
    # Value that was predicted but never spent.
    missed = sum(bins[:delta])
    remaining = bins[delta:]
    # Spread the missed value over the bins that still predict a spend.
    candidates = sum(1 for v in remaining if v != 0)
    if candidates == 0:
        return [ 0 ] * len(remaining)
    share = missed / candidates
    return [ v + share if v != 0 else 0 for v in remaining ]
def group_forecast(members, date, debug=False):
    """ group_forecast(members, date) -> seq(float)

    Predict future spending based on spending groups and the probability mass
    function of the deltas of sorted group member dates. Progression in time is
    accounted for by redistributing mis-predicted spends across the remaining
    predicted spends for the calculated spend period. If no spend occurs in the
    calculated spend period then future predictions are cancelled.

    members: rows of one spend group, each shaped (date, value, description,
        ...) -- presumably the IR CSV row format; confirm against caller
    date: "now" anchor; days elapsed since the group's last entry trim the
        front of the prediction (see align())
    debug: when True, print a note for each group dropped as out-of-period

    Returns [] when no prediction can be made, otherwise a lazy, infinite
    sequence of predicted daily amounts starting the day after `date`.
    """
    if not members:
        raise ValueError("Requires at least one member in members list")
    bins = group_delta_bins(group_deltas(members))
    # sum(bins) + 1 is the number of entries contributing to the mean.
    mean = sum(float(e[1]) for e in members) / (sum(bins) + 1)
    # Days elapsed since the group's most recent entry.
    d = (date - last(members)).days
    if 0 == len(bins):
        return []
    else:
        p = period(bins)
        if p < d:
            # The longest observed gap has already elapsed with no new
            # spend: treat the habit as lapsed and cancel the prediction.
            if debug:
                fmt = "Dropped mean={} as \"{}\": p={}, d={}"
                msg = fmt.format(mean, members[0][2], p, d)
                print(msg)
            return []
    # Estimate the periodicity using icmf.
    #
    # The goal is to estimate cashflow for around the next 30 days. Some
    # spending patterns are going to have periods of less than 30 days, so we
    # need to cycle out the observed pattern to meet the projection length.
    #
    # A property of using icmf is that the estimated periodicity will likely
    # be less than the full period of the PMF. To account for this we merge
    # the following cycle of the probability distribution into the current from
    # the index of icmf's periodicity estimate. To paint a picture, imagine
    # the bin distribution is such:
    #
    # *
    # *
    # *
    # ** * * *
    # ----------
    # 0123456789
    #
    # Giving a PMF of:
    #
    # 00 0 0
    # .. . .
    # 11 0 1 1
    # 22 . 2 2
    # 55 5 5 5
    # ----------
    # 0123456789
    #
    # icmf estimates the periodicity as 4, and the code below overlaps the
    # PMF on-top of itself such that each PMF is repeated from bin 4 of the
    # previous:
    #
    # 00 0
    # .. .0 0
    # 11 01. 0 .
    # 22 .22 . 2
    # 55 555 5 5
    # ----------
    # 0123456789
    mass = pmf(bins)
    interval = icmf(bins)
    # overlap >= 0: the last non-zero PMF index is never before icmf's index.
    overlap = period(mass) - (interval + 1)
    merged_mass = mass[:]
    for i in range(overlap):
        merged_mass[interval + 1 + i] += mass[i]
    ps = probable_spend(merged_mass, mean)
    # align() handles the current cycle; cycle() extends the tail forever.
    aps = align(ps, d)
    return chain(aps, cycle(ps[overlap:]))
def name():
    """Subcommand name: the final component of this module's dotted path."""
    return __name__.rpartition(".")[2]
def parse_args(subparser=None):
    """Build this subcommand's argument parser.

    With a subparser collection, register as a subcommand and return the
    one-element parser list; stand-alone, parse sys.argv and return the
    populated namespace.

    Fix: argparse.ArgumentParser() does not accept a `help` keyword -- only
    add_parser() does -- so the stand-alone path previously raised TypeError.
    """
    if subparser:
        parser = subparser.add_parser(name(), description=cmd_description,
                                      help=cmd_help)
    else:
        parser = argparse.ArgumentParser(name(), description=cmd_description)
    parser.add_argument('infile', metavar="INPUT", type=argparse.FileType('r'),
            help="The IR document containing un-categorised transactions")
    return [ parser ] if subparser else parser.parse_args()
def bottoms(data):
    """Running-sum baselines for stacked bars: result[i] == sum(data[:i]).

    Raises ValueError on empty input.
    """
    if not data:
        raise ValueError("Must be at least one element in data")
    result = [ 0 ]
    total = 0
    for value in data[:-1]:
        total += value
        result.append(total)
    return result
def forecast(groups, dates, length=32):
    """ forecast(groups, dates, length) -> list(float), list(float)

    Project daily cashflow for `length` days following dates[1]: the first
    list is predicted spending, the second predicted income, the k-th entry
    of either being the amount k + 1 days after dates[1]. Groups with at
    most two entries are treated as background noise and spread evenly over
    the historical span.
    """
    spend = [ 0 ] * length
    income = [ 0 ] * length
    # Noise: first-row values of groups too small to establish a pattern.
    noise = [ float(g[0][1]) for g in groups if len(g) <= 2 ]
    noise_spend = 0
    noise_income = 0
    span = (dates[1] - dates[0]).days
    if noise and span:
        noise_spend = sum(v for v in noise if v < 0) / span
        noise_income = sum(v for v in noise if v > 0) / span
    for group in groups:
        if len(group) <= 2:
            continue
        predictions = islice(group_forecast(group, dates[1]), length)
        for day, value in enumerate(predictions):
            target = spend if value < 0 else income
            target[day] += value
    return ([ v + noise_spend for v in spend ],
            [ v + noise_income for v in income ])
def graph_bar_cashflow(groups, dates, length=32):
    """Plot forecast expenditure and income as interleaved stacked bars.

    groups: spend groups (lists of transaction rows)
    dates: (earliest, latest) datetimes spanning the source data
    length: graphing window in days; the forecast covers length - 1 days and
        the axis leaves a one-day margin either side. The default preserves
        the previous hard-coded 31-day forecast, and accepting the parameter
        fixes the TypeError raised by main(), which passes a third argument.
    """
    fl = length - 1  # forecast length
    gl = fl + 2  # graph length
    ey, iy = forecast(groups, dates, fl)
    # Interleave expense/income running totals so each pair stacks together.
    bs = bottoms(list(chain(*zip(ey, iy))))
    ex = [ x + 0.1 for x in range(1, len(ey) + 1)]
    be = plt.bar(ex, ey, bottom=bs[::2], color="r", width=0.3)
    ix = [ x + 0.6 for x in range(1, len(iy) + 1)]
    bi = plt.bar(ix, iy, bottom=bs[1::2], color="b", width=0.3)
    plt.axhline(0, color="black")
    # Weekly tick marks labelled with the forecast date.
    majors = list(range(1, gl, 7))
    labels = [ (dates[1] + timedelta(i)).strftime("%d/%m/%y") for i in majors ]
    plt.xticks(majors, labels, rotation=33)
    plt.xlim(0, gl)
    plt.grid(axis="x", which="both")
    plt.grid(axis="y", which="major")
    plt.title("Cashflow Forecast")
    plt.legend((be, bi), ("Forecast Expenditure", "Forecast Income"))
    plt.show()
# Spend group paired with its delta histogram (see group_delta_bins).
gdt = namedtuple("gdt", [ "group", "deltas" ])
# One row of the forecast-expenses report printed by print_forecast_expenses.
fet = namedtuple("fet", ["distance", "previous", "next", "description", "n",
"period", "mean"])
def expense_offset(delta, period, dist):
    """Index of the first non-empty bin at or beyond max(delta, period).

    Returns None when no further spend is predicted in `dist`.
    """
    begin = max(delta, period)
    for offset in range(begin, len(dist)):
        if dist[offset] > 0:
            return offset
    return None
def prune_groups(groups, date, keep=None, cmax=None):
    """Lazily yield gdt records for spend groups still considered active.

    A group survives when it has more than two entries (or `keep` accepts
    its description), has at least one delta bin, and its `cmax` statistic
    over the bins (bin count by default) exceeds the days since its last
    entry.
    """
    measure = cmax if cmax else len
    for group in groups:
        if not (len(group) > 2 or (keep and keep(group[0][2]))):
            continue
        candidate = gdt(group, group_delta_bins(group_deltas(group)))
        if 0 < len(candidate.deltas) and \
                measure(candidate.deltas) > (date - last(candidate.group)).days:
            yield candidate
def print_forecast_expenses(groups, date):
    """Print a CSV report of individual expenses forecast for the next month.

    Each active spend group contributes one row per predicted occurrence in
    the coming 31 days; the final column carries a running expense total.

    Fixes: (1) expense_offset() can legitimately return 0 (spend due
    immediately) and the old `if not f` truthiness test dropped those rows;
    (2) a zero icmf period estimate made range() raise ValueError, so the
    step is clamped to at least one day.
    """
    table = {}
    for gd in prune_groups(groups, date):
        prev = last(gd.group)
        delta = (date - prev).days
        period = icmf(gd.deltas)
        offset = expense_offset(delta, period, gd.deltas)
        if offset is None:
            continue
        mean = sum(float(e[1]) for e in gd.group) / (sum(gd.deltas) + 1)
        for d in range(offset - delta, 31, max(period, 1)):
            row = fet(distance=d,
                      previous=prev.strftime("%d/%m/%y"),
                      next=(date + timedelta(d)).strftime("%d/%m/%y"),
                      description=gd.group[0][2],
                      n=len(gd.group),
                      period=period,
                      mean=mean)
            table.setdefault(d, []).append(row)
    print("Distance,Previous,Next,Description,N,Period,Mean Value,Sum Expense")
    e, i = 0, 0
    for k in sorted(x for x in table.keys() if x >= 0 and x <= 31):
        for row in table[k]:
            # Track expense and income running totals separately.
            if row.mean < 0:
                e += row.mean
            else:
                i += row.mean
            strrow = [str(cell) for cell in row[:-1]]
            strrow.append(money(row.mean))
            strrow.append(money(e))
            print(",".join(strrow))
pet = namedtuple("pet", [ "description", "n", "period", "mean", "annual", "monthly" ])
def calculate_periodic_expenses(groups, date):
    """Estimate each active group's periodic cost, ascending by annual value.

    For every group surviving prune_groups() the mean transaction value and
    icmf() period estimate are annualised as (365 / period) * mean, and also
    expressed per month. Returns a list of pet rows.
    """
    table = []
    for gd in prune_groups(groups, date):
        mean = sum(float(e[1]) for e in gd.group) / (sum(gd.deltas) + 1)
        # NOTE(review): est == 0 would divide by zero below -- presumably
        # prune_groups' bin check prevents that; confirm.
        est = icmf(gd.deltas)
        annual = (365 / est) * mean
        monthly = annual / 12
        table.append(pet(description=gd.group[0][2],
                         n=len(gd.group),
                         period=est,
                         mean=mean,
                         annual=annual,
                         monthly=monthly))
    # sorted() already returns a fresh list; the old list(table) copy was
    # redundant (C414).
    return sorted(table, key=lambda x: float(x.annual))
def print_periodic_expenses(groups, date):
    """Print the periodic-expenses report as CSV, cheapest annual cost first.

    calculate_periodic_expenses() already returns rows sorted by ascending
    annual value; the previous second sort on the same key was redundant.
    """
    print("Description,N,Period,Mean Value,Annual Value,Monthly Value")
    for row in calculate_periodic_expenses(groups, date):
        head = (str(x) for x in row[:3])
        tail = (money(x) for x in row[3:])
        print(",".join(chain(head, tail)))
def print_commitment_targets(groups, date, m_income, monthlies):
    """Print income/expense summaries for commitment targeting.

    m_income: mapping of income sums (presumably keyed by month; confirm
        against the caller)
    monthlies: iterable of per-month expense value sequences

    NOTE(review): `keep` is computed but never used -- this function looks
    unfinished; presumably groundwork for a per-group commitment report.
    """
    si = sum(v for v in m_income.values())
    print("sum income: {}".format(si))
    se = sum(sum(v) for v in monthlies)
    print("sum expenses: {}".format(se))
    keep = prune_groups(groups, date)
def main(args=None):
    """Entry point: group transactions from the IR CSV and graph the forecast.

    Rows flagged "Internal" are inter-account transfers and are excluded.
    Fix: graph_bar_cashflow() takes (groups, dates); the stray third
    argument previously raised TypeError.
    """
    if args is None:
        args = parse_args()
    grouper = pystrgrp.Strgrp()
    reader = csv.reader(args.infile, dialect='excel')
    dates = [ None, None ]
    for r in reader:
        if len(r) >= 4 and not "Internal" == r[3]:
            grouper.add(r[2].upper(), r)
            # Track the overall (earliest, latest) date span of the data.
            dates[0] = pd(r[0]) if not dates[0] else min(pd(r[0]), dates[0])
            dates[1] = pd(r[0]) if not dates[1] else max(pd(r[0]), dates[1])
    graph_bar_cashflow([ list(i.data() for i in g) for g in grouper ], dates)
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/stats/moments.py | 7 | 31610 | """
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
import warnings
import numpy as np
from pandas.types.common import is_scalar
from pandas.core.api import DataFrame, Series
from pandas.util.decorators import Substitution, Appender
# Public API: every deprecated moving/expanding/ewm wrapper exported here.
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
           'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
           'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
           'rolling_quantile', 'rolling_median', 'rolling_apply',
           'rolling_window',
           'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
           'expanding_count', 'expanding_max', 'expanding_min',
           'expanding_sum', 'expanding_mean', 'expanding_std',
           'expanding_cov', 'expanding_corr', 'expanding_var',
           'expanding_skew', 'expanding_kurt', 'expanding_quantile',
           'expanding_median', 'expanding_apply']
# -----------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_ewm_kw = r"""com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs):
    """
    wrapper function to dispatch to the appropriate window functions
    wraps/unwraps ndarrays for compat

    can be removed when ndarray support is removed

    dispatch: method family on the pandas object ('rolling', 'expanding',
        'ewm')
    name: statistic evaluated on the dispatched object (e.g. 'mean')
    arg: Series/DataFrame, or a 1-d/2-d ndarray (deprecated) to be wrapped
    func_kw: keyword names that belong to the statistic call rather than
        the dispatcher

    Fix: in the deprecation message, the keyword portion of the suggested
    replacement used to overwrite the positional portion (dead store)
    instead of being appended to it.
    """
    is_ndarray = isinstance(arg, np.ndarray)
    if is_ndarray:
        if arg.ndim == 1:
            arg = Series(arg)
        elif arg.ndim == 2:
            arg = DataFrame(arg)
        else:
            raise AssertionError("cannot support ndim > 2 for ndarray compat")

        warnings.warn("pd.{dispatch}_{name} is deprecated for ndarrays and "
                      "will be removed "
                      "in a future version"
                      .format(dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)

    # partition the keywords: func_kw entries go to the statistic call, the
    # remainder to the dispatcher
    if func_kw is None:
        func_kw = []
    kwds = {}
    for k in func_kw:
        value = kwargs.pop(k, None)
        if value is not None:
            kwds[k] = value

    # how is a keyword that if not-None should be in kwds
    how = kwargs.pop('how', None)
    if how is not None:
        kwds['how'] = how

    r = getattr(arg, dispatch)(**kwargs)

    if not is_ndarray:
        # give a helpful deprecation message
        # with copy-pastable arguments
        pargs = ','.join(["{a}={b}".format(a=a, b=b)
                          for a, b in kwargs.items() if b is not None])

        def f(a, b):
            if is_scalar(b):
                return "{a}={b}".format(a=a, b=b)
            return "{a}=<{b}>".format(a=a, b=type(b).__name__)
        aargs = ','.join(args)
        if len(aargs):
            aargs += ','
        # append (not overwrite) the keyword portion of the suggestion
        aargs += ','.join([f(a, b) for a, b in kwds.items()
                           if b is not None])
        warnings.warn("pd.{dispatch}_{name} is deprecated for {klass} "
                      "and will be removed in a future version, replace with "
                      "\n\t{klass}.{dispatch}({pargs}).{name}({aargs})"
                      .format(klass=type(arg).__name__, pargs=pargs,
                              aargs=aargs, dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)

    result = getattr(r, name)(*args, **kwds)

    if is_ndarray:
        result = result.values
    return result
def rolling_count(arg, window, **kwargs):
    """
    Rolling count of number of non-NaN observations inside provided window.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    window : int
        Size of the moving window. This is the number of observations used
        for calculating the statistic.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window
    how : string, default 'mean'
        Method for down- or re-sampling

    Returns
    -------
    rolling_count : type of caller
    """
    # Everything funnels through the compat dispatcher, which emits the
    # deprecation warning and handles ndarray wrapping.
    options = dict(kwargs)
    options['window'] = window
    return ensure_compat('rolling', 'count', arg, **options)
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw + _ddof_kw, _flex_retval,
              _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy shorthand: rolling_cov(arg, window) means pairwise
    # self-covariance with the second positional slot holding the window.
    if window is None and isinstance(arg2, (int, float)):
        window = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True  # only default unset
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True  # only default unset
    return ensure_compat('rolling', 'cov', arg1, other=arg2, window=window,
                         pairwise=pairwise,
                         func_kw=['other', 'pairwise', 'ddof'], **kwargs)
@Substitution("Moving sample correlation.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy shorthand: rolling_corr(arg, window) means pairwise
    # self-correlation with the second positional slot holding the window.
    if window is None and isinstance(arg2, (int, float)):
        window = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True  # only default unset
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True  # only default unset
    return ensure_compat('rolling', 'corr', arg1, other=arg2, window=window,
                         pairwise=pairwise, func_kw=['other', 'pairwise'],
                         **kwargs)
# -----------------------------------------------------------------------------
# Exponential moving moments
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
              _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
         freq=None, adjust=True, how=None, ignore_na=False):
    # All keywords pass straight through to the ewm dispatcher.
    decay = dict(com=com, span=span, halflife=halflife, alpha=alpha)
    return ensure_compat('ewm', 'mean', arg, min_periods=min_periods,
                         freq=freq, adjust=adjust, how=how,
                         ignore_na=ignore_na, **decay)
@Substitution("Exponentially-weighted moving variance", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # 'bias' belongs to the statistic call itself, hence func_kw.
    decay = dict(com=com, span=span, halflife=halflife, alpha=alpha)
    return ensure_compat('ewm', 'var', arg, min_periods=min_periods,
                         freq=freq, adjust=adjust, how=how,
                         ignore_na=ignore_na, bias=bias,
                         func_kw=['bias'], **decay)
@Substitution("Exponentially-weighted moving std", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # 'bias' belongs to the statistic call itself, hence func_kw.
    decay = dict(com=com, span=span, halflife=halflife, alpha=alpha)
    return ensure_compat('ewm', 'std', arg, min_periods=min_periods,
                         freq=freq, adjust=adjust, how=how,
                         ignore_na=ignore_na, bias=bias,
                         func_kw=['bias'], **decay)

# Historical alias: volatility is the EWM standard deviation.
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
           min_periods=0, bias=False, freq=None, pairwise=None, how=None,
           ignore_na=False, adjust=True):
    # Legacy shorthand: ewmcov(arg, com_value) means pairwise
    # self-covariance with the second positional slot holding com.
    if arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    elif isinstance(arg2, (int, float)) and com is None:
        com = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    decay = dict(com=com, span=span, halflife=halflife, alpha=alpha)
    return ensure_compat('ewm', 'cov', arg1, other=arg2,
                         min_periods=min_periods, bias=bias, freq=freq,
                         how=how, ignore_na=ignore_na, adjust=adjust,
                         pairwise=pairwise,
                         func_kw=['other', 'pairwise', 'bias'], **decay)
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
            min_periods=0, freq=None, pairwise=None, how=None, ignore_na=False,
            adjust=True):
    # Legacy shorthand: ewmcorr(arg, com_value) means pairwise
    # self-correlation with the second positional slot holding com.
    if arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    elif isinstance(arg2, (int, float)) and com is None:
        com = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    decay = dict(com=com, span=span, halflife=halflife, alpha=alpha)
    return ensure_compat('ewm', 'corr', arg1, other=arg2,
                         min_periods=min_periods, freq=freq, how=how,
                         ignore_na=ignore_na, adjust=adjust,
                         pairwise=pairwise,
                         func_kw=['other', 'pairwise'], **decay)
# ---------------------------------------------------------------------
# Python interface to Cython functions
def _rolling_func(name, desc, how=None, func_kw=None, additional_kw=''):
    """Factory for the deprecated rolling_* wrapper functions.

    name: statistic method name on the Rolling object
    desc: one-line description substituted into the doc template
    how: down/re-sampling default advertised in the generated docs
    func_kw: keyword names forwarded to the statistic call itself
    additional_kw: extra parameter documentation appended to the template

    Fix: the substituted `how` default was missing its closing quote in the
    generated docstring ("'%s" -> "'%s'").
    """
    if how is None:
        how_arg_str = 'None'
    else:
        how_arg_str = "'%s'" % how

    @Substitution(desc, _unary_arg, _roll_kw % how_arg_str + additional_kw,
                  _type_of_input_retval, _roll_notes)
    @Appender(_doc_template)
    def f(arg, window, min_periods=None, freq=None, center=False,
          **kwargs):
        return ensure_compat('rolling',
                             name,
                             arg,
                             window=window,
                             min_periods=min_periods,
                             freq=freq,
                             center=center,
                             func_kw=func_kw,
                             **kwargs)
    return f
# Concrete rolling_* statistics, generated from the factory above.
rolling_max = _rolling_func('max', 'Moving maximum.', how='max')
rolling_min = _rolling_func('min', 'Moving minimum.', how='min')
rolling_sum = _rolling_func('sum', 'Moving sum.')
rolling_mean = _rolling_func('mean', 'Moving mean.')
rolling_median = _rolling_func('median', 'Moving median.', how='median')

# std/var forward the ddof keyword through to the statistic call.
rolling_std = _rolling_func('std', 'Moving standard deviation.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_var = _rolling_func('var', 'Moving variance.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_skew = _rolling_func('skew', 'Unbiased moving skewness.')
rolling_kurt = _rolling_func('kurt', 'Unbiased moving kurtosis.')
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
                     center=False):
    """Moving quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window (number of observations per statistic).
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window

    Returns
    -------
    y : type of input argument
    """
    # 'quantile' is a statistic-level keyword; the rest configure the window.
    options = dict(window=window, freq=freq, center=center,
                   min_periods=min_periods)
    return ensure_compat('rolling', 'quantile', arg, quantile=quantile,
                         func_kw=['quantile'], **options)
def rolling_apply(arg, window, func, min_periods=None, freq=None,
                  center=False, args=(), kwargs={}):
    """Generic moving function application.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window (number of observations per statistic).
    func : function
        Must produce a single value from an ndarray input
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window
    args : tuple
        Passed on to func
    kwargs : dict
        Passed on to func

    Returns
    -------
    y : type of input argument
    """
    # NOTE: the mutable `kwargs={}` default is part of the public signature;
    # it is only forwarded (never mutated here), so it is preserved.
    options = dict(window=window, freq=freq, center=center,
                   min_periods=min_periods)
    return ensure_compat('rolling', 'apply', arg,
                         func_kw=['func', 'args', 'kwargs'],
                         func=func, args=args, kwargs=kwargs, **options)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
                   freq=None, center=False, mean=True,
                   axis=0, how=None, **kwargs):
    """
    Applies a moving window of type ``window_type`` and size ``window``
    on the data.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int or ndarray
        Weighting window specification. If the window is an integer, then it
        is treated as the window length and win_type is required
    win_type : str, default None
        Window type: boxcar, triang, blackman, hamming, bartlett, parzen,
        bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
        gaussian (needs std), general_gaussian (needs power, width),
        slepian (needs width)
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window
    mean : boolean, default True
        If True computes weighted mean, else weighted sum
    axis : {0, 1}, default 0
    how : string, default 'mean'
        Method for down- or re-sampling

    Returns
    -------
    y : type of input argument
    """
    # NOTE(review): `how` is accepted but never forwarded to the dispatcher;
    # behaviour preserved as-is.
    stat = 'mean' if mean else 'sum'
    options = dict(window=window, win_type=win_type, freq=freq,
                   center=center, min_periods=min_periods, axis=axis)
    return ensure_compat('rolling', stat, arg,
                         func_kw=kwargs.keys(), **options, **kwargs)
def _expanding_func(name, desc, func_kw=None, additional_kw=''):
    """Create a module-level ``expanding_<name>`` wrapper.

    The wrapper's docstring is assembled from the shared templates via the
    ``Substitution``/``Appender`` decorators; the actual computation is
    delegated to :func:`ensure_compat`.
    """
    kw_doc = _expanding_kw + additional_kw

    @Substitution(desc, _unary_arg, kw_doc, _type_of_input_retval, "")
    @Appender(_doc_template)
    def wrapper(arg, min_periods=1, freq=None, **kwargs):
        # Delegate to the new-style .expanding() machinery.
        return ensure_compat(
            'expanding', name, arg,
            min_periods=min_periods,
            freq=freq,
            func_kw=func_kw,
            **kwargs)

    return wrapper
# Concrete expanding aggregations, generated from the shared factory above.
expanding_max = _expanding_func('max', 'Expanding maximum.')
expanding_min = _expanding_func('min', 'Expanding minimum.')
expanding_sum = _expanding_func('sum', 'Expanding sum.')
expanding_mean = _expanding_func('mean', 'Expanding mean.')
expanding_median = _expanding_func('median', 'Expanding median.')
# std/var accept the extra ``ddof`` keyword, so their generated docstrings
# also get the ddof paragraph appended.
expanding_std = _expanding_func('std', 'Expanding standard deviation.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_var = _expanding_func('var', 'Expanding variance.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_skew = _expanding_func('skew', 'Unbiased expanding skewness.')
expanding_kurt = _expanding_func('kurt', 'Unbiased expanding kurtosis.')
def expanding_count(arg, freq=None):
    """
    Expanding count of number of non-NaN observations.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.

    Returns
    -------
    expanding_count : type of caller

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # count takes no extra keyword arguments, hence no func_kw routing.
    return ensure_compat('expanding', 'count', arg, freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
    """Expanding quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # ``quantile`` is forwarded to the aggregation itself via func_kw.
    return ensure_compat('expanding',
                         'quantile',
                         arg,
                         freq=freq,
                         min_periods=min_periods,
                         func_kw=['quantile'],
                         quantile=quantile)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw + _ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None,
                  pairwise=None, ddof=1):
    # Single-argument call: covariance of arg1 with itself, pairwise.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    # Back-compat branch: a numeric second positional argument is treated as
    # min_periods.  NOTE(review): only reachable when min_periods is
    # explicitly passed as None (its default is 1) -- confirm intended.
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('expanding',
                         'cov',
                         arg1,
                         other=arg2,
                         min_periods=min_periods,
                         pairwise=pairwise,
                         freq=freq,
                         ddof=ddof,
                         func_kw=['other', 'pairwise', 'ddof'])
@Substitution("Expanding sample correlation.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
    # Single-argument call: correlation of arg1 with itself, pairwise.
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    # Back-compat branch: numeric second positional argument == min_periods
    # (only reachable when min_periods is explicitly None; default is 1).
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    return ensure_compat('expanding',
                         'corr',
                         arg1,
                         other=arg2,
                         min_periods=min_periods,
                         pairwise=pairwise,
                         freq=freq,
                         # NOTE(review): 'ddof' is listed here although no
                         # ddof keyword is passed -- presumably harmless
                         # (nothing to pop), but confirm against
                         # ensure_compat / compare with expanding_cov.
                         func_kw=['other', 'pairwise', 'ddof'])
def expanding_apply(arg, func, min_periods=1, freq=None,
                    args=(), kwargs=None):
    """Generic expanding function application.

    Parameters
    ----------
    arg : Series, DataFrame
    func : function
        Must produce a single value from an ndarray input
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the
        statistic. Specified as a frequency string or DateOffset object.
    args : tuple
        Passed on to func
    kwargs : dict, optional
        Passed on to func.  Defaults to an empty dict.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
    """
    # The original signature used a mutable default (``kwargs={}``), which is
    # shared across all calls and could be mutated downstream; normalize a
    # None default to a fresh dict instead.
    if kwargs is None:
        kwargs = {}
    return ensure_compat('expanding',
                         'apply',
                         arg,
                         freq=freq,
                         min_periods=min_periods,
                         func_kw=['func', 'args', 'kwargs'],
                         func=func,
                         args=args,
                         kwargs=kwargs)
| mit |
florian-wagner/gimli | python/pygimli/mplviewer/dataview.py | 1 | 2988 | # -*- coding: utf-8 -*-
import pygimli as pg
#import pygimli.utils
import numpy as np
#from numpy import arange, ndarray, array, ma
#import matplotlib as mpl
#from colorbar import *
#def annotateSeparationAxis(ax, shemeID, grid = False):
#'''
#Draw y-axes tick labels corresponding to the separation
#'''
#prefix = DataShemeManager().sheme(shemeID).prefix
#def sepName(l):
#suffix = ""
#if l == 0:
#return ''
#elif l > 0:
#suffix = "'"
#if grid:
#ax.plot(ax.get_xlim(), [l,l], color = 'black', linewidth = 1, linestyle='dotted')
#return prefix + ' $' + str(abs(int(l))) + suffix +'$'
#ax.yaxis.set_ticklabels(map(lambda l: sepName(l), ax.yaxis.get_ticklocs()))
## END def annotateSeparations(...)
def drawSensorAsMarker(ax, data):
    '''
    Draw sensor markers ('x') into matplotlib axes *ax*; the returned
    marker artist is pickable (picker radius 5 points).

    Parameters
    ----------
    ax : matplotlib axes
    data : data container providing sensorPositions() and sensorCount()

    Returns
    -------
    The matplotlib line artist holding the sensor markers.
    '''
    # Hoist the accessor call: the original re-evaluated
    # data.sensorPositions() on every loop iteration.
    positions = data.sensorPositions()
    elecsX = [positions[i][0] for i in range(len(positions))]
    elecsY = [positions[i][1] for i in range(len(positions))]
    electrodeMarker, = ax.plot(elecsX, elecsY, 'x', color='black', picker=5.)
    # Pad the x-range by one unit on each side of the first/last sensor.
    ax.set_xlim([positions[0][0] - 1.,
                 positions[data.sensorCount() - 1][0] + 1.])
    return electrodeMarker
# END def drawElectrodesAsMarker(...)
def drawTravelTimeData(a, data):
    '''
    Draw first arrival traveltime data into mpl axes a.
    data of type \\ref DataContainer must contain sensorIdx 's' and 'g' and
    thus numbered internal from [0..n)
    '''
    x = pg.x(data.sensorPositions())
    shots = pg.unique(pg.sort(data('s')))
    geoph = pg.unique(pg.sort(data('g')))
    # Detect 1-based sensor numbering.  BUG FIX: the original tested
    # ``min(min(shots), min(geoph) == 1)``, which compares min(geoph) to 1
    # *before* taking the minimum, so e.g. shots starting at 1 with
    # geophones starting at 2 were not recognised as 1-based.
    startOffsetIDX = 0
    if min(min(shots), min(geoph)) == 1:
        startOffsetIDX = 1
    a.set_xlim([min(x), max(x)])
    a.set_ylim([max(data('t')), -0.002])
    a.figure.show()
    # One polyline of travel times per shot.
    for shot in shots:
        gIdx = pg.find(data('s') == shot)
        sensorIdx = [int(i__ - startOffsetIDX) for i__ in data('g')[gIdx]]
        a.plot(x[sensorIdx], data('t')[gIdx], 'x-')
    # Size of one display pixel in data coordinates, used to offset markers.
    yPixel = a.transData.inverted().transform_point((1, 1))[1] - \
        a.transData.inverted().transform_point((0, 0))[1]
    xPixel = a.transData.inverted().transform_point((1, 1))[0] - \
        a.transData.inverted().transform_point((0, 0))[0]
    # draw shot points
    a.plot(x[[int(i__ - startOffsetIDX) for i__ in shots]],
           np.zeros(len(shots)) + 8. * yPixel, 'gv', markersize=8)
    # draw geophone points
    a.plot(x[[int(i__ - startOffsetIDX) for i__ in geoph]],
           np.zeros(len(geoph)) + 3. * yPixel, 'r^', markersize=8)
    a.grid()
    a.set_ylim([max(data('t')), +16. * yPixel])
    a.set_xlim([min(x) - 5. * xPixel, max(x) + 5. * xPixel])
    a.set_xlabel('x-Coordinate [m]')
    a.set_ylabel('Traveltime [ms]')
# def drawTravelTimeData(...) | gpl-3.0 |
CforED/Machine-Learning | sklearn/linear_model/tests/test_base.py | 19 | 12955 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data
from sklearn.linear_model.base import sparse_center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # LinearRegression recovers exact fits on tiny datasets.
    def check(X, Y, coef, intercept, pred):
        model = LinearRegression()
        model.fit(X, Y)
        assert_array_almost_equal(model.coef_, coef)
        assert_array_almost_equal(model.intercept_, intercept)
        assert_array_almost_equal(model.predict(X), pred)

    # a simple dataset: perfect line through two points
    check([[1], [2]], [1, 2], coef=[1], intercept=[0], pred=[1, 2])
    # test it also for degenerate input: a single sample
    check([[1]], [0], coef=[0], intercept=[0], pred=[0])
def test_linear_regression_sample_weights():
    # Fitted coefficients must match the closed-form weighted least-squares
    # solution, with and without an intercept.
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)
    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)
        for intercept in (True, False):
            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_
            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)
            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                # prepend a constant column so the intercept is estimated
                # as the first coefficient of the augmented system
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)
            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))
            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
    # Sample weights must be either scalar or 1D; all of these must be
    # accepted by fit() without raising.
    rng = np.random.RandomState(42)
    for n_samples, n_features in ((2, 3), (3, 2)):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        weights_1d = rng.randn(n_samples) ** 2 + 1
        # make sure the "OK" sample weights actually work
        for weights in (weights_1d, 1., 2.):
            LinearRegression().fit(X, y, weights)
def test_fit_intercept():
    # Test assertions on betas shape.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])
    lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
    lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
    # coef_ shape must not depend on whether an intercept was fitted ...
    assert_equal(lr2_with_intercept.coef_.shape,
                 lr2_without_intercept.coef_.shape)
    assert_equal(lr3_with_intercept.coef_.shape,
                 lr3_without_intercept.coef_.shape)
    # ... and its dimensionality must not depend on n_features
    assert_equal(lr2_without_intercept.coef_.ndim,
                 lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    # Test that linear regression also works with sparse data: on a sparse
    # identity design, coef_ + intercept_ must reconstruct beta exactly.
    rng = check_random_state(random_state)
    n = 100
    for _ in range(10):
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]
        model = LinearRegression()
        model.fit(X, y.ravel())
        assert_array_almost_equal(beta, model.coef_ + model.intercept_)
        assert_array_almost_equal(model.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
    # Test multiple-outcome linear regressions: fitting a stacked (y, y)
    # target must give the single-output prediction in every column.
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    model = LinearRegression(fit_intercept=True)
    model.fit(X, Y)
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(X)
    model.fit(X, y)
    y_pred = model.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    # Test multiple-outcome linear regressions with sparse data
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    # each column of the multi-output prediction equals the 1-D prediction
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    # center_data: check means/std returned and the transformed X/y for the
    # three combinations of fit_intercept/normalize.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # fit_intercept=False: data must pass through unchanged
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True: columns are mean-centered but not scaled
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # normalize=True: additionally scale X by the (n_samples-scaled) std
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    # Both the dense and sparse centering helpers must center a 2-D
    # (multi-output) y column-wise.
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize only affects X, so y behaves as in the previous case
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    # center_data with sample_weight: means are weighted averages.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data returns the means/stds but leaves the sparse X
    # itself uncentered (centering would destroy sparsity).
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    # note: Xt equals the original XA -- X is NOT centered in place
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    # normalization divides by the std but still does not subtract the mean
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    # Test output format of sparse_center_data, when input is csr
    X, y = make_regression()
    X[X < 2.5] = 0.0
    centered = sparse_center_data(sparse.csr_matrix(X), y, True)
    assert_equal(centered[0].getformat(), 'csr')
def test_rescale_data():
    # _rescale_data must multiply rows of X and entries of y by
    # sqrt(sample_weight).
    n_samples, n_features = 200, 2
    rng = np.random.RandomState(0)
    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
    sqrt_w = np.sqrt(sample_weight)
    assert_array_almost_equal(rescaled_X, X * sqrt_w[:, np.newaxis])
    assert_array_almost_equal(rescaled_y, y * sqrt_w)
| bsd-3-clause |
drusk/pml | test/matchers/pandas_matchers.py | 1 | 5445 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Custom Hamcrest matchers for pandas data structures.
@author: drusk
"""
from hamcrest.core.base_matcher import BaseMatcher
from test.matchers.util import equals
class IsSeries(BaseMatcher):
    """
    Matches a pandas Series data structure.
    """
    def __init__(self, as_dict, places=None):
        """
        Creates a new matcher given the expected input as a dictionary.
        Args:
          as_dict: dictionary
            The expected data in key-value format.
          places: int
            The number of decimal places to check when comparing data values.
            Defaults to None, in which case full equality is checked (good for
            ints, but not for floats).
        """
        self.as_dict = as_dict
        self.places = places
    def _numeric_equals(self, val1, val2):
        # Numeric comparison honours the configured `places` tolerance.
        return equals(val1, val2, self.places)
    def _non_numeric_equals(self, val1, val2):
        # Non-numeric values are compared with plain equality.
        return val1 == val2
    def _get_equals_function(self, data):
        # Pick the comparison strategy based on the Series' dtype.
        if _is_numeric_type(data):
            return self._numeric_equals
        else:
            return self._non_numeric_equals
    def _matches(self, series):
        # Lengths must agree before any element-wise comparison; every
        # expected key must be present with a matching value.
        equals = self._get_equals_function(series)
        if len(self.as_dict) != len(series):
            return False
        for key in self.as_dict:
            if not equals(series[key], self.as_dict[key]):
                return False
        return True
    def describe_to(self, description):
        # Used by hamcrest to build the mismatch message.
        description.append_text("pandas Series with elements: ")
        description.append_text(self.as_dict.__str__())
class IsDataFrame(BaseMatcher):
    """
    Matches a pandas DataFrame data structure.
    """
    def __init__(self, as_list, places):
        """
        Creates a new matcher given the expected input as a list of lists.
        Args:
          as_list: list(list)
            The expected data as a list of lists.
          places: int
            The number of decimal places to check when comparing data values.
            Defaults to None, in which case full equality is checked (good for
            ints, but not for floats).
        """
        self.as_list = as_list
        self.places = places
    def _matches(self, dataframe):
        expected_num_rows = len(self.as_list)
        if expected_num_rows != dataframe.shape[0]:
            return False
        # BUG FIX: the original used ``map(len, self.as_list)`` and then
        # indexed ``row_lengths[0]`` -- on Python 3 map() returns an
        # iterator, which supports neither len() nor indexing.
        row_lengths = [len(row) for row in self.as_list]
        if len(set(row_lengths)) != 1:
            # row lengths are not all the same
            return False
        expected_num_columns = row_lengths[0]
        if expected_num_columns != dataframe.shape[1]:
            return False
        for i, row in enumerate(self.as_list):
            # NOTE(review): .ix is removed in modern pandas; position-based
            # access here would be .iloc -- confirm before upgrading pandas.
            actual_row = dataframe.ix[i].tolist()
            for j, expected in enumerate(row):
                if not equals(actual_row[j], expected, places=self.places):
                    return False
        return True
    def describe_to(self, description):
        # Used by hamcrest to build the mismatch message.
        description.append_text("pandas DataFrame with elements: ")
        description.append_text(self.as_list.__str__())
def equals_series(as_dict, places=None):
    """
    Compares a pandas Series object to the provided dictionary representation.
    Args:
      as_dict: dictionary
        The expected data in key-value format.
      places: int
        The number of decimal places to check when comparing data values.
        Defaults to None, in which case full equality is checked (good for
        ints, but not for floats).
    Returns:
      An IsSeries matcher instance for use with hamcrest's assert_that.
    """
    return IsSeries(as_dict, places)
def equals_dataframe(as_list, places=None):
    """
    Compares a pandas DataFrame object to the provided list representation.
    Since DataFrames are two dimensional, the list should actually be a list
    of lists.
    Args:
      as_list: list(list)
        The expected data, one inner list per row.
      places: int
        Decimal places for numeric comparison; None means exact equality.
    Returns:
      An IsDataFrame matcher instance for use with hamcrest's assert_that.
    """
    return IsDataFrame(as_list, places)
def _is_numeric_type(data):
"""
Checks if the data is any of the numeric types.
Args:
data: data structure with dtype, i.e. pandas.Series, pandas.DataFrame
The data whose type will be checked.
Returns:
True if the data type is numeric, false otherwise.
"""
return "int" in data.dtype.name or "float" in data.dtype.name
| mit |
UNR-AERIAL/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts it number of state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components: one anisotropic Gaussian (via a
# linear transform C) and one isotropic Gaussian shifted to (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
# One subplot per model; each used component is drawn as a scatter of its
# assigned points plus a covariance ellipse.
for i, (clf, title) in enumerate([(gmm, 'GMM'),
                                  (dpgmm, 'Dirichlet Process GMM')]):
    splot = plt.subplot(2, 1, 1 + i)
    Y_ = clf.predict(X)
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        # eigendecomposition gives ellipse axes (v) and orientation (w)
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xlim(-10, 10)
    plt.ylim(-3, 6)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
plt.show()
| bsd-3-clause |
has2k1/plotnine | plotnine/tests/test_geom_segment.py | 1 | 1315 | import pandas as pd
from plotnine import ggplot, aes, geom_segment, arrow, theme
# Number of segments in the synthetic fixture.
n = 4
# stepped horizontal line segments
df = pd.DataFrame({
    'x': range(1, n+1),        # segment start x
    'xend': range(2, n+2),     # segment end x (one unit to the right)
    'y': range(n, 0, -1),      # descending rows
    'yend': range(n, 0, -1),   # yend == y, so base segments are horizontal
    'z': range(1, n+1)         # per-segment value used for color/size/alpha
})
# Leave room on the right edge for the legend in the baseline images.
_theme = theme(subplots_adjust={'right': 0.85})
def test_aesthetics():
    # Each extra layer shifts yend upward so every aesthetic mapping is
    # visible in a single baseline image.
    p = (ggplot(df, aes('x', 'y', xend='xend', yend='yend')) +
         geom_segment(size=2) +
         # Positive slope segments
         geom_segment(aes(yend='yend+1', color='factor(z)'), size=2) +
         geom_segment(aes(yend='yend+2', linetype='factor(z)'), size=2) +
         geom_segment(aes(yend='yend+3', size='z'),
                      show_legend=False) +
         geom_segment(aes(yend='yend+4', alpha='z'), size=2,
                      show_legend=False))
    # plotnine overloads == to compare the plot against the stored
    # baseline image named 'aesthetics'.
    assert p + _theme == 'aesthetics'
def test_arrow():
    # Three layers at increasing x offsets exercise arrowheads at the end,
    # the start, and both ends of a segment.
    p = (ggplot(df, aes('x', 'y', xend='xend', yend='yend')) +
         geom_segment(aes('x+2', xend='xend+2'),
                      arrow=arrow(), size=2) +
         geom_segment(aes('x+4', xend='xend+4'),
                      arrow=arrow(ends='first'), size=2) +
         geom_segment(aes('x+6', xend='xend+6'),
                      arrow=arrow(ends='both'), size=2)
         )
    # Compared against the stored baseline image named 'arrow'.
    assert p == 'arrow'
| gpl-2.0 |
igryski/TRMM_blend | src/folium/plot_TRMM_precip_correct_map_folium.py | 1 | 14152 | # Make python script executable
#!/usr/bin/python
# pylint: disable=C0103
# pylint: disable-msg=C0103
# Script produces TRMM precip filtered using update (Zhong) Land Sea mask
# ==========================================
# Author: I.Stepanov (igor.stepanov@knmi.nl)
# 22.04.2016 @KNMI
# ============================================================================================
# Updates list
# 22.04.2016. Script created as a derivative of plotting TRMM Land Sea Mask
# ============================================================================================
import folium
print(folium.__file__)
print(folium.__version__)
from fastkml.kml import KML
from IPython.core.display import HTML
def read_kml(fname='ss.kml'):
    """Read placemark coordinates from a KML file.

    Only placemarks whose ``styleUrl`` starts with ``'#hf'`` are kept.

    Parameters
    ----------
    fname : str
        Path to the KML file.

    Returns
    -------
    dict mapping placemark name to a (y, x) coordinate tuple.
    """
    kml = KML()
    # Use a context manager so the file handle is closed deterministically;
    # the original ``open(fname).read()`` left the handle to the GC.
    with open(fname) as kml_file:
        kml.from_string(kml_file.read())
    points = dict()
    for feature in kml.features():
        for placemark in feature.features():
            if placemark.styleUrl.startswith('#hf'):
                points[placemark.name] = (placemark.geometry.y,
                                          placemark.geometry.x)
    return points
# KML file with the radar placemarks, relative to the working directory.
fname = './data/ss.kml'
locations = read_kml(fname)
def inline_map(m, width=650, height=500):
    """Render a folium map instance as an inline HTML iframe.

    Parameters
    ----------
    m : folium.Map
        The map to embed (uses the legacy folium ``_build_map`` API).
    width, height : int
        Pixel dimensions of the generated iframe.

    Returns
    -------
    IPython.core.display.HTML
        An iframe whose ``srcdoc`` attribute carries the rendered page.
    """
    m._build_map()
    # Double quotes must be HTML-escaped as &quot; so the rendered page
    # can live inside the iframe's srcdoc="..." attribute; replacing a
    # quote with itself (as the original did) was a no-op.
    srcdoc = m.HTML.replace('"', '&quot;')
    embed = HTML('<iframe srcdoc="{}" '
                 'style="width: {}px; height: {}px; '
                 'border: none"></iframe>'.format(srcdoc, width, height))
    return embed
width, height = 650, 500
radars = folium.Map(location=[40, -122], zoom_start=5,
                    tiles='OpenStreetMap', width=width, height=height)
for name, location in locations.items():
    radars.simple_marker(location=location, popup=name)
inline_map(radars)
# # Load python modules
# import netCDF4
# import pylab as pl
# import numpy as np
# import matplotlib as mpl
# import matplotlib.pyplot as plt
# from netCDF4 import Dataset
# from pylab import *
# import math as m
# from mpl_toolkits.basemap import Basemap, cm
# # Define some paths
# # ==========================================================================================
# in_path = "/nobackup/users/stepanov/TRMM_data/nc/annual_files/cropped/"
# in_path_rr_SACA = "/nobackup/users/stepanov/SACA/final/"
# in_path_lsmsk_TRMM = "/nobackup/users/stepanov/TRMM_data/Land_Sea_Mask/"
# # Files
# #===========================================================================================
# # Precip TRMM
# #
# #file_name='3B42_daily.2013_georef_SACA.nc'
# file_name = '3B42_daily.2000_georef_SACA.nc'
# # ncks -d latitude,-24.875,25.125 -d longitude,80.125,179.875 3B42_daily.2015.12.29.7.nc
# # 3B42_daily.2015.12.29.7_georef_SACA.nc
# # Precip SACA rr
# file_r_SACA = 'rr_0.25deg_regular.nc'
# # Land Sea Maks TRMM update by Zhong Liu, Ph.D. Zhong.Liu-1@nasa.gov, remapped as NN to TRMM r
# file_lsm_TRMM_cdo_to_SACA_coords = 'TMPA_land_sea_mask_georef_SACA.nc'
# #ncks -d lat,-24.875,25.125 -d lon,80.125,179.875 TMPA_land_sea_mask.nc
# # TMPA_land_sea_mask_georef_SACA.nc
# #===========================================================================================
# # Full file paths
# #===========================================================================================
# file_pr = [in_path+file_name]
# file_rr_SACA = [in_path_rr_SACA+file_r_SACA]
# file_lsmask_TRMM_cdo_to_SACA = [in_path_lsmsk_TRMM+file_lsm_TRMM_cdo_to_SACA_coords]
# # Review imported file paths in log
# print "Location of TRMM precipitation file is: ", file_pr
# print
# print
# print "Location of SACA precip file is: ", file_rr_SACA
# print
# print
# print "Location of TRMM land-sea mask file is: ", file_lsmask_TRMM_cdo_to_SACA
# print
# print
# #===========================================================================================
# # Define paths to NC files
# #===========================================================================================
# # Precip and elevation (Land Sea Mask)
# nc_trmm = Dataset(in_path+file_name, 'r')
# # [latitude, longitude][201x400]
# nc_SACA_rr = Dataset(in_path_rr_SACA+file_r_SACA, 'r')
# # [longitude, latitude][400x201]
# nc_lsmask_trmm = Dataset(in_path_lsmsk_TRMM+file_lsm_TRMM_cdo_to_SACA_coords)
# # new LS maks by Zhong Liu
# # Coordinates for TRMM
# lons = nc_trmm.variables['longitude']
# lats = nc_trmm.variables['latitude']
# # Coordinates for SACA
# lons_saca = nc_SACA_rr.variables['longitude']
# lats_saca = nc_SACA_rr.variables['latitude']
# # Coordinates for LS mask
# lons_ls_mask = nc_lsmask_trmm.variables['lon'][:]
# lats_ls_mask = nc_lsmask_trmm.variables['lat'][:]
# print 'lats_ls_mask', lats_ls_mask
# # =======================================================================================
# # Extract the actual variable
# # For TRMM data go from 1-365 in ncview, but python counts 0-364
# #
# # INDIVIDUAL DAY
# #
# # =======================================================================================
# # trmm_precip = nc_trmm.variables['r'][89,:,:]
# # [time, lat, lon], 0= 01.01.2013 (python). 90 is 31st March ideally.
# trmm_precip = nc_trmm.variables['r'][161, :, :]
# # [time, lat, lon], 0= 01.01.2013 (python). 161 is 31st March ideally.
# saca_precip = nc_SACA_rr.variables['rr'][11688, :, :]
# # 11688 = 01.Jan.2013. (python)
# trmm_lsmask = nc_lsmask_trmm.variables['landseamask'][:, :]
# # [landseamask, latitude, longitude]
# # =======================================================================================
# # 1-12418 in ncview, but python counts 0-12417
# # Accumulated period
# # =======================================================================================
# # Import entire year of precip data now
# trmm_precip_array = nc_trmm.variables['r'][0-364, :, :]
# # [time, lat, lon], 0= 01.01.2013 (python)
# trmm_precip_array_2 = nc_trmm.variables['r'][:, :, :]
# # =======================================================================================
# print
# #print 'precip array 2013', trmm_precip_array
# print
# #print 'precip array 2013_2', trmm_precip_array_2
# print
# #print 'precip array 2013_2 - precip array 2013', trmm_precip_array_2-trmm_precip_array
# #quit()
# # Data pre-processing
# #===========================================================================================
# # Pre-process TRMM land sea mask
# #==================================
# # # Define fill_value
# # fill_value=-999.9
# # # All land points convert to 1
# # trmm_lsmask[trmm_lsmask!=100]=1.
# # # All sea points convert to fill_value (-999.9)
# # trmm_lsmask[trmm_lsmask==100]=fill_value
# # # New mask should now be: 1=land, fill_value=sea
# # # Multiply with TRMM data when plotting
# # # SPrint new TRMM mask (1,fill_value only!)
# # print 'TRMM land sea mask',trmm_lsmask
# # print
# # Do the same with new TRMM land sea mask (cdo remapnn to SACA coordinates)
# # Pre-process SACA land sea mask
# #==================================
# # # All land points convert to 1
# # trmm_lsmask_cdo[trmm_lsmask_cdo!=100]=1.
# # # All sea points convert to fill_value (-999.9)
# # trmm_lsmask_cdo[trmm_lsmask_cdo==100]=fill_value
# # # New mask should now be: 1=land, fill_value=sea
# # # Multiply with TRMM data when plotting
# # # SPrint new TRMM mask (1,fill_value only!)
# # print 'TRMM land sea mask CDO to SACA',trmm_lsmask_cdo
# # print
# # Design FIGURE
# # ================================================================
# xsize = 20
# ysize = 10
# fig = plt.figure(figsize=(xsize, ysize))
# # Map projection
# # ================================================================
# # Experimental to match coast line better with TRMM orography
# m = Basemap(projection='gall',
# # lat_0=0.125, lon_0=130,
# llcrnrlon=80.125, llcrnrlat=-24.875,
# urcrnrlon=179.875, urcrnrlat=25.125,
# # fix_aspect=True,
# area_thresh=100.0,
# resolution='i')
# m.drawcoastlines(linewidth=0.75)
# m.drawcountries(linewidth=0.75)
# # draw parallels.
# parallels = np.arange(-40., 40, 10.)
# m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=10)
# # draw meridians
# meridians = np.arange(80., 180., 10.)
# m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=10)
# # Colorbar with NSW Precip colors
# nws_precip_colors = [
# "#04e9e7", # 0.01 - 0.10 inches
# "#019ff4", # 0.10 - 0.25 inches
# "#0300f4", # 0.25 - 0.50 inches
# "#02fd02", # 0.50 - 0.75 inches
# "#01c501", # 0.75 - 1.00 inches
# "#008e00", # 1.00 - 1.50 inches
# "#fdf802", # 1.50 - 2.00 inches
# "#e5bc00", # 2.00 - 2.50 inches
# "#fd9500", # 2.50 - 3.00 inches
# "#fd0000", # 3.00 - 4.00 inches
# "#d40000", # 4.00 - 5.00 inches
# "#bc0000", # 5.00 - 6.00 inches
# "#f800fd", # 6.00 - 8.00 inches
# "#9854c6", # 8.00 - 10.00 inches
# "#fdfdfd" # 10.00+
# ]
# precip_colormap = matplotlib.colors.ListedColormap(nws_precip_colors)
# # Make grid for TRMM data
# # ny = trmm_precip.shape[0]
# # nx = trmm_precip.shape[1]
# # lons, lats = m.makegrid(nx, ny) # get lat/lons of ny by nx evenly spaced grid.
# # x, y = m(lons, lats) # compute map proj coordinates.
# # Alternative grid
# lonst, latst = np.meshgrid(lons, lats)
# x, y = m(lonst, latst)
# # Make grid for SACA data
# ny_saca = saca_precip.shape[0]
# nx_saca = saca_precip.shape[1]
# lons_saca, lats_saca = m.makegrid(nx_saca, ny_saca)
# x_saca, y_saca = m(lons_saca, lats_saca)
# # Make grid for TRMM Land Sea mask (updated)
# lons_mask, lats_mask = np.meshgrid(lons_ls_mask, lats_ls_mask)
# x_mask, y_mask = m(lons_mask, lats_mask)
# print 'lons_mask', lons_mask
# print 'lats_mask', lats_mask
# print
# # ================================================================
# # Actual plotting and rendering
# # ================================================================
# # # Alternative SACA NaN removal
# # #
# # # where_are_NaNs = isnan(saca_precip)
# # # saca_precip[where_are_NaNs] = 0
# # print 'SACA LS mask is: ', saca_precip
# # print
# # clevs_saca_oro=(0.0,1.0)
# # #cs = m.contourf(x_saca,y_saca,saca_precip,clevs_saca_oro,cmap=cm.s3pcpn)
# # #cs = m.contourf(x_saca,y_saca,trmm_lsmask-(saca_precip*0.0+5.0),cmap=cm.s3pcpn)
# # cs = m.contourf(x_saca,y_saca,trmm_lsmask-saca_precip,cmap=cm.s3pcpn)
# # cs = m.contourf(x_saca,y_saca,trmm_lsmask-(saca_precip*0.0+5.0),clevs,cmap=cm.s3pcpn)
# # cbar = m.colorbar(cs)
# # plt.title('TRMM min SACA [precip] Land Sea Mask (01. January 2014)', size=26)
# # savefig('plots/Land_sea_mask_TRMM_min_SACA_precip.png',optimize=True,quality=85,dpi=900)
# #SACA LS mask
# # ===============
# # cs = m.contourf(x_saca,y_saca,saca_precip*0.0+5.0,clevs,cmap=cm.s3pcpn)
# # cbar = m.colorbar(cs)
# # plt.title('SACA precip Land Sea Mask (01. January 2013)', size=26)
# # savefig('plots/Land_sea_mask_SACA_precip.png',optimize=True,quality=85,dpi=900)
# # TRMM LS mask
# # Process TRMM_LS_maks so that only land points are used
# #
# trmm_lsmask[trmm_lsmask==100.0]=np.NaN
# # clevs_oro=[0.0,5.0,10.0]
# # cs = m.contourf(x,y,trmm_lsmask,clevs_oro)
# # cs = m.contourf(x,y,trmm_lsmask,clevs,cmap=cm.s3pcpn)
# # Add colorbar
# # cbar = m.colorbar(cs)
# # Add title
# # plt.title('TRMM (NASA) land-sea mask for precip (01. January 2013))', size=26)
# # Set label
# # savefig('plots/Land_sea_mask_TRMM_precip.png',optimize=True,quality=85,dpi=900)
# # TRMM LS mask, when CDO remapped to SACA
# # Process TRMM_LS_maks so that only land points have values
# #
# # trmm_lsmask_cdo[trmm_lsmask_cdo==100.0]=np.NaN
# # Updated LS Mask by NASA
# #cs = m.pcolor(x,y,trmm_lsmask_update)
# #cs= m.pcolormesh(x,y,trmm_lsmask_update)
# #cs =m.contourf(x_mask,y_mask,trmm_lsmask,clevs_wat_perc)
# # Update TRMM precip using new LS mask that is correct (Zhong, NASA)
# # ---------------------------------------------------------------------------------------
# # Without LS mask
# # cs =m.contourf(x,y,trmm_precip,clevs_precip,cmap=cm.s3pcpn)
# # With LS mask
# # cs =m.contourf(x,y,trmm_precip*trmm_lsmask,clevs_precip,cmap=cm.s3pcpn)
# # Used last time
# # clevs_precip_white_zero_SACA = [-0.5,0,0.1,0.5,2.5,5,7.5,10,15,20,30,40,50,100]
# # New clevel upgrade
# # clevs_precip_white_zero_SACA = [0,0.1,0.5,2.5,5,7.5,10,15,20,30,40,50,100]#,200,250]
# # From original color map
# clevs_precip_white_zero_SACA = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0,
# 6.0]
# cs = m.contourf(x, y, trmm_precip*trmm_lsmask,
# clevs_precip_white_zero_SACA,
# cmap=precip_colormap)
# # cs =m.contourf(x,y,trmm_precip*trmm_lsmask,
# # clevs_precip_white_zero,
# # cmap=plt.cm.jet,
# # ticks=[0,1,2.5,7.5,10,15,30,50,100,150])
# # Heavy rain, sum over a year
# #
# # cs = m.contourf(x,y,trmm_precip_array*trmm_lsmask,clevs_precip_med_heavy)
# # cs = m.contourf(x,y,trmm_precip_array*trmm_lsmask,clevs_precip) # entire year
# # cs = m.pcolormesh(x,y,trmm_precip_array+100.0*trmm_lsmask)
# # ---------------------------------------------------------------------------------------
# # Add colorbar
# # cbar =m.colorbar(cs,ticks=[0,0.1,0.5,2.5,5.0,7.5,10,15,20,30,40,50,100]) #
# cbar = m.colorbar(cs)
# # Colorbar units
# cbar.set_label('Rainfall [mm]', fontsize=16)
# # Title
# #
# # plt.title('TRMM precipitation | w/ Land Sea Mask | 31.03.2000', size=26)
# plt.title('TRMM precipitation | w/ Land Sea Mask | 10.06.2010', size=20)
# # Save plot as PNG
# # ------------------------------------------------------------------------------------
# #
# # Without LS mask one day
# #
# # savefig('plots/Precip_TRMM_from_LS_mask_update_contourf_new_lat_0_correct_grid_wo_LS_mask'
# # '_IN.png',optimize=True,quality=85,dpi=300)
# # With LS mask one day
# #
# savefig('plots/Precip_TRMM_from_LS_mask_update_contourf_new_lat_0_correct_grid_w_LS_mask_'
# 'IN_10062010_white_zero_mm_min_one day.png',
# bbox_inches='tight',
# optimize=True,
# quality=85,
# dpi=300)
# quit() | gpl-3.0 |
HolgerPeters/scikit-learn | examples/ensemble/plot_partial_dependence.py | 54 | 4704 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; so does the average rooms
per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
def main():
    """Fit a GBRT on the California housing data and draw the example's
    one-way and two-way partial dependence plots."""
    cal_housing = fetch_california_housing()

    # split 80/20 train-test
    X_train, X_test, y_train, y_test = train_test_split(
        cal_housing.data, cal_housing.target,
        test_size=0.2, random_state=1)
    names = cal_housing.feature_names

    print("Training GBRT...")
    model = GradientBoostingRegressor(n_estimators=100, max_depth=4,
                                      learning_rate=0.1, loss='huber',
                                      random_state=1)
    model.fit(X_train, y_train)
    print(" done.")

    print('Convenience plot with ``partial_dependence_plots``')

    # Four one-way PDPs plus one two-way PDP (avg. occupancy x house age).
    features = [0, 5, 1, 2, (5, 1)]
    fig, axs = plot_partial_dependence(model, X_train, features,
                                       feature_names=names,
                                       n_jobs=3, grid_resolution=50)
    fig.suptitle('Partial dependence of house value on nonlocation features\n'
                 'for the California housing dataset')
    plt.subplots_adjust(top=0.9)  # tight_layout causes overlap with suptitle

    print('Custom 3d plot via ``partial_dependence``')
    fig = plt.figure()

    # Evaluate the two-way PDP grid ourselves and draw it as a surface.
    target_feature = (1, 5)
    pdp, axes = partial_dependence(model, target_feature,
                                   X=X_train, grid_resolution=50)
    XX, YY = np.meshgrid(axes[0], axes[1])
    Z = pdp[0].reshape(list(map(np.size, axes))).T
    ax = Axes3D(fig)
    surface = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1,
                              cmap=plt.cm.BuPu)
    ax.set_xlabel(names[target_feature[0]])
    ax.set_ylabel(names[target_feature[1]])
    ax.set_zlabel('Partial dependence')
    # pretty init view
    ax.view_init(elev=22, azim=122)
    plt.colorbar(surface)
    plt.suptitle('Partial dependence of house value on median age and '
                 'average occupancy')
    plt.subplots_adjust(top=0.9)
    plt.show()


# Needed on Windows because plot_partial_dependence uses multiprocessing
if __name__ == '__main__':
    main()
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
    """Feature selector that removes all low-variance features.

    This feature selection algorithm looks only at the features (X), not the
    desired outputs (y), and can thus be used for unsupervised learning.

    Read more in the :ref:`User Guide <variance_threshold>`.

    Parameters
    ----------
    threshold : float, optional
        Features with a training-set variance lower than this threshold will
        be removed. The default is to keep all features with non-zero
        variance, i.e. remove the features that have the same value in all
        samples.

    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.

    Examples
    --------
    The following dataset has integer features, two of which are the same
    in every sample. These are removed with the default setting for
    threshold::

        >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
        >>> selector = VarianceThreshold()
        >>> selector.fit_transform(X)
        array([[2, 0],
               [1, 4],
               [1, 1]])
    """

    def __init__(self, threshold=0.):
        self.threshold = threshold

    def fit(self, X, y=None):
        """Learn empirical variances from X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Sample vectors from which to compute variances.

        y : any
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.

        Returns
        -------
        self
        """
        # Accept dense input or CSR/CSC sparse matrices, coerced to float64.
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)

        if hasattr(X, "toarray"):  # sparse matrix
            _, variances = mean_variance_axis(X, axis=0)
        else:
            variances = np.var(X, axis=0)
        self.variances_ = variances

        # Refuse to fit if every feature would be dropped.
        if np.all(variances <= self.threshold):
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))

        return self

    def _get_support_mask(self):
        # Keep strictly-above-threshold features only.
        check_is_fitted(self, 'variances_')
        return self.variances_ > self.threshold
| bsd-3-clause |
mje-nz/PX4-Firmware | Tools/process_sensor_caldata.py | 4 | 31659 | #! /usr/bin/env python
from __future__ import print_function
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
from pyulog import *
"""
Reads in IMU data from a static thermal calibration test and performs a curve fit of gyro, accel and baro bias vs temperature
Data can be gathered using the following sequence:
1) Power up the board and set the TC_A_ENABLE, TC_B_ENABLE and TC_G_ENABLE parameters to 1
2) Set all CAL_GYR and CAL_ACC parameters to defaults
3) Set the parameter SDLOG_MODE to 2, and SDLOG_PROFILE "Thermal calibration" bit (2) to enable logging of sensor data for calibration and power off
4) Cold soak the board for 30 minutes
5) Move to a warm dry, still air, constant pressure environment.
6) Apply power for 45 minutes, keeping the board still.
7) Remove power and extract the .ulog file
8) Open a terminal window in the Firmware/Tools directory and run the python calibration script script file: 'python process_sensor_caldata.py <full path name to .ulog file>
9) Power the board, connect QGC and load the parameter from the generated .params file onto the board using QGC. Due to the number of parameters, loading them may take some time.
10) TODO - we need a way for user to reliably tell when parameters have all been changed and saved.
11) After parameters have finished loading, set SDLOG_MODE and SDLOG_PROFILE to their respective values prior to step 4) and remove power.
12) Power the board and perform a normal gyro and accelerometer sensor calibration using QGC. The board must be repowered after this step before flying due to large parameter changes and the thermal compensation parameters only being read on startup.
Outputs thermal compensation parameters in a file named <inputfilename>.params which can be loaded onto the board using QGroundControl
Outputs summary plots in a pdf file named <inputfilename>.pdf
"""
# Command-line interface: a single positional .ulg input file.
_DESCRIPTION = ('Reads in IMU data from a static thermal calibration test and '
                'performs a curve fit of gyro, accel and baro bias vs temperature')
parser = argparse.ArgumentParser(description=_DESCRIPTION)
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
def is_valid_directory(parser, arg):
    """Return *arg* if it names an existing directory.

    Otherwise abort argument parsing via ``parser.error`` (which raises
    ``SystemExit``).
    """
    # Guard clause: bail out early on a missing directory.
    if not os.path.isdir(arg):
        parser.error('The directory {} does not exist'.format(arg))
    return arg
args = parser.parse_args()
ulog_file_name = args.filename

# Parse the whole log once; data_list holds one entry per logged topic instance.
ulog = ULog(ulog_file_name, None)
data = ulog.data_list
def _extract_topic_instances(datasets, topic, label, max_instances):
    """Collect the data for up to *max_instances* occurrences of *topic*.

    Scans the ulog dataset list in order and, exactly like the original
    per-instance extraction loops, prints a 'found <label> <n> data' line
    for each instance kept and silently ignores any instances beyond the
    limit.

    Returns the list of per-instance data dicts (possibly empty).
    """
    found = []
    for d in datasets:
        if d.name == topic and len(found) < max_instances:
            print('found {} {} data'.format(label, len(found)))
            found.append(d.data)
    return found


# extract gyro data (at most three instances are supported)
_gyro_data = _extract_topic_instances(data, 'sensor_gyro', 'gyro', 3)
num_gyros = len(_gyro_data)
if num_gyros >= 1:
    sensor_gyro_0 = _gyro_data[0]
if num_gyros >= 2:
    sensor_gyro_1 = _gyro_data[1]
if num_gyros >= 3:
    sensor_gyro_2 = _gyro_data[2]

# extract accel data (at most three instances are supported)
_accel_data = _extract_topic_instances(data, 'sensor_accel', 'accel', 3)
num_accels = len(_accel_data)
if num_accels >= 1:
    sensor_accel_0 = _accel_data[0]
if num_accels >= 2:
    sensor_accel_1 = _accel_data[1]
if num_accels >= 3:
    sensor_accel_2 = _accel_data[2]

# extract baro data (at most two instances are supported)
_baro_data = _extract_topic_instances(data, 'sensor_baro', 'baro', 2)
num_baros = len(_baro_data)
if num_baros >= 1:
    sensor_baro_0 = _baro_data[0]
if num_baros >= 2:
    sensor_baro_1 = _baro_data[1]
# Every diagnostic figure produced below is appended to a single PDF
# report saved next to the input log.
from matplotlib.backends.backend_pdf import PdfPages

output_plot_filename = '{}.pdf'.format(ulog_file_name)
pp = PdfPages(output_plot_filename)
#################################################################################
def _default_gyro_params(instance):
    """Build the default thermal-compensation parameter dict for one gyro.

    Keys follow the PX4 ``TC_G<instance>_*`` naming scheme. They are
    inserted in the same order the original hand-written dicts used
    (ID, TMIN/TMAX/TREF, X0..X3 per axis, then the SCL entries), because
    the dict is later written out to the generated .params file.
    """
    prefix = 'TC_G{}_'.format(instance)
    params = {
        prefix + 'ID': 0,
        prefix + 'TMIN': 0.0,
        prefix + 'TMAX': 0.0,
        prefix + 'TREF': 0.0,
    }
    # Cubic polynomial coefficients X0..X3 for each of the three axes.
    for axis in range(3):
        for order in range(4):
            params['{}X{}_{}'.format(prefix, order, axis)] = 0.0
    # Per-axis scale factors default to unity.
    for axis in range(3):
        params['{}SCL_{}'.format(prefix, axis)] = 1.0
    return params


def _fit_and_plot_gyro(sensor_data, params, instance, fig_num):
    """Fit cubic bias-vs-temperature curves for one gyro and plot them.

    Updates *params* in place with the fitted ``TC_G<instance>_*``
    coefficients and appends a three-panel figure (X/Y/Z bias vs
    temperature) to the global PDF report ``pp``.
    """
    prefix = 'TC_G{}_'.format(instance)
    params[prefix + 'ID'] = int(np.median(sensor_data['device_id']))

    # Reference temperature is the midpoint of the observed range.
    tmin = np.amin(sensor_data['temperature'])
    tmax = np.amax(sensor_data['temperature'])
    tref = 0.5 * (tmin + tmax)
    params[prefix + 'TMIN'] = tmin
    params[prefix + 'TMAX'] = tmax
    params[prefix + 'TREF'] = tref

    temp_rel = sensor_data['temperature'] - tref
    temp_rel_resample = np.linspace(tmin - tref, tmax - tref, 100)
    temp_resample = temp_rel_resample + tref

    plt.figure(fig_num, figsize=(20, 13))
    for axis_index, axis in enumerate(('x', 'y', 'z')):
        # np.polyfit returns coefficients highest order first: X3..X0.
        coef = np.polyfit(temp_rel, sensor_data[axis], 3)
        for power, value in zip((3, 2, 1, 0), coef):
            params['{}X{}_{}'.format(prefix, power, axis_index)] = value
        bias_resample = np.poly1d(coef)(temp_rel_resample)

        # One subplot per axis: raw samples in blue, fitted curve in red.
        plt.subplot(3, 1, axis_index + 1)
        plt.plot(sensor_data['temperature'], sensor_data[axis], 'b')
        plt.plot(temp_resample, bias_resample, 'r')
        if axis_index == 0:
            plt.title('Gyro {} Bias vs Temperature'.format(instance))
        plt.ylabel('{} bias (rad/s)'.format(axis.upper()))
        plt.xlabel('temperature (degC)')
        plt.grid()
    pp.savefig()


# Default parameter dicts are always created so the .params file has a
# complete set even when fewer than three gyros were logged; curves are
# fitted (and figures drawn) only for the sensors actually present.
gyro_0_params = _default_gyro_params(0)
if num_gyros >= 1:
    _fit_and_plot_gyro(sensor_gyro_0, gyro_0_params, 0, fig_num=1)

gyro_1_params = _default_gyro_params(1)
if num_gyros >= 2:
    _fit_and_plot_gyro(sensor_gyro_1, gyro_1_params, 1, fig_num=2)

gyro_2_params = _default_gyro_params(2)
if num_gyros >= 3:
    _fit_and_plot_gyro(sensor_gyro_2, gyro_2_params, 2, fig_num=3)
#################################################################################
#################################################################################
# Accel 0 thermal compensation: cubic fit of the median-removed bias of each
# axis against temperature relative to the reference temperature TREF.
accel_0_params = {
    'TC_A0_ID': 0,
    'TC_A0_TMIN': 0.0,
    'TC_A0_TMAX': 0.0,
    'TC_A0_TREF': 0.0,
    'TC_A0_X0_0': 0.0,
    'TC_A0_X1_0': 0.0,
    'TC_A0_X2_0': 0.0,
    'TC_A0_X3_0': 0.0,
    'TC_A0_X0_1': 0.0,
    'TC_A0_X1_1': 0.0,
    'TC_A0_X2_1': 0.0,
    'TC_A0_X3_1': 0.0,
    'TC_A0_X0_2': 0.0,
    'TC_A0_X1_2': 0.0,
    'TC_A0_X2_2': 0.0,
    'TC_A0_X3_2': 0.0,
    'TC_A0_SCL_0': 1.0,
    'TC_A0_SCL_1': 1.0,
    'TC_A0_SCL_2': 1.0
}
# curve fit the data for accel 0 corrections
if num_accels >= 1:
    accel_0_params['TC_A0_ID'] = int(np.median(sensor_accel_0['device_id']))
    # temperature range covered by the log and its midpoint (reference temp)
    accel_0_params['TC_A0_TMIN'] = np.amin(sensor_accel_0['temperature'])
    accel_0_params['TC_A0_TMAX'] = np.amax(sensor_accel_0['temperature'])
    accel_0_params['TC_A0_TREF'] = 0.5 * (accel_0_params['TC_A0_TMIN'] + accel_0_params['TC_A0_TMAX'])
    temp_rel = sensor_accel_0['temperature'] - accel_0_params['TC_A0_TREF']
    temp_rel_resample = np.linspace(accel_0_params['TC_A0_TMIN'] - accel_0_params['TC_A0_TREF'],
                                    accel_0_params['TC_A0_TMAX'] - accel_0_params['TC_A0_TREF'], 100)
    temp_resample = temp_rel_resample + accel_0_params['TC_A0_TREF']
    # fit a cubic per axis; coef order from polyfit is highest power first,
    # so coef[0] -> X3 ... coef[3] -> X0
    accel_0_corrections = {}
    accel_0_fit_resample = {}
    for axis_index, axis in enumerate(('x', 'y', 'z')):
        accel_0_corrections[axis] = sensor_accel_0[axis] - np.median(sensor_accel_0[axis])
        poly_coefs = np.polyfit(temp_rel, accel_0_corrections[axis], 3)
        for order in range(4):
            accel_0_params['TC_A0_X%u_%u' % (3 - order, axis_index)] = poly_coefs[order]
        accel_0_fit_resample[axis] = np.poly1d(poly_coefs)(temp_rel_resample)
    # accel 0 bias vs temperature: data in blue, fitted polynomial in red
    plt.figure(4, figsize=(20, 13))
    for row, axis in enumerate(('x', 'y', 'z')):
        plt.subplot(3, 1, row + 1)
        plt.plot(sensor_accel_0['temperature'], accel_0_corrections[axis], 'b')
        plt.plot(temp_resample, accel_0_fit_resample[axis], 'r')
        if row == 0:
            plt.title('Accel 0 Bias vs Temperature')
        plt.ylabel('%s bias (m/s/s)' % axis.upper())
        plt.xlabel('temperature (degC)')
        plt.grid()
    pp.savefig()
#################################################################################
#################################################################################
# Accel 1 thermal compensation: cubic fit of the median-removed bias of each
# axis against temperature relative to the reference temperature TREF.
accel_1_params = {
    'TC_A1_ID': 0,
    'TC_A1_TMIN': 0.0,
    'TC_A1_TMAX': 0.0,
    'TC_A1_TREF': 0.0,
    'TC_A1_X0_0': 0.0,
    'TC_A1_X1_0': 0.0,
    'TC_A1_X2_0': 0.0,
    'TC_A1_X3_0': 0.0,
    'TC_A1_X0_1': 0.0,
    'TC_A1_X1_1': 0.0,
    'TC_A1_X2_1': 0.0,
    'TC_A1_X3_1': 0.0,
    'TC_A1_X0_2': 0.0,
    'TC_A1_X1_2': 0.0,
    'TC_A1_X2_2': 0.0,
    'TC_A1_X3_2': 0.0,
    'TC_A1_SCL_0': 1.0,
    'TC_A1_SCL_1': 1.0,
    'TC_A1_SCL_2': 1.0
}
# curve fit the data for accel 1 corrections
if num_accels >= 2:
    accel_1_params['TC_A1_ID'] = int(np.median(sensor_accel_1['device_id']))
    # temperature range covered by the log and its midpoint (reference temp)
    accel_1_params['TC_A1_TMIN'] = np.amin(sensor_accel_1['temperature'])
    accel_1_params['TC_A1_TMAX'] = np.amax(sensor_accel_1['temperature'])
    accel_1_params['TC_A1_TREF'] = 0.5 * (accel_1_params['TC_A1_TMIN'] + accel_1_params['TC_A1_TMAX'])
    temp_rel = sensor_accel_1['temperature'] - accel_1_params['TC_A1_TREF']
    temp_rel_resample = np.linspace(accel_1_params['TC_A1_TMIN'] - accel_1_params['TC_A1_TREF'],
                                    accel_1_params['TC_A1_TMAX'] - accel_1_params['TC_A1_TREF'], 100)
    temp_resample = temp_rel_resample + accel_1_params['TC_A1_TREF']
    # fit a cubic per axis; coef order from polyfit is highest power first,
    # so coef[0] -> X3 ... coef[3] -> X0
    accel_1_corrections = {}
    accel_1_fit_resample = {}
    for axis_index, axis in enumerate(('x', 'y', 'z')):
        accel_1_corrections[axis] = sensor_accel_1[axis] - np.median(sensor_accel_1[axis])
        poly_coefs = np.polyfit(temp_rel, accel_1_corrections[axis], 3)
        for order in range(4):
            accel_1_params['TC_A1_X%u_%u' % (3 - order, axis_index)] = poly_coefs[order]
        accel_1_fit_resample[axis] = np.poly1d(poly_coefs)(temp_rel_resample)
    # accel 1 bias vs temperature: data in blue, fitted polynomial in red
    plt.figure(5, figsize=(20, 13))
    for row, axis in enumerate(('x', 'y', 'z')):
        plt.subplot(3, 1, row + 1)
        plt.plot(sensor_accel_1['temperature'], accel_1_corrections[axis], 'b')
        plt.plot(temp_resample, accel_1_fit_resample[axis], 'r')
        if row == 0:
            plt.title('Accel 1 Bias vs Temperature')
        plt.ylabel('%s bias (m/s/s)' % axis.upper())
        plt.xlabel('temperature (degC)')
        plt.grid()
    pp.savefig()
#################################################################################
#################################################################################
# Accel 2 thermal compensation: cubic fit of the median-removed bias of each
# axis against temperature relative to the reference temperature TREF.
accel_2_params = {
    'TC_A2_ID': 0,
    'TC_A2_TMIN': 0.0,
    'TC_A2_TMAX': 0.0,
    'TC_A2_TREF': 0.0,
    'TC_A2_X0_0': 0.0,
    'TC_A2_X1_0': 0.0,
    'TC_A2_X2_0': 0.0,
    'TC_A2_X3_0': 0.0,
    'TC_A2_X0_1': 0.0,
    'TC_A2_X1_1': 0.0,
    'TC_A2_X2_1': 0.0,
    'TC_A2_X3_1': 0.0,
    'TC_A2_X0_2': 0.0,
    'TC_A2_X1_2': 0.0,
    'TC_A2_X2_2': 0.0,
    'TC_A2_X3_2': 0.0,
    'TC_A2_SCL_0': 1.0,
    'TC_A2_SCL_1': 1.0,
    'TC_A2_SCL_2': 1.0
}
# curve fit the data for accel 2 corrections
if num_accels >= 3:
    accel_2_params['TC_A2_ID'] = int(np.median(sensor_accel_2['device_id']))
    # temperature range covered by the log and its midpoint (reference temp)
    accel_2_params['TC_A2_TMIN'] = np.amin(sensor_accel_2['temperature'])
    accel_2_params['TC_A2_TMAX'] = np.amax(sensor_accel_2['temperature'])
    accel_2_params['TC_A2_TREF'] = 0.5 * (accel_2_params['TC_A2_TMIN'] + accel_2_params['TC_A2_TMAX'])
    temp_rel = sensor_accel_2['temperature'] - accel_2_params['TC_A2_TREF']
    temp_rel_resample = np.linspace(accel_2_params['TC_A2_TMIN'] - accel_2_params['TC_A2_TREF'],
                                    accel_2_params['TC_A2_TMAX'] - accel_2_params['TC_A2_TREF'], 100)
    temp_resample = temp_rel_resample + accel_2_params['TC_A2_TREF']
    # fit a cubic per axis; coef order from polyfit is highest power first,
    # so coef[0] -> X3 ... coef[3] -> X0
    accel_2_corrections = {}
    accel_2_fit_resample = {}
    for axis_index, axis in enumerate(('x', 'y', 'z')):
        accel_2_corrections[axis] = sensor_accel_2[axis] - np.median(sensor_accel_2[axis])
        poly_coefs = np.polyfit(temp_rel, accel_2_corrections[axis], 3)
        for order in range(4):
            accel_2_params['TC_A2_X%u_%u' % (3 - order, axis_index)] = poly_coefs[order]
        accel_2_fit_resample[axis] = np.poly1d(poly_coefs)(temp_rel_resample)
    # accel 2 bias vs temperature: data in blue, fitted polynomial in red
    plt.figure(6, figsize=(20, 13))
    for row, axis in enumerate(('x', 'y', 'z')):
        plt.subplot(3, 1, row + 1)
        plt.plot(sensor_accel_2['temperature'], accel_2_corrections[axis], 'b')
        plt.plot(temp_resample, accel_2_fit_resample[axis], 'r')
        if row == 0:
            plt.title('Accel 2 Bias vs Temperature')
        plt.ylabel('%s bias (m/s/s)' % axis.upper())
        plt.xlabel('temperature (degC)')
        plt.grid()
    pp.savefig()
#################################################################################
#################################################################################
# Baro 0 thermal compensation: 5th-order fit of the median-removed pressure
# error (converted from hPa to Pa) against temperature relative to TREF.
baro_0_params = {
    'TC_B0_ID': 0,
    'TC_B0_TMIN': 0.0,
    'TC_B0_TMAX': 0.0,
    'TC_B0_TREF': 0.0,
    'TC_B0_X0': 0.0,
    'TC_B0_X1': 0.0,
    'TC_B0_X2': 0.0,
    'TC_B0_X3': 0.0,
    'TC_B0_X4': 0.0,
    'TC_B0_X5': 0.0,
    'TC_B0_SCL': 1.0,
}
# curve fit the data for baro 0 corrections
baro_0_params['TC_B0_ID'] = int(np.median(sensor_baro_0['device_id']))
# temperature range covered by the log and its midpoint (reference temp)
baro_0_params['TC_B0_TMIN'] = np.amin(sensor_baro_0['temperature'])
baro_0_params['TC_B0_TMAX'] = np.amax(sensor_baro_0['temperature'])
baro_0_params['TC_B0_TREF'] = 0.5 * (baro_0_params['TC_B0_TMIN'] + baro_0_params['TC_B0_TMAX'])
temp_rel = sensor_baro_0['temperature'] - baro_0_params['TC_B0_TREF']
temp_rel_resample = np.linspace(baro_0_params['TC_B0_TMIN'] - baro_0_params['TC_B0_TREF'],
                                baro_0_params['TC_B0_TMAX'] - baro_0_params['TC_B0_TREF'], 100)
temp_resample = temp_rel_resample + baro_0_params['TC_B0_TREF']
# fit data; polyfit returns highest power first, so coef[0] -> X5 ... coef[5] -> X0
median_pressure = np.median(sensor_baro_0['pressure'])
coef_baro_0 = np.polyfit(temp_rel, 100 * (sensor_baro_0['pressure'] - median_pressure), 5)  # convert from hPa to Pa
for order in range(6):
    baro_0_params['TC_B0_X%u' % (5 - order)] = coef_baro_0[order]
baro_0_x_resample = np.poly1d(coef_baro_0)(temp_rel_resample)
# baro 0 bias vs temperature: data in blue, fitted polynomial in red
plt.figure(7, figsize=(20, 13))
plt.plot(sensor_baro_0['temperature'], 100 * sensor_baro_0['pressure'] - 100 * median_pressure, 'b')
plt.plot(temp_resample, baro_0_x_resample, 'r')
plt.title('Baro 0 Bias vs Temperature')
plt.ylabel('Z bias (Pa)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
# Baro 1 thermal compensation: 5th-order fit of the median-removed pressure
# error (converted from hPa to Pa) against temperature relative to TREF.
baro_1_params = {
    'TC_B1_ID': 0,
    'TC_B1_TMIN': 0.0,
    'TC_B1_TMAX': 0.0,
    'TC_B1_TREF': 0.0,
    'TC_B1_X0': 0.0,
    'TC_B1_X1': 0.0,
    'TC_B1_X2': 0.0,
    'TC_B1_X3': 0.0,
    'TC_B1_X4': 0.0,
    'TC_B1_X5': 0.0,
    'TC_B1_SCL': 1.0,
}
if num_baros >= 2:
    # curve fit the data for baro 1 corrections
    baro_1_params['TC_B1_ID'] = int(np.median(sensor_baro_1['device_id']))
    # temperature range covered by the log and its midpoint (reference temp)
    baro_1_params['TC_B1_TMIN'] = np.amin(sensor_baro_1['temperature'])
    baro_1_params['TC_B1_TMAX'] = np.amax(sensor_baro_1['temperature'])
    baro_1_params['TC_B1_TREF'] = 0.5 * (baro_1_params['TC_B1_TMIN'] + baro_1_params['TC_B1_TMAX'])
    temp_rel = sensor_baro_1['temperature'] - baro_1_params['TC_B1_TREF']
    temp_rel_resample = np.linspace(baro_1_params['TC_B1_TMIN'] - baro_1_params['TC_B1_TREF'],
                                    baro_1_params['TC_B1_TMAX'] - baro_1_params['TC_B1_TREF'], 100)
    temp_resample = temp_rel_resample + baro_1_params['TC_B1_TREF']
    # fit data; polyfit returns highest power first, so coef[0] -> X5 ... coef[5] -> X0
    median_pressure = np.median(sensor_baro_1['pressure'])
    coef_baro_1 = np.polyfit(temp_rel, 100 * (sensor_baro_1['pressure'] - median_pressure), 5)  # convert from hPa to Pa
    for order in range(6):
        baro_1_params['TC_B1_X%u' % (5 - order)] = coef_baro_1[order]
    baro_1_x_resample = np.poly1d(coef_baro_1)(temp_rel_resample)
    # baro 1 bias vs temperature: data in blue, fitted polynomial in red
    plt.figure(8, figsize=(20, 13))
    plt.plot(sensor_baro_1['temperature'], 100 * sensor_baro_1['pressure'] - 100 * median_pressure, 'b')
    plt.plot(temp_resample, baro_1_x_resample, 'r')
    plt.title('Baro 1 Bias vs Temperature')
    plt.ylabel('Z bias (Pa)')
    plt.xlabel('temperature (degC)')
    plt.grid()
    pp.savefig()
#################################################################################
# close the pdf file
pp.close()
# close all figures
plt.close("all")


def _write_param_group(out_file, params, id_key):
    """Write one sensor's correction parameters as tab-separated rows of
    'vehicle-id component-id name value type'.

    Keys are written in sorted order.  ``type`` is "6" (INT32) for the device
    id parameter named by *id_key* and "9" (FLOAT) for everything else.
    """
    for key in sorted(params.keys()):
        param_type = "6" if key == id_key else "9"
        out_file.write("1" + "\t" + "1" + "\t" + key + "\t" + str(params[key]) + "\t" + param_type + "\n")


# write correction parameters to file
test_results_filename = ulog_file_name + ".params"
# BUG FIX: the original called ``key_list.sort`` without parentheses for the
# accel and baro parameter groups (a no-op attribute access), so those groups
# were written in insertion order while the gyro groups (which used
# ``.sort()``) were sorted.  All groups are now sorted consistently.  The
# ``with`` block also guarantees the file is closed on error, and the
# ``file``/``type`` builtin names are no longer shadowed.
with open(test_results_filename, "w") as params_file:
    params_file.write("# Sensor thermal compensation parameters\n")
    params_file.write("#\n")
    params_file.write("# Vehicle-Id Component-Id Name Value Type\n")
    # accel corrections
    _write_param_group(params_file, accel_0_params, 'TC_A0_ID')
    _write_param_group(params_file, accel_1_params, 'TC_A1_ID')
    _write_param_group(params_file, accel_2_params, 'TC_A2_ID')
    # baro corrections
    _write_param_group(params_file, baro_0_params, 'TC_B0_ID')
    _write_param_group(params_file, baro_1_params, 'TC_B1_ID')
    # gyro corrections
    _write_param_group(params_file, gyro_0_params, 'TC_G0_ID')
    _write_param_group(params_file, gyro_1_params, 'TC_G1_ID')
    _write_param_group(params_file, gyro_2_params, 'TC_G2_ID')
print('Correction parameters written to ' + test_results_filename)
print('Plots saved to ' + output_plot_filename)
| bsd-3-clause |
miguelarauj1o/ackley | plots/plot.py | 1 | 1631 | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
import time
def timestamped(name, fmt="png"):
    """Build a plot file name of the form ``plot_<name><unix-time>.<fmt>``."""
    stamp = int(time.time())
    return "plot_{0}{1}.{2}".format(name, stamp, fmt)
def plot_vs_generation(data, yfield, title, xlabel, ylabel, filename):
    """Plot column *yfield* of *data* against the generation index and save
    the figure to a time-stamped file.

    data: iterable of dict-like rows containing 'generation' and *yfield*.
    BUG FIXES: the *ylabel* argument was previously ignored (the y label was
    hard-coded to "Fitness Médio"), and ``savefig`` was called after ``show``,
    which saves an empty figure on interactive backends.  The label now comes
    from the parameter and the figure is saved before being shown.
    """
    y_axis = [row[yfield] for row in data]
    x_axis = [row['generation'] for row in data]
    pyplot.figure()
    pyplot.title(title)
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
    pyplot.plot(x_axis, y_axis)
    pyplot.grid(True)
    pyplot.savefig(timestamped(filename))
    pyplot.show()
def avg_fitness_vs_generation(data, title="Fitness médio por geração"):
    """Plot the population's mean fitness for every generation."""
    plot_vs_generation(data, yfield="avg_fitness", title=title,
                       xlabel="Geração", ylabel="Fitness Médio",
                       filename="avg_fitness_vs_generation")
def max_fitness_vs_generation(data, title="Fitness máximo por geração"):
    """Plot the population's best (maximum) fitness for every generation."""
    plot_vs_generation(data, yfield="max_fitness", title=title,
                       xlabel="Geração", ylabel="Fitness Máximo",
                       filename="max_fitness_vs_generation")
def std_deviation_vs_generation(data, title="Desvio padrão por geração"):
    """Plot the fitness standard deviation for every generation."""
    plot_vs_generation(data, yfield="std_deviation", title=title,
                       xlabel="Geração", ylabel="Desvio Padrão",
                       filename="std_deviation_vs_generation")
| mit |
ManyBodyPhysics/LectureNotesPhysics | Programs/Chapter8-programs/python/unit_test_example/src/mbpt_version0.py | 1 | 3864 | #!/usr/bin/python
from sympy import *
from pylab import *
import matplotlib.pyplot as plt
# Indices of hole states (below the Fermi level) and particle states (above it).
below_fermi = (0,1,2,3)
above_fermi = (4,5,6,7)
# Single-particle states as (level, spin) pairs; spin is +1/-1.
states = [(1,1),(1,-1),(2,1),(2,-1),(3,1),(3,-1),(4,1),(4,-1)]
# Total number of single-particle states.
N = 8
# Symbolic interaction strength used throughout the matrix elements.
g = Symbol('g')
def h0(p, q):
    """One-body (diagonal) Hamiltonian matrix element: level - 1, zero off-diagonal."""
    if p != q:
        return 0
    level, _spin = states[p]
    return level - 1
def f(p, q):
    """Fock-matrix element: h0 plus the summed interaction with all hole states.

    Returns 0 on the diagonal (p == q), matching the original implementation.
    """
    if p == q:
        return 0
    total = h0(p, q)
    for hole in below_fermi:
        total += assym(p, hole, q, hole)
    return total
def assym(p, q, r, s):
    """Antisymmetrised two-body matrix element <pq||rs> of the pairing interaction.

    Non-zero (+/- g/2) only when both bra and ket are spin-up/spin-down pairs
    on a single level.
    """
    level1, spin1 = states[p]
    level2, spin2 = states[q]
    level3, spin3 = states[r]
    level4, spin4 = states[s]
    # both pairs must sit on one level each
    if level1 != level2 or level3 != level4:
        return 0
    # each pair must consist of opposite spins
    if spin1 == spin2 or spin3 == spin4:
        return 0
    if spin1 == spin3 and spin2 == spin4:
        return -g/2.
    if spin1 == spin4 and spin2 == spin3:
        return g/2.
# Smoke check of a single matrix element (Python 2 print syntax; this file targets Python 2).
print "0,1,0,1: ", assym(0,1,0,1)
def eps(holes, particles):
    """Unperturbed energy denominator: sum of hole energies minus particle energies."""
    energy = 0
    for hole in holes:
        level, _spin = states[hole]
        energy += level - 1
    for particle in particles:
        level, _spin = states[particle]
        energy -= level - 1
    return energy
# Diagram 1: second-order (MBPT2) energy; 0.25 * <ab||ij><ij||ab> over one denominator.
s1 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                s1 += 0.25*assym(a,b,i,j)*assym(i,j,a,b)/eps((i,j),(a,b))
# Diagram 3: third-order term with three interaction vertices over two denominators.
s3 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for i in below_fermi:
                for j in below_fermi:
                    for k in below_fermi:
                        s3 += assym(i,j,a,b)*assym(a,c,j,k)*assym(b,k,c,i)/eps((i,j),(a,b))/eps((k,j),(a,c))
# Diagram 4: third-order term summing over four particle states (factor 1/8).
s4 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for d in above_fermi:
                for i in below_fermi:
                    for j in below_fermi:
                        s4 += 0.125*assym(i,j,a,b)*assym(a,b,c,d)*assym(c,d,i,j)/eps((i,j),(a,b))/eps((i,j),(c,d))
# Diagram 5: third-order term summing over four hole states (factor 1/8).
s5 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                for k in below_fermi:
                    for l in below_fermi:
                        s5 += 0.125*assym(i,j,a,b)*assym(k,l,i,j)*assym(a,b,k,l)/eps((i,j),(a,b))/eps((k,l),(a,b))
# Diagram 8: third-order term with a one-body (Fock-matrix f) insertion on a hole line.
s8 = 0
for a in above_fermi:
    for b in above_fermi:
        for i in below_fermi:
            for j in below_fermi:
                for k in below_fermi:
                    s8 -= 0.5*assym(i,j,a,b)*assym(a,b,i,k)*f(k,j)/eps((i,j),(a,b))/eps((i,k),(a,b))
# Diagram 9: third-order term with a one-body (Fock-matrix f) insertion on a particle line.
s9 = 0
for a in above_fermi:
    for b in above_fermi:
        for c in above_fermi:
            for i in below_fermi:
                for j in below_fermi:
                    s9 += 0.5*assym(i,j,a,b)*assym(a,c,i,j)*f(b,c)/eps((i,j),(a,b))/eps((i,j),(a,c))
# Scan the interaction strength g and compare exact diagonalisation against
# the perturbative corrections computed above.
ga = linspace(-1,1,20)
e1 = []
corr2 = []
corr3 = []
for g_val in ga:
    # 6x6 Hamiltonian matrix at this value of g; diagonal entries grow with
    # the unperturbed configuration energy.
    H1 = matrix([[2-g_val , -g_val/2., -g_val/2., -g_val/2., -g_val/2., 0],
                 [-g_val/2., 4-g_val, -g_val/2., -g_val/2., 0., -g_val/2.],
                 [-g_val/2., -g_val/2., 6-g_val, 0, -g_val/2., -g_val/2.],
                 [-g_val/2., -g_val/2., 0, 6-g_val, -g_val/2., -g_val/2.],
                 [-g_val/2., 0, -g_val/2., -g_val/2., 8-g_val, -g_val/2.],
                 [0 , -g_val/2., -g_val/2., -g_val/2., -g_val/2., 10-g_val]])
    u1, v1 = linalg.eig(H1)
    # exact ground-state energy is the lowest eigenvalue
    e1.append(min(u1))
    # substitute the numeric g into the symbolic 2nd and (2nd+3rd) order sums
    # NOTE(review): s8 and s9 are computed above but not included in corr3 —
    # presumably they vanish for this model; confirm before relying on it.
    corr2.append((s1).subs(g,g_val))
    corr3.append((s1+s3+s4+s5).subs(g,g_val))
# correlation energy = exact ground state minus the reference energy (2 - g)
exact = e1 - (2-ga)
plt.axis([-1,1,-0.5,0.05])
plt.xlabel(r'Interaction strength, $g$', fontsize=16)
plt.ylabel(r'Correlation energy', fontsize=16)
exact = plt.plot(ga, exact,'b-*',linewidth = 2.0, label = 'Exact')
mbpt2 = plt.plot(ga, corr2,'r:.', linewidth = 2.0, label = 'MBPT2')
mbpt3 = plt.plot(ga, corr3, 'm:v',linewidth = 2.0, label = 'MBPT3')
plt.legend()
plt.savefig('perturbationtheory.pdf', format='pdf')
plt.show()
| cc0-1.0 |
liangz0707/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    """Setting ``coef_`` exposes an equivalent sparse matrix via ``sparse_coef_``."""
    # Check that the sparse_coef property works
    clf = ElasticNet()
    clf.coef_ = [1, 2, 3]
    assert_true(sp.isspmatrix(clf.sparse_coef_))
    assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)


def test_normalize_option():
    """Dense and sparse input give identical fits when ``normalize=True``."""
    # Check that the normalize option in enet works
    X = sp.csc_matrix([[-1], [0], [1]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    X = sp.csc_matrix(X)
    clf_sparse.fit(X, y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)


def test_lasso_zero():
    """An all-zero sparse design matrix yields the zero model without crashing."""
    # Check that the sparse lasso can handle zero data without crashing
    X = sp.csc_matrix((3, 1))
    y = [0, 0, 0]
    T = np.array([[1], [2], [3]])
    clf = Lasso().fit(X, y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(pred, [0, 0, 0])
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
    """ElasticNet on a tiny 3-sample problem for several (alpha, l1_ratio) pairs."""
    # Test ElasticNet for various values of alpha and l1_ratio with list X
    X = np.array([[-1], [0], [1]])
    X = sp.csc_matrix(X)
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample

    # this should be the same as unregularized least squares
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    # catch warning about alpha=0.
    # this is discouraged but should work.
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)


def test_enet_toy_explicit_sparse_input():
    """Same toy checks with an explicitly constructed LIL sparse matrix."""
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    f = ignore_warnings
    # training samples
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]  # just a straight line (the identity function)

    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    # this should be the same as lasso
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build a reproducible, ill-posed sparse regression problem.

    Only the first *n_informative* features carry signal; roughly half of the
    design-matrix entries are zeroed out.  Returns (X, y) with X a CSC sparse
    matrix and y a 1-D array (or (n_samples, n_targets) when n_targets > 1).
    """
    rng = np.random.RandomState(seed)
    # ground-truth weights; RNG call order matches the historical implementation
    w = rng.randn(n_features, n_targets)
    w[n_informative:] = 0.0  # only the top features are impacting the model
    if positive:
        w = np.abs(w)
    X = rng.randn(n_samples, n_features)
    mask = rng.uniform(size=(n_samples, n_features))
    X[mask > 0.5] = 0.0  # 50% of zeros in input signal
    # noiseless training targets
    y = np.dot(X, w)
    X = sp.csc_matrix(X)
    if n_targets == 1:
        y = np.ravel(y)
    return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    """Shared helper: sparse and dense ElasticNet must converge to the same model."""
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)

    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    s_clf.fit(X_train, y_train)

    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)

    # check the convergence is the same as the dense version
    d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    d_clf.fit(X_train.toarray(), y_train)

    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)

    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)

    # check that the coefs are sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)


def test_sparse_enet_not_as_toy_dataset():
    """Run the sparse/dense ElasticNet comparison over several settings."""
    _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
                                         positive=False)
    _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
                                         positive=False)
    _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
                                         positive=True)
    _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
                                         positive=True)


def test_sparse_lasso_not_as_toy_dataset():
    """Sparse and dense Lasso must agree on a realistic ill-posed problem."""
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)

    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    s_clf.fit(X_train, y_train)
    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)

    # check the convergence is the same as the dense version
    d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
    d_clf.fit(X_train.toarray(), y_train)
    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)

    # check that the coefs are sparse
    assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    """A multi-target fit must match the per-target fits column by column."""
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)

    estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    # XXX: There is a bug when precompute is not None!
    estimator.fit(X, y)
    coef, intercept, dual_gap = (estimator.coef_,
                                 estimator.intercept_,
                                 estimator.dual_gap_)

    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)


def test_path_parameters():
    """ElasticNetCV exposes its path parameters and matches the dense MSE path."""
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(clf.fit)(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
    sparse_mse_path = clf.mse_path_
    ignore_warnings(clf.fit)(X.toarray(), y)  # compare with dense data
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    """LassoCV/ElasticNetCV must select identical models from sparse and dense input."""
    X, y = make_sparse_data(n_samples=40, n_features=10)
    for normalize in [True, False]:
        clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
        ignore_warnings(clfs.fit)(X, y)
        clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
        ignore_warnings(clfd.fit)(X.toarray(), y)
        assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
        assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
        assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
        assert_array_almost_equal(clfs.alphas_, clfd.alphas_)

        clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
        ignore_warnings(clfs.fit)(X, y)
        clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
        ignore_warnings(clfd.fit)(X.toarray(), y)
        assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
        assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
        assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
        assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
felixbiessmann/active-manifesto | services/manifesto_data.py | 1 | 4027 | import itertools
import json
import urllib
import urllib.request
import pandas as pd
class ManifestoDataLoader(object):
def __init__(self, api_key):
self.base_url = "https://manifesto-project.wzb.eu/tools"
self.country = "Germany"
self.version = "MPDS2017b"
self.api_key = api_key
self.label2rightleft = {
'right': [104, 201, 203, 305, 401, 402, 407, 414, 505, 601, 603, 605, 606],
'left': [103, 105, 106, 107, 403, 404, 406, 412, 413, 504, 506, 701, 202]
}
def cmp_code_2_left_right_neutral(self, cmp_code):
if cmp_code in self.label2rightleft['left']:
return 'left'
elif cmp_code in self.label2rightleft['right']:
return 'right'
else:
return 'neutral'
@staticmethod
def get_url(url):
return urllib.request.urlopen(url).read().decode()
def get_latest_version(self):
"""
Get the latest version id of the Corpus
"""
versions_url = self.base_url + "/api_list_metadata_versions.json?&api_key=" + self.api_key
versions = json.loads(self.get_url(versions_url))
return versions['versions'][-1]
def get_manifesto_id(self, text_id, version):
"""
Get manifesto id of a text given the text id and a version id
"""
text_key_url = self.base_url + "/api_metadata?keys[]=" + text_id + "&version=" + version + "&api_key=" + self.api_key
text_meta_data = json.loads(self.get_url(text_key_url))
return text_meta_data['items'][0]['manifesto_id']
def get_core(self):
"""
Downloads core data set, including information about all parties
https://manifestoproject.wzb.eu/information/documents/api
"""
url = self.base_url + "/api_get_core?key=" + self.version + "&api_key=" + self.api_key
return json.loads(self.get_url(url))
def get_text_keys(self):
d = self.get_core()
return [p[5:7] for p in d if p[1] == self.country]
def get_text(self, text_id):
"""
Retrieves the latest version of the manifesto text with corresponding labels
"""
# get the latest version of this text
version = self.get_latest_version()
# get the text metadata and manifesto ID
manifesto_id = self.get_manifesto_id(text_id, version)
text_url = self.base_url + "/api_texts_and_annotations.json?keys[]=" + manifesto_id + "&version=" + version + "&api_key=" + self.api_key
text_data = json.loads(self.get_url(text_url))
try:
text = [(t['cmp_code'], t['text']) for t in text_data['items'][0]['items']]
print('Downloaded %d texts for %s' % (len(text_data['items'][0]['items']), text_id))
return text
except:
print('Could not get text %s' % text_id)
    def get_texts_per_party(self):
        """Download every annotated text for this country, keyed by 'party_date'.

        Entries whose download/parse failed (falsy value) are dropped.
        Network I/O; prints a progress summary.
        """
        # get all tuples of party/date corresponding to a manifesto text in this country
        text_keys = self.get_text_keys()
        # get the texts
        texts = {t[1] + "_" + t[0]: self.get_text(t[1] + "_" + t[0]) for t in text_keys}
        texts = {k: v for k, v in texts.items() if v}
        print("Downloaded %d/%d annotated texts" % (len(texts), len(text_keys)))
        return texts
def get_texts(self):
texts = self.get_texts_per_party()
return [x for x in list(itertools.chain(*texts.values())) if x[0] != 'NA' and x[0] != '0']
    def get_manifesto_texts(self, min_len=10):
        """Download all texts and return (contents, left/right/neutral labels).

        Texts whose string form is not longer than *min_len* characters are
        discarded.  Returns two parallel lists.  Network I/O.
        """
        print("Downloading texts from manifestoproject.")
        manifesto_texts = self.get_texts()
        df = pd.DataFrame(manifesto_texts, columns=['cmp_code', 'content'])
        df = df[df.content.apply(lambda x: len(str(x)) > min_len)]
        return df['content'].map(str).tolist(), df['cmp_code'].map(int).map(self.cmp_code_2_left_right_neutral).tolist()
if __name__ == "__main__":
    # Manual smoke test: requires a valid Manifesto Project API key and
    # network access.
    api_key = ""
    loader = ManifestoDataLoader(api_key)
    text, code = loader.get_manifesto_texts()
| mit |
hainm/statsmodels | statsmodels/stats/tests/test_power.py | 28 | 25876 | # -*- coding: utf-8 -*-
# pylint: disable=W0231, W0142
"""Tests for statistical power calculations
Note:
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import copy
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose, assert_raises,
assert_equal, assert_warns)
import statsmodels.stats.power as smp
import warnings
#from .test_weightstats import CheckPowerMixin
from statsmodels.stats.tests.test_weightstats import Holder
# for testing plots
import nose
from numpy.testing import dec
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except ImportError:
have_matplotlib = False
class CheckPowerMixin(object):
    """Shared checks for power-calculation classes.

    Subclasses set ``self.cls`` (the power class), ``self.kwds`` (keyword
    args incl. the expected 'power'), ``self.kwds_extra`` (extra args that
    are passed through but not solved for), ``self.res2`` (reference results
    from R) and optionally ``self.decimal`` / ``self.args_names``.
    """
    def test_power(self):
        # Compare .power(**kwds) against the R reference value.
        #test against R results
        kwds = copy.copy(self.kwds)
        del kwds['power']
        kwds.update(self.kwds_extra)
        if hasattr(self, 'decimal'):
            decimal = self.decimal
        else:
            decimal = 6
        res1 = self.cls()
        assert_almost_equal(res1.power(**kwds), self.res2.power, decimal=decimal)
    def test_positional(self):
        # Same check, but passing the leading arguments positionally.
        res1 = self.cls()
        kwds = copy.copy(self.kwds)
        del kwds['power']
        kwds.update(self.kwds_extra)
        # positional args
        if hasattr(self, 'args_names'):
            args_names = self.args_names
        else:
            nobs_ = 'nobs' if 'nobs' in kwds else 'nobs1'
            args_names = ['effect_size', nobs_, 'alpha']
        # pop positional args
        args = [kwds.pop(arg) for arg in args_names]
        if hasattr(self, 'decimal'):
            decimal = self.decimal
        else:
            decimal = 6
        res = res1.power(*args, **kwds)
        assert_almost_equal(res, self.res2.power, decimal=decimal)
    def test_roots(self):
        # Round-trip: setting each kwd to None and calling solve_power should
        # recover its original value (kwds_extra are never solved for).
        kwds = copy.copy(self.kwds)
        kwds.update(self.kwds_extra)
        # kwds_extra are used as argument, but not as target for root
        for key in self.kwds:
            # keep print to check whether tests are really executed
            #print 'testing roots', key
            value = kwds[key]
            kwds[key] = None
            result = self.cls().solve_power(**kwds)
            assert_allclose(result, value, rtol=0.001, err_msg=key+' failed')
            # yield can be used to investigate specific errors
            #yield assert_allclose, result, value, 0.001, 0, key+' failed'
            kwds[key] = value # reset dict
    @dec.skipif(not have_matplotlib)
    def test_power_plot(self):
        # Smoke test: plot_power must run without error (no value checks).
        if self.cls == smp.FTestPower:
            raise nose.SkipTest('skip FTestPower plot_power')
        fig = plt.figure()
        ax = fig.add_subplot(2,1,1)
        fig = self.cls().plot_power(dep_var='nobs',
                                  nobs= np.arange(2, 100),
                                  effect_size=np.array([0.1, 0.2, 0.3, 0.5, 1]),
                                  #alternative='larger',
                                  ax=ax, title='Power of t-Test',
                                  **self.kwds_extra)
        ax = fig.add_subplot(2,1,2)
        fig = self.cls().plot_power(dep_var='es',
                                  nobs=np.array([10, 20, 30, 50, 70, 100]),
                                  effect_size=np.linspace(0.01, 2, 51),
                                  #alternative='larger',
                                  ax=ax, title='',
                                  **self.kwds_extra)
        plt.close('all')
#''' test cases
#one sample
# two-sided one-sided
#large power OneS1 OneS3
#small power OneS2 OneS4
#
#two sample
# two-sided one-sided
#large power TwoS1 TwoS3
#small power TwoS2 TwoS4
#small p, ratio TwoS4 TwoS5
#'''
class TestTTPowerOneS1(CheckPowerMixin):
    """One-sample t-test, two-sided, large power; reference values from R pwr.

    NOTE(review): the pasted R command says type="two.sample" but the stored
    method string and ``smp.TTestPower`` are one-sample — the comment looks
    copy-pasted; the numeric reference matches the one-sample case.
    """
    def __init__(self):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, prefix='tt_power2_1.')
        res2 = Holder()
        res2.n = 30
        res2.d = 1
        res2.sig_level = 0.05
        res2.power = 0.9995636009612725
        res2.alternative = 'two.sided'
        res2.note = 'NULL'
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power}
        self.kwds_extra = {}
        self.cls = smp.TTestPower
class TestTTPowerOneS2(CheckPowerMixin):
    """One-sample t-test, two-sided, small power; reference values from R pwr."""
    # case with small power
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="two.sided")
        #> cat_items(p, "res2.")
        res2.n = 20
        res2.d = 0.2
        res2.sig_level = 0.05
        res2.power = 0.1359562887679666
        res2.alternative = 'two.sided'
        res2.note = '''NULL'''
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power}
        self.kwds_extra = {}
        self.cls = smp.TTestPower
class TestTTPowerOneS3(CheckPowerMixin):
    """One-sample t-test, one-sided 'larger', large power; R pwr reference."""
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="one.sample",alternative="greater")
        #> cat_items(p, prefix='tt_power1_1g.')
        res2.n = 30
        res2.d = 1
        res2.sig_level = 0.05
        res2.power = 0.999892010204909
        res2.alternative = 'greater'
        res2.note = 'NULL'
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power}
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestPower
class TestTTPowerOneS4(CheckPowerMixin):
    """One-sample t-test, one-sided 'larger', tiny effect size; R pwr reference."""
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=0.05,n=20,sig.level=0.05,type="one.sample",alternative="greater")
        #> cat_items(p, "res2.")
        res2.n = 20
        res2.d = 0.05
        res2.sig_level = 0.05
        res2.power = 0.0764888785042198
        res2.alternative = 'greater'
        res2.note = '''NULL'''
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power}
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestPower
class TestTTPowerOneS5(CheckPowerMixin):
    """One-sample t-test, one-sided 'smaller'; R pwr reference."""
    # case one-sided less, not implemented yet
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
        #> cat_items(p, "res2.")
        res2.n = 20
        res2.d = 0.2
        res2.sig_level = 0.05
        res2.power = 0.006063932667926375
        res2.alternative = 'less'
        res2.note = '''NULL'''
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power}
        self.kwds_extra = {'alternative': 'smaller'}
        self.cls = smp.TTestPower
class TestTTPowerOneS6(CheckPowerMixin):
    """One-sample t-test, one-sided 'smaller' with negative effect size; R pwr reference."""
    # case one-sided less, negative effect size, not implemented yet
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=-0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
        #> cat_items(p, "res2.")
        res2.n = 20
        res2.d = -0.2
        res2.sig_level = 0.05
        res2.power = 0.21707518167191
        res2.alternative = 'less'
        res2.note = '''NULL'''
        res2.method = 'One-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power}
        self.kwds_extra = {'alternative': 'smaller'}
        self.cls = smp.TTestPower
class TestTTPowerTwoS1(CheckPowerMixin):
    """Two-sample t-test, two-sided, equal n (ratio=1); R pwr reference."""
    def __init__(self):
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, prefix='tt_power2_1.')
        res2 = Holder()
        res2.n = 30
        res2.d = 1
        res2.sig_level = 0.05
        res2.power = 0.967708258242517
        res2.alternative = 'two.sided'
        res2.note = 'n is number in *each* group'
        res2.method = 'Two-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
        self.kwds_extra = {}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS2(CheckPowerMixin):
    """Two-sample t-test, two-sided, small effect size; R pwr reference."""
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=0.1,n=20,sig.level=0.05,type="two.sample",alternative="two.sided")
        #> cat_items(p, "res2.")
        res2.n = 20
        res2.d = 0.1
        res2.sig_level = 0.05
        res2.power = 0.06095912465411235
        res2.alternative = 'two.sided'
        res2.note = 'n is number in *each* group'
        res2.method = 'Two-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
        self.kwds_extra = {}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS3(CheckPowerMixin):
    """Two-sample t-test, one-sided 'larger', equal n; R pwr reference."""
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="greater")
        #> cat_items(p, prefix='tt_power2_1g.')
        res2.n = 30
        res2.d = 1
        res2.sig_level = 0.05
        res2.power = 0.985459690251624
        res2.alternative = 'greater'
        res2.note = 'n is number in *each* group'
        res2.method = 'Two-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS4(CheckPowerMixin):
    """Two-sample t-test, one-sided 'larger', near-null effect; R pwr reference."""
    # case with small power
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t.test(d=0.01,n=30,sig.level=0.05,type="two.sample",alternative="greater")
        #> cat_items(p, "res2.")
        res2.n = 30
        res2.d = 0.01
        res2.sig_level = 0.05
        res2.power = 0.0540740302835667
        res2.alternative = 'greater'
        res2.note = 'n is number in *each* group'
        res2.method = 'Two-sample t test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power}
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS5(CheckPowerMixin):
    """Two-sample t-test, two-sided, unequal group sizes (ratio=1.5); R pwr.t2n reference."""
    # case with unequal n, ratio>1
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="two.sided")
        #> cat_items(p, "res2.")
        res2.n1 = 20
        res2.n2 = 30
        res2.d = 0.1
        res2.sig_level = 0.05
        res2.power = 0.0633081832564667
        res2.alternative = 'two.sided'
        res2.method = 't test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
                     'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
        self.kwds_extra = {'alternative': 'two-sided'}
        self.cls = smp.TTestIndPower
class TestTTPowerTwoS6(CheckPowerMixin):
    """Two-sample t-test, one-sided 'larger', unequal group sizes; R pwr.t2n reference."""
    # case with unequal n, ratio>1
    def __init__(self):
        res2 = Holder()
        #> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="greater")
        #> cat_items(p, "res2.")
        res2.n1 = 20
        res2.n2 = 30
        res2.d = 0.1
        res2.sig_level = 0.05
        res2.power = 0.09623589080917805
        res2.alternative = 'greater'
        res2.method = 't test power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
                     'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
        self.kwds_extra = {'alternative': 'larger'}
        self.cls = smp.TTestIndPower
def test_normal_power_explicit():
    """A few explicit test cases for NormalIndPower against R pwr values.

    Checks that normal_power, NormalIndPower.power and solve_power agree with
    each other and with R, including one-sided alternatives and negative
    effect sizes (which are equivalent to flipping the one-sided alternative).
    """
    # Originally `sigma = 1` was defined but never used, and the alpha/d/nobs
    # variables were shadowed by literals in the calls below; use them
    # consistently instead (values unchanged).
    d = 0.3
    nobs = 80
    alpha = 0.05
    res1 = smp.normal_power(d, nobs/2., alpha)
    res2 = smp.NormalIndPower().power(d, nobs, alpha)
    res3 = smp.NormalIndPower().solve_power(effect_size=d, nobs1=nobs, alpha=alpha, power=None)
    res_R = 0.475100870572638
    assert_almost_equal(res1, res_R, decimal=13)
    assert_almost_equal(res2, res_R, decimal=13)
    assert_almost_equal(res3, res_R, decimal=13)
    norm_pow = smp.normal_power(-0.01, nobs/2., alpha)
    norm_pow_R = 0.05045832927039234
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
    norm_pow = smp.NormalIndPower().power(0.01, nobs, alpha,
                                          alternative="larger")
    norm_pow_R = 0.056869534873146124
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
    # Note: negative effect size is same as switching one-sided alternative
    # TODO: should I switch to larger/smaller instead of "one-sided" options
    norm_pow = smp.NormalIndPower().power(-0.01, nobs, alpha,
                                          alternative="larger")
    norm_pow_R = 0.0438089705093578
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
class TestNormalIndPower1(CheckPowerMixin):
    """Two-sample normal power, two-sided; values derived from R (see above)."""
    def __init__(self):
        #> example from above
        # results copied not directly from R
        res2 = Holder()
        res2.n = 80
        res2.d = 0.3
        res2.sig_level = 0.05
        res2.power = 0.475100870572638
        res2.alternative = 'two.sided'
        res2.note = 'NULL'
        res2.method = 'two sample power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
        self.kwds_extra = {}
        self.cls = smp.NormalIndPower
class TestNormalIndPower2(CheckPowerMixin):
    """Two-proportion (arcsine) power, one-sided 'smaller'; R pwr.2p reference."""
    def __init__(self):
        res2 = Holder()
        #> np = pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
        #> cat_items(np, "res2.")
        res2.h = 0.01
        res2.n = 80
        res2.sig_level = 0.05
        res2.power = 0.0438089705093578
        res2.alternative = 'less'
        res2.method = ('Difference of proportion power calculation for' +
                       ' binomial distribution (arcsine transformation)')
        res2.note = 'same sample sizes'
        self.res2 = res2
        self.kwds = {'effect_size': res2.h, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
        self.kwds_extra = {'alternative':'smaller'}
        self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp1(CheckPowerMixin):
    """One-sample normal power obtained by forcing ratio=0 in NormalIndPower."""
    def __init__(self):
        # forcing one-sample by using ratio=0
        #> example from above
        # results copied not directly from R
        res2 = Holder()
        res2.n = 40
        res2.d = 0.3
        res2.sig_level = 0.05
        res2.power = 0.475100870572638
        res2.alternative = 'two.sided'
        res2.note = 'NULL'
        res2.method = 'two sample power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power}
        # keyword for which we don't look for root:
        self.kwds_extra = {'ratio': 0}
        self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp2(CheckPowerMixin):
    """One-sample normal power (ratio=0), one-sided 'smaller'; R pwr.norm reference."""
    # Note: same power as two sample case with twice as many observations
    def __init__(self):
        # forcing one-sample by using ratio=0
        res2 = Holder()
        #> np = pwr.norm.test(d=0.01,n=40,sig.level=0.05,alternative="less")
        #> cat_items(np, "res2.")
        res2.d = 0.01
        res2.n = 40
        res2.sig_level = 0.05
        res2.power = 0.0438089705093578
        res2.alternative = 'less'
        res2.method = 'Mean power calculation for normal distribution with known variance'
        self.res2 = res2
        self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
                     'alpha': res2.sig_level, 'power':res2.power}
        # keyword for which we don't look for root:
        self.kwds_extra = {'ratio': 0, 'alternative':'smaller'}
        self.cls = smp.NormalIndPower
class TestChisquarePower(CheckPowerMixin):
    """Goodness-of-fit chi-square power; reference case taken from test_gof."""
    def __init__(self):
        # one example from test_gof, results_power
        res2 = Holder()
        res2.w = 0.1
        res2.N = 5
        res2.df = 4
        res2.sig_level = 0.05
        res2.power = 0.05246644635810126
        res2.method = 'Chi squared power calculation'
        res2.note = 'N is the number of observations'
        self.res2 = res2
        self.kwds = {'effect_size': res2.w, 'nobs': res2.N,
                     'alpha': res2.sig_level, 'power':res2.power}
        # keyword for which we don't look for root:
        # solving for n_bins doesn't work, will not be used in regular usage
        self.kwds_extra = {'n_bins': res2.df + 1}
        self.cls = smp.GofChisquarePower
    # Leading underscore deliberately disables this variant of the positional
    # test (n_bins is passed positionally here, unlike the mixin version).
    def _test_positional(self):
        res1 = self.cls()
        args_names = ['effect_size','nobs', 'alpha', 'n_bins']
        kwds = copy.copy(self.kwds)
        del kwds['power']
        kwds.update(self.kwds_extra)
        args = [kwds[arg] for arg in args_names]
        if hasattr(self, 'decimal'):
            decimal = self.decimal #pylint: disable-msg=E1101
        else:
            decimal = 6
        assert_almost_equal(res1.power(*args), self.res2.power, decimal=decimal)
def test_ftest_power():
    """Check ftest_power against ttest_power equivalence, G*Power and R pwr.f2."""
    #equivalence ftest, ttest
    for alpha in [0.01, 0.05, 0.1, 0.20, 0.50]:
        res0 = smp.ttest_power(0.01, 200, alpha)
        res1 = smp.ftest_power(0.01, 199, 1, alpha=alpha, ncc=0)
        assert_almost_equal(res1, res0, decimal=6)
    #example from Gplus documentation F-test ANOVA
    #Total sample size:200
    #Effect size "f":0.25
    #Beta/alpha ratio:1
    #Result:
    #Alpha:0.1592
    #Power (1-beta):0.8408
    #Critical F:1.4762
    #Lambda: 12.50000
    res1 = smp.ftest_anova_power(0.25, 200, 0.1592, k_groups=10)
    res0 = 0.8408
    assert_almost_equal(res1, res0, decimal=4)
    # TODO: no class yet
    # examples against R::pwr
    res2 = Holder()
    #> rf = pwr.f2.test(u=5, v=199, f2=0.1**2, sig.level=0.01)
    #> cat_items(rf, "res2.")
    res2.u = 5
    res2.v = 199
    res2.f2 = 0.01
    res2.sig_level = 0.01
    res2.power = 0.0494137732920332
    res2.method = 'Multiple regression power calculation'
    res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
                           alpha=res2.sig_level, ncc=1)
    assert_almost_equal(res1, res2.power, decimal=5)
    res2 = Holder()
    #> rf = pwr.f2.test(u=5, v=199, f2=0.3**2, sig.level=0.01)
    #> cat_items(rf, "res2.")
    res2.u = 5
    res2.v = 199
    res2.f2 = 0.09
    res2.sig_level = 0.01
    res2.power = 0.7967191006290872
    res2.method = 'Multiple regression power calculation'
    res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
                           alpha=res2.sig_level, ncc=1)
    assert_almost_equal(res1, res2.power, decimal=5)
    res2 = Holder()
    #> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
    #> cat_items(rf, "res2.")
    res2.u = 5
    res2.v = 19
    res2.f2 = 0.09
    res2.sig_level = 0.1
    res2.power = 0.235454222377575
    res2.method = 'Multiple regression power calculation'
    res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
                           alpha=res2.sig_level, ncc=1)
    assert_almost_equal(res1, res2.power, decimal=5)
# class based version of two above test for Ftest
class TestFtestAnovaPower(CheckPowerMixin):
    """F-test ANOVA power; reference case from the G*Power documentation."""
    def __init__(self):
        res2 = Holder()
        #example from Gplus documentation F-test ANOVA
        #Total sample size:200
        #Effect size "f":0.25
        #Beta/alpha ratio:1
        #Result:
        #Alpha:0.1592
        #Power (1-beta):0.8408
        #Critical F:1.4762
        #Lambda: 12.50000
        #converted to res2 by hand
        res2.f = 0.25
        res2.n = 200
        res2.k = 10
        res2.alpha = 0.1592
        res2.power = 0.8408
        res2.method = 'Multiple regression power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': res2.f, 'nobs': res2.n,
                     'alpha': res2.alpha, 'power': res2.power}
        # keyword for which we don't look for root:
        # solving for n_bins doesn't work, will not be used in regular usage
        self.kwds_extra = {'k_groups': res2.k} # rootfinding doesn't work
        #self.args_names = ['effect_size','nobs', 'alpha']#, 'k_groups']
        self.cls = smp.FTestAnovaPower
        # precision for test_power
        self.decimal = 4
class TestFtestPower(CheckPowerMixin):
    """F-test power; reference from R pwr.f2.test.

    NOTE(review): res2.v (R's *denominator* df) is mapped to 'df_num' and
    res2.u to 'df_denom' — this mirrors smp.FTestPower's historical argument
    naming; confirm against the FTestPower signature before changing.
    """
    def __init__(self):
        res2 = Holder()
        #> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
        #> cat_items(rf, "res2.")
        res2.u = 5
        res2.v = 19
        res2.f2 = 0.09
        res2.sig_level = 0.1
        res2.power = 0.235454222377575
        res2.method = 'Multiple regression power calculation'
        self.res2 = res2
        self.kwds = {'effect_size': np.sqrt(res2.f2), 'df_num': res2.v,
                     'df_denom': res2.u, 'alpha': res2.sig_level,
                     'power': res2.power}
        # keyword for which we don't look for root:
        # solving for n_bins doesn't work, will not be used in regular usage
        self.kwds_extra = {}
        self.args_names = ['effect_size', 'df_num', 'df_denom', 'alpha']
        self.cls = smp.FTestPower
        # precision for test_power
        self.decimal = 5
def test_power_solver():
    """Exercise solve_power's chain of fallback root-finders.

    Deliberately corrupts the solver start values (start_bqexp/start_ttp)
    so the primary brentq-expanding solver fails and the backups are used;
    cache_fit_res[0] records whether the final fit succeeded.
    """
    # messing up the solver to trigger backup
    nip = smp.NormalIndPower()
    # check result
    es0 = 0.1
    pow_ = nip.solve_power(es0, nobs1=1600, alpha=0.01, power=None, ratio=1,
                           alternative='larger')
    # value is regression test
    assert_almost_equal(pow_, 0.69219411243824214, decimal=5)
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 2)
    # cause first optimizer to fail
    nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
    nip.start_ttp['effect_size'] = 0.14
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))
    nip.start_ttp['effect_size'] = np.nan
    es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
                         alternative='larger')
    assert_almost_equal(es, es0, decimal=4)
    assert_equal(nip.cache_fit_res[0], 1)
    assert_equal(len(nip.cache_fit_res), 4)
    # I let this case fail, could be fixed for some statistical tests
    # (we shouldn't get here in the first place)
    # effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
    assert_raises(ValueError, nip.solve_power, None, nobs1=1600, alpha=0.01,
                  power=0.005, ratio=1, alternative='larger')
def test_power_solver_warn():
    """Check that solve_power emits ConvergenceWarning when all solvers fail."""
    # messing up the solver to trigger warning
    # I wrote this with scipy 0.9,
    # convergence behavior of scipy 0.11 is different,
    # fails at a different case, but is successful where it failed before
    pow_ = 0.69219411243824214 # from previous function
    nip = smp.NormalIndPower()
    # using nobs, has one backup (fsolve)
    nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
    val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                          alternative='larger')
    import scipy
    if scipy.__version__ < '0.10':
        assert_almost_equal(val, 1600, decimal=4)
        assert_equal(nip.cache_fit_res[0], 1)
        assert_equal(len(nip.cache_fit_res), 3)
    # case that has convergence failure, and should warn
    nip.start_ttp['nobs1'] = np.nan
    from statsmodels.tools.sm_exceptions import ConvergenceWarning
    assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
                 alpha=0.01, power=pow_, ratio=1, alternative='larger')
    # this converges with scipy 0.11 ???
    # nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')
    with warnings.catch_warnings(): # python >= 2.6
        warnings.simplefilter("ignore")
        val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
                              alternative='larger')
        assert_equal(nip.cache_fit_res[0], 0)
        assert_equal(len(nip.cache_fit_res), 3)
if __name__ == '__main__':
    # Ad-hoc manual run of a subset of the checks (normally driven by nose).
    test_normal_power_explicit()
    nt = TestNormalIndPower1()
    nt.test_power()
    nt.test_roots()
    nt = TestNormalIndPower_onesamp1()
    nt.test_power()
    nt.test_roots()
| bsd-3-clause |
JPFrancoia/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split

    Downloads (if needed) and shuffles the covertype dataset, binarizes the
    target (class 1 vs rest), splits off the first 522911 rows as training
    data (as in Joachims, 2006) and standardizes the 10 numerical features.
    Returns (X_train, X_test, y_train, y_test).
    """
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binary task: predict "not class 1" (spruce/fir vs rest).
    y = (data['target'] != 1).astype(np.int)
    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    # Leave the remaining (binary indicator) features untouched.
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate estimators keyed by their CLI name (--classifiers).
# NOTE(review): LinearSVC(loss="l2") is the pre-0.18 spelling of
# loss="squared_hinge" — confirm against the installed sklearn version.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # CLI driver: parse options, load the cached split, fit each requested
    # classifier with a common random seed, and print timing/error tables.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())
    print(__doc__)
    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()
        # Seed every *random_state-like parameter with the common seed.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})
        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start
        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start
        error[name] = zero_one_loss(y_test, y_pred)
        print("done")
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    # Report best (lowest error) classifiers first.
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))
    print()
| bsd-3-clause |
manahl/arctic | arctic/serialization/incremental.py | 1 | 10075 | import abc
import hashlib
import logging
from threading import RLock
import numpy as np
import pandas as pd
from bson import Binary
from arctic._config import ARCTIC_AUTO_EXPAND_CHUNK_SIZE
from arctic.serialization.numpy_records import PandasSerializer
from .._compression import compress
from .._config import MAX_DOCUMENT_SIZE
from .._util import NP_OBJECT_DTYPE
from ..exceptions import ArcticSerializationException
# Python 2/3-compatible abstract base class (equivalent to abc.ABC on py3).
ABC = abc.ABCMeta('ABC', (object,), {})
log = logging.getLogger(__name__)
def incremental_checksum(item, curr_sha=None, is_bytes=False):
    """Fold *item* into a running SHA-1 checksum.

    Parameters
    ----------
    item : bytes or numpy array / recarray
        Data to hash.  When ``is_bytes`` is False, ``item`` must expose
        ``tobytes()`` (any numpy ndarray does).
    curr_sha : hashlib sha1 object, optional
        Existing digest to update in place; a fresh sha1 is created when None.
    is_bytes : bool
        Set True when ``item`` is already a raw byte string.

    Returns
    -------
    The updated sha1 object (same object as *curr_sha* when given).
    """
    curr_sha = hashlib.sha1() if curr_sha is None else curr_sha
    # ndarray.tostring() is a deprecated alias of tobytes() (removed in
    # numpy 2.0); the produced byte sequence is identical.
    curr_sha.update(item if is_bytes else item.tobytes())
    return curr_sha
class LazyIncrementalSerializer(ABC):
    """Abstract base for serializers that produce their output lazily, in
    chunks of at most ``chunk_size`` rows/bytes.

    Subclasses must implement ``__len__``, the ``generator`` /
    ``generator_bytes`` properties and ``serialize``.
    """
    def __init__(self, serializer, input_data, chunk_size):
        # Validate constructor arguments early with explicit error messages.
        if chunk_size < 1:
            raise ArcticSerializationException("LazyIncrementalSerializer can't be initialized "
                                               "with chunk_size < 1 ({})".format(chunk_size))
        if not serializer:
            raise ArcticSerializationException("LazyIncrementalSerializer can't be initialized "
                                               "with a None serializer object")
        self.input_data = input_data
        self.chunk_size = chunk_size
        self._serializer = serializer
        self._initialized = False
        self._checksum = None
    @abc.abstractmethod
    def __len__(self):
        pass
    # abc.abstractproperty is deprecated since Python 3.3; the stacked
    # @property + @abc.abstractmethod form below is the documented equivalent.
    @property
    @abc.abstractmethod
    def generator(self):
        pass
    @property
    @abc.abstractmethod
    def generator_bytes(self):
        pass
    @property
    @abc.abstractmethod
    def serialize(self):
        pass
class IncrementalPandasToRecArraySerializer(LazyIncrementalSerializer):
    def __init__(self, serializer, input_data, chunk_size, string_max_len=None):
        """Validate inputs and set up lazily-initialized serialization state.

        serializer must be a PandasSerializer, input_data a DataFrame/Series;
        string_max_len (optional) caps the width of serialized string columns.
        Raises ArcticSerializationException on invalid arguments.
        """
        super(IncrementalPandasToRecArraySerializer, self).__init__(serializer, input_data, chunk_size)
        if not isinstance(serializer, PandasSerializer):
            raise ArcticSerializationException("IncrementalPandasToRecArraySerializer requires a serializer of "
                                               "type PandasSerializer.")
        if not isinstance(input_data, (pd.DataFrame, pd.Series)):
            raise ArcticSerializationException("IncrementalPandasToRecArraySerializer requires a pandas DataFrame or "
                                               "Series as data source input.")
        if string_max_len and string_max_len < 1:
            raise ArcticSerializationException("IncrementalPandasToRecArraySerializer can't be initialized "
                                               "with string_max_len < 1 ({})".format(string_max_len))
        self.string_max_len = string_max_len
        # The state which needs to be lazily initialized
        self._dtype = None
        self._shape = None
        self._rows_per_chunk = 0
        self._total_chunks = 0
        self._has_string_object = False
        # Guards the one-time lazy initialization (see _lazy_init).
        self._lock = RLock()
    def _dtype_convert_to_max_len_string(self, input_ndtype, fname):
        """Widen a string/unicode dtype to fit the longest value in column *fname*.

        Returns (dtype, is_string): non-string dtypes pass through unchanged
        with is_string=False; otherwise the dtype is expanded to
        'S<max_len>' / 'U<max_len>' over all values in self.input_data[fname]
        (an all-empty column keeps the input dtype).
        """
        if input_ndtype.type not in (np.string_, np.unicode_):
            return input_ndtype, False
        type_sym = 'S' if input_ndtype.type == np.string_ else 'U'
        max_str_len = len(max(self.input_data[fname].astype(type_sym), key=len))
        str_field_dtype = np.dtype('{}{:d}'.format(type_sym, max_str_len)) if max_str_len > 0 else input_ndtype
        return str_field_dtype, True
def _get_dtype(self):
# Serializer is being called only if can_convert_to_records_without_objects() has passed,
# which means that the resulting recarray does not contain objects but only numpy types, string, or unicode
# Serialize the first row to obtain info about row size in bytes (cache first few rows only)
# Also raise an Exception early, if data are not serializable
first_chunk, serialized_dtypes = self._serializer.serialize(
self.input_data[0:10] if len(self) > 0 else self.input_data,
string_max_len=self.string_max_len)
# This is the common case, where first row's dtype represents well the whole dataframe's dtype
if serialized_dtypes is None or \
len(self.input_data) == 0 or \
NP_OBJECT_DTYPE not in self.input_data.dtypes.values:
return first_chunk, serialized_dtypes, False
# Reaching here means we have at least one column of type object
# To correctly serialize incrementally, we need to know the final dtype (type and fixed length),
# using length-conversion information from all values of the object columns
dtype_arr = []
has_string_object = False
for field_name in serialized_dtypes.names: # include all column names, along with the expanded multi-index
field_dtype = serialized_dtypes[field_name]
if field_name not in self.input_data or self.input_data.dtypes[field_name] is NP_OBJECT_DTYPE:
# Note: .hasobject breaks for timezone-aware datetime64 pandas columns, so compare with dtype('O')
# if column is an expanded multi index or doesn't contain objects, the serialized 1st row dtype is safe
field_dtype, with_str_object = self._dtype_convert_to_max_len_string(field_dtype, field_name)
has_string_object |= with_str_object
dtype_arr.append((field_name, field_dtype))
return first_chunk, np.dtype(dtype_arr), has_string_object
def _lazy_init(self):
if self._initialized:
return
with self._lock:
if self._initialized: # intentional double check here
return
# Get the dtype of the serialized array (takes into account object types, converted to fixed length strings)
first_chunk, dtype, has_string_object = self._get_dtype()
# Compute the number of rows which can fit in a chunk
rows_per_chunk = 0
if len(self) > 0 and self.chunk_size > 1:
rows_per_chunk = IncrementalPandasToRecArraySerializer._calculate_rows_per_chunk(self.chunk_size, first_chunk)
# Initialize object's state
self._dtype = dtype
shp = list(first_chunk.shape)
shp[0] = len(self)
self._shape = tuple(shp)
self._has_string_object = has_string_object
self._rows_per_chunk = rows_per_chunk
self._total_chunks = int(np.ceil(float(len(self)) / self._rows_per_chunk)) if rows_per_chunk > 0 else 0
self._initialized = True
@staticmethod
def _calculate_rows_per_chunk(max_chunk_size, chunk):
sze = int(chunk.dtype.itemsize * np.prod(chunk.shape[1:]))
sze = sze if sze < max_chunk_size else max_chunk_size
rows_per_chunk = int(max_chunk_size / sze)
if rows_per_chunk < 1 and ARCTIC_AUTO_EXPAND_CHUNK_SIZE:
# If a row size is larger than chunk_size, use the maximum document size
logging.warning('Chunk size of {} is too small to fit a row ({}). '
'Using maximum document size.'.format(max_chunk_size, MAX_DOCUMENT_SIZE))
# For huge rows, fall-back to using a very large document size, less than max-allowed by MongoDB
rows_per_chunk = int(MAX_DOCUMENT_SIZE / sze)
if rows_per_chunk < 1:
raise ArcticSerializationException("Serialization failed to split data into max sized chunks.")
return rows_per_chunk
def __len__(self):
return len(self.input_data)
@property
def shape(self):
self._lazy_init()
return self._shape
@property
def dtype(self):
self._lazy_init()
return self._dtype
@property
def rows_per_chunk(self):
self._lazy_init()
return self._rows_per_chunk
def checksum(self, from_idx, to_idx):
if self._checksum is None:
self._lazy_init()
total_sha = None
for chunk_bytes, dtype in self.generator_bytes(from_idx=from_idx, to_idx=to_idx):
# TODO: what about compress_array here in batches?
compressed_chunk = compress(chunk_bytes)
total_sha = incremental_checksum(compressed_chunk, curr_sha=total_sha, is_bytes=True)
self._checksum = Binary(total_sha.digest())
return self._checksum
def generator(self, from_idx=None, to_idx=None):
return self._generator(from_idx=from_idx, to_idx=to_idx)
def generator_bytes(self, from_idx=None, to_idx=None):
return self._generator(from_idx=from_idx, to_idx=to_idx, get_bytes=True)
def _generator(self, from_idx, to_idx, get_bytes=False):
# Note that the range is: [from_idx, to_idx)
self._lazy_init()
my_length = len(self)
# Take into account default arguments and negative indexing (from end offset)
from_idx = 0 if from_idx is None else from_idx
if from_idx < 0:
from_idx = my_length + from_idx
to_idx = my_length if to_idx is None else min(to_idx, my_length)
if to_idx < 0:
to_idx = my_length + to_idx
# No data, finish iteration
if my_length == 0 or from_idx >= my_length or from_idx >= to_idx:
return
# Perform serialization for each chunk
while from_idx < to_idx:
curr_stop = min(from_idx + self._rows_per_chunk, to_idx)
chunk, _ = self._serializer.serialize(
self.input_data[from_idx: curr_stop],
string_max_len=self.string_max_len,
forced_dtype=self.dtype if self._has_string_object else None)
# Let the gc collect the intermediate serialized chunk as early as possible
chunk = chunk.tostring() if chunk is not None and get_bytes else chunk
yield chunk, self.dtype, from_idx, curr_stop
from_idx = curr_stop
def serialize(self):
return self._serializer.serialize(self.input_data, self.string_max_len)
| lgpl-2.1 |
chrhartm/SORN | utils/plotting.py | 2 | 1697 | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def pretty_mpl_defaults():
    """Install opinionated matplotlib defaults.

    Larger fonts, thicker lines, a Set1-based 7-color cycle (reordered to
    start blue/green/red), remapped single-letter color shortcuts, and
    scientific notation outside roughly [1e-3, 1e3].

    Sources:
    http://nbviewer.ipython.org/gist/olgabot/5357268
    http://stackoverflow.com/questions/24808739/modifying-the-built-in-colors-of-matplotlib
    """
    # Font size (32 was used for the ISI abstract, 18 for slides, 24 for 3 plots/page)
    mpl.rcParams['font.size'] = 20
    # Legends inherit the axes label size and get rounded boxes
    mpl.rcParams['legend.fontsize'] = mpl.rcParams['axes.labelsize']
    mpl.rcParams['legend.fancybox'] = True
    # Line width / marker size
    mpl.rcParams['lines.linewidth'] = 2
    mpl.rcParams['lines.markersize'] = 10
    # Sequential default colormap
    mpl.rcParams['image.cmap'] = 'Blues'  # 'Greys'
    # The circles surrounding scatters (also legend boxes...)
    mpl.rcParams['patch.edgecolor'] = 'white'
    mpl.rcParams['patch.linewidth'] = 0.2
    # Build a 7-color RGB cycle from the Set1 qualitative map
    set1_rgba = plt.cm.Set1(np.linspace(0, 1, 9)).tolist()
    cycle = [rgba[:3] for rgba in set1_rgba][:7]
    # Get the order of the first few colors right: blue, green, red.
    # Anything else doesn't matter that much.
    first_color = cycle.pop(0)
    cycle.insert(2, first_color)
    mpl.rcParams['axes.color_cycle'] = cycle
    # Remap the single-letter color shortcuts onto the new cycle
    import matplotlib.colors as colors
    cdict = colors.colorConverter.colors
    # 'c' and 'm' are only approximate matches to the Set1 colors
    for letter, idx in (('b', 0), ('g', 1), ('r', 2), ('y', 5), ('c', 3), ('m', 4)):
        cdict[letter] = cycle[idx]
    colors.colorConverter.cache = {}
    # Scientific notation outside [-3, 3] decades
    mpl.rcParams['axes.formatter.limits'] = [-3, 3]
| mit |
probcomp/cgpm | tests/test_gpmcc_simple_composite.py | 1 | 8063 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference quality tests for the conditional GPM (aka foreign predictor)
features of State."""
import pytest
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from cgpm.cgpm import CGpm
from cgpm.crosscat.state import State
from cgpm.dummy.fourway import FourWay
from cgpm.dummy.twoway import TwoWay
from cgpm.utils import general as gu
from cgpm.utils import test as tu
def generate_quadrants(rows, rng):
    """Sample rows//4 Gaussian points in each quadrant of the plane.

    Cluster centers sit at (+-2, +-2) with spherical covariance 0.5; the
    points are scatter-plotted (then the figure is closed) and returned
    stacked Q0..Q3.

    Fix: use floor division for the per-quadrant size. Under Python 3,
    ``rows/4`` is a float and numpy rejects non-integer ``size``; ``//``
    behaves identically to the original under Python 2 for int ``rows``.
    """
    quarter = rows // 4
    cov = [[.5, 0], [0, .5]]
    Q0 = rng.multivariate_normal([2, 2], cov=cov, size=quarter)
    Q1 = rng.multivariate_normal([-2, 2], cov=cov, size=quarter)
    Q2 = rng.multivariate_normal([-2, -2], cov=cov, size=quarter)
    Q3 = rng.multivariate_normal([2, -2], cov=cov, size=quarter)
    colors = iter(cm.gist_rainbow(np.linspace(0, 1, 4)))
    for q in [Q0, Q1, Q2, Q3]:
        plt.scatter(q[:, 0], q[:, 1], color=next(colors))
    plt.close('all')
    return np.row_stack((Q0, Q1, Q2, Q3))
def compute_quadrant_counts(T):
    """Count points of an (n, 2) array falling strictly inside each quadrant region.

    Returns [count(x>0, y>0), count(x<0, y>0), count(x>0, y<0), count(x<0, y<0)];
    points lying exactly on an axis are not counted anywhere.
    """
    x_pos, y_pos = T[:, 0] > 0, T[:, 1] > 0
    x_neg, y_neg = T[:, 0] < 0, T[:, 1] < 0
    return [
        np.sum(x_pos & y_pos),
        np.sum(x_neg & y_pos),
        np.sum(x_pos & y_neg),
        np.sum(x_neg & y_neg),
    ]
@pytest.fixture(scope='module')
def state():
    """Module-scoped State over columns [normal, bernoulli, normal].

    Columns 0 and 2 are the x/y coordinates of four Gaussian quadrant
    clusters; column 1 is a bernoulli column. Shared by every test in this
    module, so each test must leave it with no hooked cgpms.
    """
    rng = gu.gen_rng(5)
    rows = 120
    cctypes = ['normal', 'bernoulli', 'normal']
    G = generate_quadrants(rows, rng)
    B, Zv, Zrv = tu.gen_data_table(
        rows, [1], [[.5,.5]], ['bernoulli'], [None], [.95], rng=rng)
    # Stack as [x, y, b] then reorder columns to [x, b, y] to match cctypes.
    T = np.column_stack((G, B.T))[:,[0,2,1]]
    state = State(T, outputs=[0,1,2], cctypes=cctypes, rng=rng)
    state.transition(N=20)
    return state
def test_duplicated_outputs(state):
    """This test ensures that foreign cgpms cannot collide on outputs."""
    # Hooking a cgpm whose output id matches an existing state output must fail.
    for o in state.outputs:
        fourway = FourWay([o], [0,2], rng=state.rng)
        with pytest.raises(ValueError):
            state.compose_cgpm(fourway)
    # Failed compositions must leave no hooked cgpms behind.
    assert len(state.hooked_cgpms) == 0
    assert not state._composite
def test_decompose_cgpm(state):
    """This test ensures that foreign cgpms can be composed and decomposed
    using the returned tokens."""
    four = FourWay([15], [0,2], rng=state.rng)
    two = TwoWay([10], [1], rng=state.rng)
    four_token = state.compose_cgpm(four)
    two_token = state.compose_cgpm(two)
    # Both cgpms registered under their tokens; state flagged composite.
    assert state.hooked_cgpms[four_token] == four
    assert state.hooked_cgpms[two_token] == two
    assert state._composite
    # Removing one leaves the other hooked and the state still composite.
    state.decompose_cgpm(two_token)
    assert state.hooked_cgpms[four_token] == four
    assert state._composite
    # Removing the last hooked cgpm clears the composite flag.
    state.decompose_cgpm(four_token)
    assert len(state.hooked_cgpms) == 0
    assert not state._composite
def test_same_logpdf(state):
    """This test ensures that composing gpmcc with foreign cgpms does not change
    logpdf values for queries not involving the child cgpms."""
    # Get some logpdfs and samples before composing with cgpms.
    logp_before_one = state.logpdf(-1, {0: 1, 1: 1}, None, None)
    logp_before_two = state.logpdf(-1, {0: 1, 1: 1}, {2:1}, None)
    simulate_before_one = state.simulate(-1, [0,1,2], None, None, 10)
    simulate_before_two = state.simulate(-1, [1,2], {0:1}, None)
    # Compose the CGPMs.
    four_index = state.compose_cgpm(FourWay([5], [0,2], rng=state.rng))
    two_index = state.compose_cgpm(TwoWay([10], [1], rng=state.rng))
    # Get some logpdfs and samples after composing with cgpms.
    logp_after_one = state.logpdf(-1, {0: 1, 1: 1})
    logp_after_two = state.logpdf(-1, {0: 1, 1: 1}, {2:1})
    simulate_after_one = state.simulate(-1, [0,1,2], N=10)
    simulate_after_two = state.simulate(-1, [1,2], {0:1})
    # Check logps same.
    assert np.allclose(logp_before_one, logp_after_one)
    assert np.allclose(logp_before_two, logp_after_two)
    # NOTE(review): the simulate_* variables above are drawn but never compared;
    # confirm whether sample equivalence was also meant to be asserted.
    # Decompose the CGPMs.
    state.decompose_cgpm(four_index)
    state.decompose_cgpm(two_index)
def crash_test_simulate_logpdf(state):
    """This crash test ensures foreign cgpms can be composed and queried.

    Fix: the second hooked cgpm was another ``FourWay([5], [0,2])`` -- a
    duplicate of the first on output 5 (which output-collision checks reject,
    see test_duplicated_outputs) -- while the queries below reference output
    10. It must be the ``TwoWay([10], [1])`` used by the sibling tests.
    """
    four_token = state.compose_cgpm(FourWay([5], [0, 2], rng=state.rng))
    two_token = state.compose_cgpm(TwoWay([10], [1], rng=state.rng))
    # Joint queries spanning native outputs (0,1,2) and foreign outputs (5,10).
    state.simulate(-1, [0, 1, 2, 5, 10], N=10)
    state.logpdf(-1, {0: 1, 1: 0, 2: -1, 5: 3, 10: 0})
    state.simulate(-1, [5, 0], {10: 0, 2: -1}, N=10)
    state.logpdf(-1, {5: 1, 0: 2}, {10: 0, 2: -1})
    # Unhook the predictors.
    state.decompose_cgpm(four_token)
    state.decompose_cgpm(two_token)
def test_inference_quality__ci_(state):
    """This test explores inference quality for simulate/logpdf inversion."""
    # Build CGPMs.
    fourway = FourWay([5], [0,2], rng=state.rng)
    twoway = TwoWay([10], [1], rng=state.rng)
    # Compose.
    four_token = state.compose_cgpm(fourway)
    two_token = state.compose_cgpm(twoway)
    # simulate parents (0,2) constraining four_index.
    for v in [0, 1, 2, 3]:
        samples = state.simulate(-1, [0, 2], {5: v}, N=100, accuracy=20)
        simulate_fourway_constrain = np.transpose(
            np.asarray([[s[0] for s in samples], [s[2] for s in samples]]))
        # Scatter the samples with axis lines for visual inspection only.
        fig, ax = plt.subplots()
        ax.scatter(
            simulate_fourway_constrain[:,0],
            simulate_fourway_constrain[:,1])
        ax.hlines(0, *ax.get_xlim(), color='red', linewidth=2)
        ax.vlines(0, *ax.get_ylim(), color='red', linewidth=2)
        ax.grid()
        # The dominant sampled quadrant must match the quadrant implied by v.
        x0, x1 = FourWay.retrieve_y_for_x(v)
        simulate_ideal = np.asarray([[x0, x1]])
        counts_ideal = compute_quadrant_counts(simulate_ideal)
        counts_actual = compute_quadrant_counts(simulate_fourway_constrain)
        assert np.argmax(counts_ideal) == np.argmax(counts_actual)
    # logpdf four_index varying parent constraints.
    for v in [0, 1, 2, 3]:
        x0, x1 = FourWay.retrieve_y_for_x(v)
        lp_exact = fourway.logpdf(None, {5:v}, None, {0:x0, 2:x1})
        lp_fully_conditioned = state.logpdf(
            None, {5:v}, {0:x0, 1:1, 2:x1, 10:0}, accuracy=100)
        lp_missing_one = state.logpdf(None, {5:v}, {0: x0, 1:1}, accuracy=100)
        lp_missing_two = state.logpdf(None, {5:v}, accuracy=100)
        # Conditioning on all parents should recover the exact cgpm logpdf;
        # dropping parents can only lower it.
        assert np.allclose(lp_fully_conditioned, lp_exact)
        assert lp_missing_one < lp_fully_conditioned
        assert lp_missing_two < lp_missing_one
        assert lp_missing_two < lp_fully_conditioned
        # Invert the query conditioning on four_index.
        lp_inverse_evidence = state.logpdf(
            -1, {0:x0, 2:x1}, {5: v}, accuracy=100)
        lp_inverse_no_evidence = state.logpdf(
            -1, {0:x0, 2: x1})
        assert lp_inverse_no_evidence < lp_inverse_evidence
    # Simulate two_index varying parent constraints.
    for v in [0, 1]:
        x1 = TwoWay.retrieve_y_for_x(v)
        lp_exact = twoway.logpdf(None, {10:v}, None, {0:0, 1:x1})
        lp_fully_conditioned = state.logpdf(None, {10: v}, {0:0, 1:x1, 2:1})
        lp_missing_one = state.logpdf(None, {10: v}, {0:0, 2:1}, accuracy=200)
        assert np.allclose(lp_fully_conditioned, lp_exact)
        assert lp_missing_one < lp_fully_conditioned
        # Invert the query conditioning on two_index.
        lp_inverse_evidence = state.logpdf(
            None, {0:0, 1:x1}, {10:v}, accuracy=100)
        lp_inverse_no_evidence = state.logpdf(None, {0:0, 1:x1})
        assert lp_inverse_no_evidence < lp_inverse_evidence
    # Unhook the predictors.
    state.decompose_cgpm(four_token)
    state.decompose_cgpm(two_token)
| apache-2.0 |
kendricktan/rarepepes | data/enhance_x.py | 1 | 1279 | import os
import argparse
import glob
import sys
import skimage.filters as skfilters
import skimage.color as skcolor
import skimage.feature as skfeature
import skimage.io as skio
import skimage.util as skut
import skimage.morphology as skmo
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from tqdm import tqdm
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pepe data X enhancer')
    parser.add_argument('--img-dir', required=True, type=str)
    parser.add_argument('--out-dir', required=True, type=str)
    args = parser.parse_args()
    # Images: collect every .png and .jpg in the input directory (non-recursive)
    files = glob.glob(os.path.join(args.img_dir, '*.png')) + \
        glob.glob(os.path.join(args.img_dir, '*.jpg'))
    raw_out_dir = os.path.join(args.out_dir)
    if not os.path.exists(raw_out_dir):
        os.makedirs(raw_out_dir)
    # Invert and dilate
    for i in tqdm(range(len(files))):
        f = files[i]
        img = skio.imread(f)
        gray = skcolor.rgb2gray(img)
        # Canny edge map, thickened (dilation 3x3) then thinned (erosion 2x2)
        edges = skfeature.canny(gray, sigma=0.69)
        dilated = skmo.dilation(edges, skmo.square(3))
        eroded = skmo.erosion(dilated, skmo.square(2))
        # Outputs are renamed to their enumeration index; original names are lost
        skio.imsave(os.path.join(raw_out_dir, '{}.png'.format(i)), eroded)
| mit |
idlead/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier

# Three base classifiers with fixed seeds for reproducibility
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
# Toy dataset: four 2-D points, two per class
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

# Soft-voting ensemble; the random forest's probabilities are weighted 5x
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])

# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]

# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]

# plotting
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')

# bars for VotingClassifier (drawn in a different color at the last position)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# plot annotations: dashed line separates base models from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/units/artist_tests.py | 6 | 1475 | """
Test unit support with each of the matplotlib primitive artist types
The axes handles unit conversions and the artists keep a pointer to
their axes parent, so you must init the artists with the axes instance
if you want to initialize them with unit data, or else they will not
know how to convert the units to scalars
"""
import random
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.text as text
import matplotlib.collections as collections
from basic_units import cm, inch
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Attach unit converters so values entered below are interpreted in cm
ax.xaxis.set_units(cm)
ax.yaxis.set_units(cm)

# NOTE(review): the `if 0:` blocks below are disabled examples for artist
# types where unit support is not available -- kept for reference.
if 0:
    # test a line collection
    # Not supported at present.
    verts = []
    for i in range(10):
        # a random line segment in inches
        verts.append(zip(*inch*10*np.random.rand(2, random.randint(2,15))))
    lc = collections.LineCollection(verts, axes=ax)
    ax.add_collection(lc)

# test a plain-ol-line; the artist is constructed with axes=ax so unit
# conversion can happen at init time
line = lines.Line2D([0*cm, 1.5*cm], [0*cm, 2.5*cm], lw=2, color='black', axes=ax)
ax.add_line(line)

if 0:
    # test a patch
    # Not supported at present.
    rect = patches.Rectangle( (1*cm, 1*cm), width=5*cm, height=2*cm, alpha=0.2, axes=ax)
    ax.add_patch(rect)

# Text artist, also initialized with the axes for unit conversion
t = text.Text(3*cm, 2.5*cm, 'text label', ha='left', va='bottom', axes=ax)
ax.add_artist(t)

ax.set_xlim(-1*cm, 10*cm)
ax.set_ylim(-1*cm, 10*cm)

#ax.xaxis.set_units(inch)
ax.grid(True)
ax.set_title("Artists with units")
plt.show()
| apache-2.0 |
renchuqiao/BayesianDLM | simulation/simulation_new.py | 1 | 4838 | ##===============
##==== libraries
##===============
import numpy as np
from numpy.linalg import inv
from scipy.stats import wishart
from scipy.stats import bernoulli
import math
from numpy import linalg as LA
# import matplotlib.pyplot as plt
# import seaborn as sns
##=====================
##==== global variables
##=====================
# Module-level simulation state; real values are assigned in the __main__ block.
n_time = 0 #Note: Don't change value here; change below
n_gene = 0
# n_tissue = 0
#The following parameters need to be determined by test-and-trials
#According to Barbara, they used alpha=beta=1 for the uniform on sparsity
#alpha = 1 beta = 2 is a line of y = -2x + 2
# beta = [1,1] #parameters of beta[alpha, beta]
gamma = [1,2] #parameters of NG[mu, kappa, alpha, beta]
normalWishart = [[2,2],2,[[10,5],[5,10]],3] #parameters of NW[mu, kappa, Lambda, v]
dataset = []  # rows appended by simulation(), written out by writeToFile()
##=================================================
##===The Following code is adapted from Shuo's code
##=================================================
##=====================
##==== Sampling
##=====================
##==== sampling from Gaussian (with mean and std)
def sampler_Normal(mu, sigma):
    """Draw one sample from Normal(mu, sigma) using numpy's global RNG."""
    draw = np.random.normal(mu, sigma)
    return draw
##==== sampling from Wishart
def sampler_W(df, scale):
    """Draw a single Wishart(df, scale) matrix via scipy's global random state."""
    matrix = wishart.rvs(df, scale, size=1, random_state=None)
    return matrix
##==== sampling from Gamma
def sampler_Gamma(para1, para2):
    """Draw one Gamma(shape=para1, rate=para2) sample.

    numpy parameterizes the gamma by scale, so the rate is inverted first.
    """
    scale = 1.0 / para2
    draw = np.random.gamma(para1, scale, 1)
    return draw[0]
## ==== End of adaptation
## ==== sampling Beta
def sampler_beta(a, b):
    """Draw a single Beta(a, b) sample using numpy's global RNG."""
    draw = np.random.beta(a, b)
    return draw
## ==== Start to simulate
def simulate_v():
    """Draw a variance-scaling factor v from the module-level Gamma prior."""
    shape_param, rate_param = gamma
    return sampler_Gamma(shape_param, rate_param)
def simulation():
    """Simulate the time series and append each generated row to `dataset`.

    Draws a shared precision matrix from the module Wishart prior, then for
    each of the n_gene outer iterations runs a 50-step latent random walk
    (theta) and emits observations y = f * theta + noise.

    NOTE(review): the outer loop is labeled per-gene but each iteration
    appends 50 full rows; with n_gene=10 that totals 500 == n_time rows.
    Confirm the intended row/gene semantics.
    """
    global n_gene
    global n_time
    global gamma
    global normalWishart
    global dataset
    # One precision matrix shared by all iterations, drawn from NW params (v, Lambda).
    precisionMatrix = sampler_W(normalWishart[3], normalWishart[2])
    for sample in range(n_gene):
        print "start gene ",
        print sample+1
        # precisionMatrix_scaled = []
        # for i in range(len(precisionMatrix)):
        #     temp = []
        #     for j in range(len(precisionMatrix[0])):
        #         temp.append(precisionMatrix[i][j] / normalWishart[1])
        #     precisionMatrix_scaled.append(temp)
        # Prior mean drawn with precision scaled by 1/kappa (kappa = normalWishart[1]).
        mu = np.random.multivariate_normal(normalWishart[0], np.multiply(precisionMatrix, 1/normalWishart[1]))
        theta_0 = np.random.multivariate_normal(mu, precisionMatrix)
        v = simulate_v()
        p_v = np.multiply(precisionMatrix, v)
        # Initial observation-scale vector f around theta_0.
        f = theta_0 + np.random.multivariate_normal([0]*n_gene, p_v)
        theta_prev = theta_0
        count = 0
        while count < 50:
            # Latent state evolves as a Gaussian random walk.
            theta = np.random.multivariate_normal(theta_prev, precisionMatrix)
            v = simulate_v()
            p_v = np.multiply(precisionMatrix, v)
            # Observation: elementwise product of previous output and new state, plus noise.
            y = np.multiply(f, theta) + np.random.multivariate_normal([0]*n_gene, p_v)
            dataset.append(y)
            f = y
            theta_prev = theta
            count += 1
    return
def writeToFile():
    """Write the simulated rows to ./simulated_data.csv.

    One header row ("data,gene 1,...,gene n") followed by n_time rows
    labeled "time i". Fixes: the header previously hard-coded exactly ten
    gene columns regardless of n_gene, and the file handle was not closed
    on error -- the header is now generated from n_gene and the file is
    managed with a `with` block.
    """
    d = np.array(dataset)
    header = "data," + ",".join("gene %d" % (j + 1) for j in range(n_gene)) + "\n"
    with open("./simulated_data.csv", "wb") as f:
        f.write(header)
        for i in range(n_time):
            row = "time %d," % i + ",".join(str(d[i][j]) for j in range(n_gene)) + "\n"
            f.write(row)
if __name__ == '__main__':
    # DEBUG
    print "enter program..."
    # DEBUG
    print "now start preparing the data..."
    ##==================================
    ##==== loading and preparing dataset
    ##==================================
    # data_prepare() # prepare the "dataset" and "markerset"
    # DEBUG
    print "finish data preparation..."
    # DEBUG
    print "now initializing all the variables..."
    ##================================
    ##==== initialize global variables
    ##================================
    n_time = 500 # TODO: this is tunable, and the number 400 comes from results of other more complex methods
    n_gene = 10
    #initialize normal wishart parameter
    # Prior mean: every gene at 2; prior precision: identity, built element-wise.
    mu = []
    precision = []
    for i in range(n_gene):
        mu.append(2)
    for i in range(n_gene):
        temp = []
        for j in range(n_gene):
            if (i == j):
                temp.append(1)
            else:
                temp.append(0)
        precision.append(temp)
    normalWishart[0] = mu
    normalWishart[2] = precision
    normalWishart[3] = n_gene + 1  # degrees of freedom > dimension keeps the Wishart proper
    # NOTE(review): normalWishart[1] (kappa) keeps its module default of 2 -- confirm intended.
    #initialize sparsity parameter
    # beta = [2, 2] # parameters of beta[alpha, beta]
    gamma = [1, 2] # parameters of Gamma[alpha, beta]
    #DEBUG
    print "finish initializing all the variables..."
    #DEBUG
    print "now start simulation..."
    simulation()
    writeToFile()
| gpl-3.0 |
samzhang111/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor

# Create a random dataset: x in [-100, 100), targets are (pi*sin, pi*cos) pairs
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))  # add noise to every 5th sample

# Fit regression model at three depths to contrast under- and over-fitting
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)

# Predict on a dense grid of inputs
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)

# Plot the results: predicted (y1, y2) pairs should trace the underlying circle
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Featuretools/featuretools | featuretools/entityset/entityset.py | 1 | 40323 | import copy
import logging
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
Stores all actual data for a entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
    def __init__(self, id=None, entities=None, relationships=None):
        """Creates EntitySet

        Args:
            id (str) : Unique identifier to associate with this instance
            entities (dict[str -> tuple(pd.DataFrame, str, str)]): Dictionary of
                entities. Entries take the format
                {entity id -> (dataframe, id column, (time_column), (variable_types))}.
                Note that time_column and variable_types are optional.
            relationships (list[(str, str, str, str)]): List of relationships
                between entities. List items are a tuple with the format
                (parent entity id, parent variable, child entity id, child variable).

        Example:
            .. code-block:: python

                entities = {
                    "cards" : (card_df, "id"),
                    "transactions" : (transactions_df, "id", "transaction_time")
                }
                relationships = [("cards", "id", "transactions", "card_id")]
                ft.EntitySet("my-entity-set", entities, relationships)
        """
        self.id = id
        self.entity_dict = {}
        self.relationships = []
        self.time_type = None

        entities = entities or {}
        relationships = relationships or []
        for entity in entities:
            # Unpack the per-entity tuple; time column and variable types are optional.
            df = entities[entity][0]
            index_column = entities[entity][1]
            time_column = None
            variable_types = None
            if len(entities[entity]) > 2:
                time_column = entities[entity][2]
            if len(entities[entity]) > 3:
                variable_types = entities[entity][3]
            self.entity_from_dataframe(entity_id=entity,
                                       dataframe=df,
                                       index=index_column,
                                       time_index=time_column,
                                       variable_types=variable_types)

        for relationship in relationships:
            # Tuples are (parent entity, parent variable, child entity, child variable).
            parent_variable = self[relationship[0]][relationship[1]]
            child_variable = self[relationship[2]][relationship[3]]
            self.add_relationship(Relationship(parent_variable,
                                               child_variable))
        # Invalidate any cached metadata copy built from a previous state.
        self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
    def __dask_tokenize__(self):
        # Token used by dask's tokenization protocol; built from the pickled
        # metadata rather than the full entity data.
        return (EntitySet, cloudpickle.dumps(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
for r in other.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. None if entity doesn't
exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
    @property
    def metadata(self):
        '''Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist.'''
        # Lazily cached copy built by round-tripping through the description
        # serializer (presumably a data-free skeleton -- confirm against
        # serialize/deserialize). Invalidated by reset_data_description().
        if self._data_description is None:
            description = serialize.entityset_to_description(self)
            self._data_description = deserialize.description_to_entityset(description)

        return self._data_description
def reset_data_description(self):
self._data_description = None
    def to_pickle(self, path, compression=None, profile_name=None):
        '''Write entityset in the pickle format, location specified by `path`.
            Path could be a local path or a S3 path.
            If writing to S3 a tar archive of files will be written.

            Args:
                path (str): location on disk to write to (will be created as a directory)
                compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
                profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

            Returns:
                EntitySet : self, to allow chaining.
        '''
        serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
        return self
    def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
        '''Write entityset to disk in the parquet format, location specified by `path`.
            Path could be a local path or a S3 path.
            If writing to S3 a tar archive of files will be written.

            Args:
                path (str): location on disk to write to (will be created as a directory)
                engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
                compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
                profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

            Returns:
                EntitySet : self, to allow chaining.
        '''
        serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
        return self
    def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
        '''Write entityset to disk in the csv format, location specified by `path`.
            Path could be a local path or a S3 path.
            If writing to S3 a tar archive of files will be written.

            Args:
                path (str) : Location on disk to write to (will be created as a directory)
                sep (str) : String of length 1. Field delimiter for the output file.
                encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
                engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
                compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
                profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

            Returns:
                EntitySet : self, to allow chaining.
        '''
        serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
        return self
    def to_dictionary(self):
        '''Return a dictionary description of this entityset, built by
        serialize.entityset_to_description.'''
        return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
return [self.add_relationship(r) for r in relationships][-1]
    def add_relationship(self, relationship):
        """Add a new relationship between entities in the entityset
        Args:
            relationship (Relationship) : Instance of new
                relationship to be added.
        """
        # Adding the same relationship twice is a no-op (warn and return).
        if relationship in self.relationships:
            logger.warning(
                "Not adding duplicate relationship: %s", relationship)
            return self
        # _operations?
        # this is a new pair of entities
        child_e = relationship.child_entity
        child_v = relationship.child_variable.id
        parent_e = relationship.parent_entity
        parent_v = relationship.parent_variable.id
        # Coerce the linking variables' logical types: the child side must be
        # an Id and the parent side an Index. convert_data=False keeps the
        # underlying dataframe columns untouched.
        if not isinstance(child_e[child_v], vtypes.Id):
            child_e.convert_variable_type(variable_id=child_v,
                                          new_type=vtypes.Id,
                                          convert_data=False)
        if not isinstance(parent_e[parent_v], vtypes.Index):
            parent_e.convert_variable_type(variable_id=parent_v,
                                           new_type=vtypes.Index,
                                           convert_data=False)
        # Empty dataframes (as a result of accessing Entity.metadata)
        # default to object dtypes for discrete variables, but
        # indexes/ids default to ints. In this case, we convert
        # the empty column's type to int
        if (child_e.df.empty and child_e.df[child_v].dtype == object and
                is_numeric_dtype(parent_e.df[parent_v])):
            child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
        # The two linked columns must share a Pandas dtype or joins between
        # them would silently misbehave; refuse to add the relationship.
        parent_dtype = parent_e.df[parent_v].dtype
        child_dtype = child_e.df[child_v].dtype
        msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
              u" and {} in {} is Pandas dtype {}."
        if not is_dtype_equal(parent_dtype, child_dtype):
            raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
                                        child_v, child_e.id, child_dtype))
        self.relationships.append(relationship)
        self.reset_data_description()
        return self
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
goal_entity_id (str) : if of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find forward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find backward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
    def entity_from_dataframe(self,
                              entity_id,
                              dataframe,
                              index=None,
                              variable_types=None,
                              make_index=False,
                              time_index=None,
                              secondary_time_index=None,
                              already_sorted=False):
        """
        Load the data for a specified entity from a Pandas DataFrame.
        Args:
            entity_id (str) : Unique id to associate with this entity.
            dataframe (pandas.DataFrame) : Dataframe containing the data.
            index (str, optional): Name of the variable used to index the entity.
                If None, take the first column.
            variable_types (dict[str -> Variable], optional):
                Keys are of variable ids and values are variable types. Used to to
                initialize an entity's store.
            make_index (bool, optional) : If True, assume index does not
                exist as a column in dataframe, and create a new column of that name
                using integers. Otherwise, assume index exists.
            time_index (str, optional): Name of the variable containing
                time data. Type must be in :class:`variables.DateTime` or be
                able to be cast to datetime (e.g. str, float, or numeric.)
            secondary_time_index (dict[str -> Variable]): Name of variable
                containing time data to use a second time index for the entity.
            already_sorted (bool, optional) : If True, assumes that input dataframe
                is already sorted by time. Defaults to False.
        Notes:
            Will infer variable types from Pandas dtype
        Example:
            .. ipython:: python
                import featuretools as ft
                import pandas as pd
                transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
                                                "session_id": [1, 2, 1, 3, 4, 5],
                                                "amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
                                                "transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
                                                "fraud": [True, False, True, False, True, True]})
                es = ft.EntitySet("example")
                es.entity_from_dataframe(entity_id="transactions",
                                         index="id",
                                         time_index="transaction_time",
                                         dataframe=transactions_df)
                es["transactions"]
                es["transactions"].df
        """
        variable_types = variable_types or {}
        # The same column cannot be both the index and the time index.
        if time_index is not None and time_index == index:
            raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
        if time_index is None:
            # A DatetimeTimeIndex type is only valid for the column passed as
            # time_index; reject it anywhere else in variable_types.
            for variable, variable_type in variable_types.items():
                if variable_type == vtypes.DatetimeTimeIndex:
                    raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
        # Construct the Entity and register it in this entityset by its id.
        entity = Entity(
            entity_id,
            dataframe,
            self,
            variable_types=variable_types,
            index=index,
            time_index=time_index,
            secondary_time_index=secondary_time_index,
            already_sorted=already_sorted,
            make_index=make_index)
        self.entity_dict[entity.id] = entity
        self.reset_data_description()
        return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
from key. Values of dictionary
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
break
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in [v for v in additional_variables]:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] +\
[v for v in additional_variables] +\
[v for v in copy_variables]
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2.rename(columns={base_time_index: new_entity_time_index}, inplace=True)
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index},
inplace=True)
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df.set_index(index, inplace=True)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
    def concat(self, other, inplace=False):
        '''Combine entityset with another to create a new entityset with the
        combined data of both entitysets.
        Args:
            other (EntitySet) : Entityset with identical structure (same
                entities, relationships, and variable ids) whose rows are
                appended to this one.
            inplace (bool) : If True, modify this entityset; otherwise work
                on a deep copy and leave self untouched.
        Returns:
            EntitySet : the combined entityset.
        '''
        # Both entitysets must have identical structure before their rows
        # can be merged.
        assert_string = "Entitysets must have the same entities, relationships"\
            ", and variable_ids"
        assert (self.__eq__(other) and
                self.relationships == other.relationships), assert_string
        for entity in self.entities:
            assert entity.id in other.entity_dict, assert_string
            assert (len(self[entity.id].variables) ==
                    len(other[entity.id].variables)), assert_string
            other_variable_ids = [o_variable.id for o_variable in
                                  other[entity.id].variables]
            assert (all([variable.id in other_variable_ids
                         for variable in self[entity.id].variables])), assert_string
        if inplace:
            combined_es = self
        else:
            combined_es = copy.deepcopy(self)
        has_last_time_index = []
        for entity in self.entities:
            self_df = entity.df
            other_df = other[entity.id].df
            combined_df = pd.concat([self_df, other_df])
            if entity.created_index == entity.index:
                # NOTE(review): `col != entity.index or col != entity.time_index`
                # is True for every column unless index == time_index, so this
                # effectively deduplicates on all columns. Likely intended
                # `and` (exclude index and time index) — verify before changing.
                columns = [col for col in combined_df.columns if
                           col != entity.index or col != entity.time_index]
            else:
                columns = [entity.index]
            combined_df.drop_duplicates(columns, inplace=True)
            if entity.time_index:
                combined_df.sort_values([entity.time_index, entity.index], inplace=True)
            else:
                combined_df.sort_index(inplace=True)
            # Remember which entities need their last_time_index rebuilt.
            if (entity.last_time_index is not None or
                    other[entity.id].last_time_index is not None):
                has_last_time_index.append(entity.id)
            combined_es[entity.id].update_data(df=combined_df,
                                               recalculate_last_time_indexes=False)
        combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
        self.reset_data_description()
        return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
    def add_last_time_indexes(self, updated_entities=None):
        """
        Calculates the last time index values for each entity (the last time
        an instance or children of that instance were observed). Used when
        calculating features using training windows
        Args:
            updated_entities (list[str]): List of entity ids to update last_time_index for
                (will update all parents of those entities as well)
        """
        # Generate graph of entities to find leaf entities
        children = defaultdict(list)  # parent --> child mapping
        child_vars = defaultdict(dict)  # parent id -> {child id: linking variable}
        for r in self.relationships:
            children[r.parent_entity.id].append(r.child_entity)
            child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
        updated_entities = updated_entities or []
        if updated_entities:
            # find parents of updated_entities
            parent_queue = updated_entities[:]
            parents = set()
            while len(parent_queue):
                e = parent_queue.pop(0)
                if e in parents:
                    continue
                parents.add(e)
                for parent_id, _ in self.get_forward_entities(e):
                    parent_queue.append(parent_id)
            queue = [self[p] for p in parents]
            to_explore = parents
        else:
            # No subset given: recompute for every entity in the entityset.
            to_explore = set([e.id for e in self.entities[:]])
            queue = self.entities[:]
        explored = set()
        # Invalidate stale values before recomputing.
        for e in queue:
            e.last_time_index = None
        # We will explore children of entities on the queue,
        # which may not be in the to_explore set. Therefore,
        # we check whether all elements of to_explore are in
        # explored, rather than just comparing length
        while not to_explore.issubset(explored):
            entity = queue.pop(0)
            if entity.last_time_index is None:
                # Seed with the entity's own time index, or an all-None series
                # aligned to the index when there is no time index.
                if entity.time_index is not None:
                    lti = entity.df[entity.time_index].copy()
                else:
                    lti = entity.df[entity.index].copy()
                    lti[:] = None
                entity.last_time_index = lti
            if entity.id in children:
                child_entities = children[entity.id]
                # if all children not explored, skip for now
                if not set([e.id for e in child_entities]).issubset(explored):
                    # Now there is a possibility that a child entity
                    # was not explicitly provided in updated_entities,
                    # and never made it onto the queue. If updated_entities
                    # is None then we just load all entities onto the queue
                    # so we didn't need this logic
                    for e in child_entities:
                        if e.id not in explored and e.id not in [q.id for q in queue]:
                            queue.append(e)
                    # Requeue this entity to revisit once children are done.
                    queue.append(entity)
                    continue
                # updated last time from all children
                for child_e in child_entities:
                    if child_e.last_time_index is None:
                        continue
                    link_var = child_vars[entity.id][child_e.id].id
                    lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
                                           entity.index: child_e.df[link_var]})
                    # sort by time and keep only the most recent
                    lti_df.sort_values(['last_time', entity.index],
                                       kind="mergesort", inplace=True)
                    lti_df.drop_duplicates(entity.index,
                                           keep='last',
                                           inplace=True)
                    lti_df.set_index(entity.index, inplace=True)
                    lti_df = lti_df.reindex(entity.last_time_index.index)
                    lti_df['last_time_old'] = entity.last_time_index
                    # Row-wise max of the child's last time and the current
                    # value, ignoring NaNs.
                    lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
                    entity.last_time_index = lti_df
                    entity.last_time_index.name = 'last_time'
            explored.add(entity.id)
        self.reset_data_description()
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
    def plot(self, to_file=None):
        """
        Create a UML diagram-ish graph of the EntitySet.
        Args:
            to_file (str, optional) : Path to where the plot should be saved.
                If set to None (as by default), the plot will not be saved.
        Returns:
            graphviz.Digraph : Graph object that can directly be displayed in
                Jupyter notebooks.
        """
        # graphviz is an optional dependency; fail with install instructions.
        try:
            import graphviz
        except ImportError:
            raise ImportError('Please install graphviz to plot entity sets.' +
                              ' (See https://docs.featuretools.com/getting_started/install.html#installing-graphviz for' +
                              ' details)')
        # Try rendering a dummy graph to see if a working backend is installed
        try:
            graphviz.Digraph().pipe()
        except graphviz.backend.ExecutableNotFound:
            raise RuntimeError(
                "To plot entity sets, a graphviz backend is required.\n" +
                "Install the backend using one of the following commands:\n" +
                "    Mac OS: brew install graphviz\n" +
                "    Linux (Ubuntu): sudo apt-get install graphviz\n" +
                "    Windows: conda install python-graphviz\n" +
                "    For more details visit: https://docs.featuretools.com/getting_started/install.html"
            )
        if to_file:
            # Explicitly cast to str in case a Path object was passed in
            to_file = str(to_file)
            # The output format is inferred from the file extension.
            split_path = to_file.split('.')
            if len(split_path) < 2:
                raise ValueError("Please use a file extension like '.pdf'" +
                                 " so that the format can be inferred")
            format = split_path[-1]
            valid_formats = graphviz.backend.FORMATS
            if format not in valid_formats:
                raise ValueError("Unknown format. Make sure your format is" +
                                 " amongst the following: %s" % valid_formats)
        else:
            format = None
        # Initialize a new directed graph
        graph = graphviz.Digraph(self.id, format=format,
                                 graph_attr={'splines': 'ortho'})
        # Draw entities. '\l' is graphviz's left-justified line break.
        for entity in self.entities:
            variables_string = '\l'.join([var.id + ' : ' + var.type_string  # noqa: W605
                                          for var in entity.variables])
            nrows = entity.shape[0]
            label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string)  # noqa: W605
            graph.node(entity.id, shape='record', label=label)
        # Draw relationships
        for rel in self.relationships:
            # Display the key only once if is the same for both related entities
            if rel._parent_variable_id == rel._child_variable_id:
                label = rel._parent_variable_id
            else:
                label = '%s -> %s' % (rel._parent_variable_id,
                                      rel._child_variable_id)
            graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
        if to_file:
            # Graphviz always appends the format to the file name, so we need to
            # remove it manually to avoid file names like 'file_name.pdf.pdf'
            offset = len(format) + 1  # Add 1 for the dot
            output_path = to_file[:-offset]
            graph.render(output_path, cleanup=True)
        return graph
| bsd-3-clause |
raghavrv/scikit-learn | doc/sphinxext/sphinx_gallery/notebook.py | 16 | 5801 | # -*- coding: utf-8 -*-
r"""
Parser for Jupyter notebooks
============================
Class that holds the Jupyter notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
from functools import partial
import argparse
import json
import re
import sys
from .py_source_parser import split_code_and_text_blocks
def jupyter_notebook_skeleton():
    """Returns a dictionary with the elements of a Jupyter notebook"""
    # Kernel and language metadata are derived from the running interpreter.
    major = str(sys.version_info[0])
    kernelspec = {
        "display_name": "Python " + major,
        "language": "python",
        "name": "python" + major,
    }
    language_info = {
        "codemirror_mode": {
            "name": "ipython",
            "version": sys.version_info[0],
        },
        "file_extension": ".py",
        "mimetype": "text/x-python",
        "name": "python",
        "nbconvert_exporter": "python",
        "pygments_lexer": "ipython" + major,
        "version": '{0}.{1}.{2}'.format(*sys.version_info[:3]),
    }
    return {
        "cells": [],
        "metadata": {
            "kernelspec": kernelspec,
            "language_info": language_info,
        },
        "nbformat": 4,
        "nbformat_minor": 0,
    }
def directive_fun(match, directive):
    """Render an RST note/warning directive body as a Bootstrap alert div."""
    alert_classes = dict(note="info", warning="danger")
    body = match.group(1).strip()
    template = '<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
    return template.format(alert_classes[directive], directive.capitalize(), body)
def rst2md(text):
    """Converts the RST text from the examples docstrigs and comments
    into markdown text for the Jupyter notebooks

    Parameters
    ----------
    text : str
        RST input

    Returns
    -------
    str
        Markdown output
    """
    # '='-over/underlined top-level headings -> markdown '# ' headings.
    top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
    text = re.sub(top_heading, r'# \1', text)
    # '.. math::' blocks -> LaTeX align environments.
    math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^  .+)*)', flags=re.M)
    text = re.sub(math_eq,
                  lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
                      match.group(1).strip()),
                  text)
    # :math:`...` -> $...$ inline math.
    inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
    text = re.sub(inline_math, r'$\1$', text)
    # note/warning directives -> HTML alert divs (see directive_fun).
    directives = ('warning', 'note')
    for directive in directives:
        directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^  .+)*)'
                                  % directive, flags=re.M)
        text = re.sub(directive_re,
                      partial(directive_fun, directive=directive), text)
    # Drop link-target definitions and unwrap :ref: roles.
    links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
    text = re.sub(links, '', text)
    refs = re.compile(r':ref:`')
    text = re.sub(refs, '`', text)
    # Remove '.. contents::' directives and their options.
    contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
                          flags=re.M)
    text = re.sub(contents, '', text)
    # BUG FIX: the replacement string had lost its markdown image syntax
    # ('\n'.format(...) discarded the URI and alt text entirely). Emit
    # '![alt](uri)' as upstream sphinx-gallery does.
    images = re.compile(
        r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
        flags=re.M)
    text = re.sub(
        images, lambda match: '![{1}]({0})\n'.format(
            match.group(1).strip(), (match.group(2) or '').strip()), text)
    return text
def jupyter_notebook(script_blocks):
    """Generate a Jupyter notebook file cell-by-cell
    Parameters
    ----------
    script_blocks: list
        script execution cells
    """
    notebook = jupyter_notebook_skeleton()
    # Every generated notebook starts with inline matplotlib output enabled.
    add_code_cell(notebook, "%matplotlib inline")
    fill_notebook(notebook, script_blocks)
    return notebook
def add_code_cell(work_notebook, code):
    """Append a code cell holding `code` (stripped) to the notebook.
    Parameters
    ----------
    code : str
        Cell content
    """
    work_notebook["cells"].append({
        "cell_type": "code",
        "execution_count": None,
        "metadata": {"collapsed": False},
        "outputs": [],
        "source": [code.strip()],
    })
def add_markdown_cell(work_notebook, text):
    """Append a markdown cell with `text` converted from RST to markdown.
    Parameters
    ----------
    text : str
        Cell content (RST)
    """
    cell = {
        "cell_type": "markdown",
        "metadata": {},
        "source": [rst2md(text)],
    }
    work_notebook["cells"].append(cell)
def fill_notebook(work_notebook, script_blocks):
    """Writes the Jupyter notebook cells
    Parameters
    ----------
    script_blocks : list of tuples
        (label, content) pairs; label 'code' yields a code cell,
        anything else a markdown cell.
    """
    for label, content in script_blocks:
        if label == 'code':
            add_code_cell(work_notebook, content)
        else:
            add_markdown_cell(work_notebook, content + '\n')
def save_notebook(work_notebook, write_file):
    """Saves the Jupyter work_notebook to write_file"""
    # Serialize first, then write the JSON text in one call.
    with open(write_file, 'w') as out_nb:
        out_nb.write(json.dumps(work_notebook, indent=2))
###############################################################################
# Notebook shell utility
def python_to_jupyter_cli(args=None, namespace=None):
    """Exposes the jupyter notebook renderer to the command line
    Takes the same arguments as ArgumentParser.parse_args
    """
    parser = argparse.ArgumentParser(
        description='Sphinx-Gallery Notebook converter')
    parser.add_argument('python_src_file', nargs='+',
                        help='Input Python file script to convert. '
                             'Supports multiple files and shell wildcards'
                             ' (e.g. *.py)')
    args = parser.parse_args(args, namespace)
    for src_file in args.python_src_file:
        file_blocks = split_code_and_text_blocks(src_file)
        print('Converting {0}'.format(src_file))
        example_nb = jupyter_notebook(file_blocks)
        # BUG FIX: str.replace('.py', '.ipynb') replaced the *first*
        # occurrence, corrupting paths that contain '.py' elsewhere
        # (e.g. 'my.python/ex.py'), and wrote over the source file when
        # no '.py' was present. Only swap the trailing extension.
        if src_file.endswith('.py'):
            out_file = src_file[:-3] + '.ipynb'
        else:
            out_file = src_file + '.ipynb'
        save_notebook(example_nb, out_file)
| bsd-3-clause |
MATH497project/MATH497-DiabeticRetinopathy | data_aggregation/data_normalization_sqlite.py | 1 | 2204 | import pandas as pd
from pprint import pprint
import json
import numpy as np
import dataset
# ICD_list table must be re-built from, presumably, ICD_for_Enc due to some entries being
# pre-18th birthday. ICD_list entries are not timestamped!
# NOTE: this script is Python 2 (print statements, dict.iteritems) and relies
# on the deprecated pd.tslib.Timestamp alias — TODO confirm target interpreter.
table_names = ['all_encounter_data', 'demographics', 'encounters', 'family_hist_for_Enc',
               'family_hist_list', 'ICD_for_Enc',
               # 'ICD_list',
               'macula_findings_for_Enc',
               'SL_Lens_for_Enc', 'SNOMED_problem_list', 'systemic_disease_for_Enc', 'systemic_disease_list']
# Groupings of the tables by whether rows key on a person or an encounter
# (declared here but not used below).
person_data = ['demographics','family_hist_list', 'systemic_disease_list', 'SNOMED_problem_list']
encounter_data = ['all_encounter_data', 'encounters', 'family_hist_for_Enc', 'ICD_for_Enc', 'macula_findings_for_Enc',
                  'SL_Lens_for_Enc', 'systemic_disease_for_Enc']
# Hard-coded local directory containing one pickle per table.
path = 'E:\\anil\\IIT Sop\\Term02\\MATH497\\ICO_data\\original_pickle\\'
# read tables into dataframes
dfs = [pd.read_pickle(path + name + '.pickle') for name in table_names]
# Column names carry a UTF-8 BOM from the original export; strip it.
for df in dfs:
    if df is not None:
        df.columns = [col.decode("utf-8-sig") for col in df.columns]
# Replace NaN with None so sqlite stores NULLs; drop missing tables.
dfs = [df.where((pd.notnull(df)), None) for df in dfs if df is not None]
sqlite_file = 'dr_data_sqlite.db'
db = dataset.connect('sqlite:///'+sqlite_file)
# Insert every dataframe into its own table, one transaction per table.
for df_index, df in enumerate(dfs):
    db.begin()
    print table_names[df_index], len(df)
    table = db[table_names[df_index]]
    df_columns = set(df.columns.values)
    enc_key = 'Enc_Nbr'
    person_key = 'Person_Nbr'
    row_count = 0
    for i, dfrow in df.iterrows():
        rowdict = dict(dfrow)
        # Timestamps are stored as ordinal day numbers.
        for k, v in rowdict.iteritems():
            if isinstance(v, pd.tslib.Timestamp):
                rowdict[k] = v.toordinal()
        try:
            table.insert(rowdict)
        # NOTE(review): bare except — on any insert failure, retry once after
        # best-effort decoding every value as utf-8-sig; rows that still fail
        # are only printed, not recorded. Consider narrowing these handlers.
        except:
            for k, v in rowdict.iteritems():
                try:
                    rowdict[k]=rowdict[k].decode("utf-8-sig")
                except:
                    pass
            try:
                table.insert(rowdict)
            except:
                print rowdict
        row_count+=1
        # Progress heartbeat every 10k rows.
        if row_count%10000==0:
            print 'rows inserted :', row_count
db.commit() | mit |
darioizzo/pykep | pykep/phasing/_dbscan.py | 2 | 6958 | class dbscan():
"""
This class can be used to locate areas of the interplanetary space that are 'dense' at one epoch.
Essentially, it locates planet clusters
"""
from pykep.core import AU, EARTH_VELOCITY
def _axis_equal_3d(self, ax):
"""Rescales 3D axis limits using equal scale."""
import numpy
extents = numpy.array(
[getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:, 1] - extents[:, 0]
centers = numpy.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize / 2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def __init__(self, planet_list):
    """
    USAGE: cl = dbscan(planet_list):
    - planet_list = list of pykep planets (typically thousands)
    """
    self._asteroids = planet_list
    # No clustering has been performed yet: all result slots start empty
    # and are populated by cluster().
    for attribute in ('labels', 'n_clusters', 'members',
                      'core_members', '_scaling'):
        setattr(self, attribute, None)
def _orbital_metric(self, r, v):
    """Map a position/velocity pair to the 6-vector used by the 'orbital' metric."""
    from pykep.core import DAY2SEC
    # Position components scaled by the transfer time T (days -> seconds).
    dv2 = [component / (self._T * DAY2SEC) for component in r]
    # Velocity components offset by the scaled position.
    dv1 = [scaled + vel for scaled, vel in zip(dv2, v)]
    return dv1 + dv2
def cluster(self, t, eps=0.125, min_samples=10, metric='orbital', T=180, ref_r=AU, ref_v=EARTH_VELOCITY):
    """
    USAGE: cl.cluster(t, eps=0.125, min_samples=10, metric='orbital', T=180, ref_r=AU, ref_v=EARTH_VELOCITY):
    - t: epoch (in MJD2000)
    - eps: max distance between points in a cluster
    - min_samples: minimum number of samples per cluster
    - metric: one of 'euclidean', 'euclidean_r', 'orbital'
    - T: average transfer time (used in the definition of the 'orbital' metric)
    - ref_r reference radius (used as a scaling factor for r if the metric is 'euclidean' or 'euclidean_r')
    - ref_v reference velocity (used as a scaling factor for v if the metric is 'euclidean')
    """
    import pykep
    import numpy
    from sklearn.cluster import DBSCAN
    self._epoch = pykep.epoch(t)
    # Build the feature matrix self._X and the per-column scaling for the
    # chosen metric.  NOTE(review): an unknown `metric` string silently
    # reuses whatever self._X was set to previously -- confirm intended.
    if metric == 'euclidean':
        # 6D features: flattened (r, v) ephemerides.
        self._X = [
            [elem for tupl in p.eph(self._epoch) for elem in tupl] for p in self._asteroids]
        self._scaling = [ref_r] * 3
        self._scaling += [ref_v] * 3
    elif metric == 'euclidean_r':
        # 3D features: position only.
        self._X = [list(p.eph(self._epoch)[0]) for p in self._asteroids]
        self._scaling = [ref_r] * 3
    elif metric == 'orbital':
        self._T = T
        self._X = [self._orbital_metric(
            *p.eph(self._epoch)) for p in self._asteroids]
        self._scaling = [1.] * 6  # no scaling
    self._X = numpy.array(self._X)
    self._scaling = numpy.array(self._scaling)
    # Normalize columns before clustering; undone again at the end.
    self._X = self._X / self._scaling[None, :]
    self._db = DBSCAN(eps=eps, min_samples=min_samples).fit(self._X)
    self._core_samples = self._db.core_sample_indices_
    self.labels = self._db.labels_
    # DBSCAN labels noise as -1; exclude it from the cluster count.
    self.n_clusters = len(
        set(self.labels)) - (1 if -1 in self.labels else 0)
    self.members = {}
    self.core_members = {}
    for label in set(self.labels):
        if int(label) == -1:
            continue
        self.members[int(label)] = [index[0]
                                    for index in numpy.argwhere(self.labels == label)]
        self.core_members[int(label)] = [
            index for index in self._core_samples if self.labels[index] == label]
    # Restore self._X to unscaled units for later plotting.
    self._X = self._X * self._scaling[None, :]
def pretty(self):
    """Print a human-readable summary of the current clustering results."""
    if self.labels is None:
        # cluster() has not been run yet; nothing to report.
        return
    print("Number of clusters: {:d}".format(self.n_clusters))
    print("Size of dataset: {}".format(str(self._X.shape)))
    print("Scaling: {}".format(str(self._scaling)))
    print("Epoch: {}".format(str(self._epoch)))
    # One line per cluster: id, total members, core members, member indices.
    for cluster_id in list(self.members.keys()):
        total_count = len(self.members[cluster_id])
        core_count = len(self.core_members[cluster_id])
        print("cluster {:d} ({:d} - {:d}): {}".format(
            cluster_id, total_count, core_count, str(self.members[cluster_id])))
def plot(self, ax=None, clusters=None, orbits=False, only_core=False):
    """Plots the clusters.

    - ax: existing 3D axis to draw into (a new figure is created when None)
    - clusters: iterable of cluster ids to draw (all clusters when None)
    - orbits: also draw the full orbits of cluster members
    - only_core: restrict orbit drawing to core members only
    Returns the axis drawn into.
    """
    if self.n_clusters < 1:
        # Nothing to plot before cluster() has found anything.
        return
    import matplotlib.pylab as plt
    from mpl_toolkits.mplot3d import Axes3D
    if ax is None:
        fig = plt.figure()
        axis = fig.add_subplot(111, projection='3d')
    else:
        axis = ax
    axis.view_init(elev=30.0, azim=135.0)
    axis.set_aspect('equal')
    if orbits:
        from pykep.orbit_plots import plot_planet
        members = self.core_members if only_core else self.members
        for label in members if clusters is None else clusters:
            for planet in members[label]:
                plot_planet(
                    self._asteroids[planet], t0=self._epoch, s=0, axes=axis)
    # Keep only non-noise points (label > -.5 excludes DBSCAN's -1 noise),
    # optionally restricted to the requested clusters.
    X, labels = list(zip(*[(x, label) for (x, label) in zip(self._X, self.labels)
                           if label > -.5 and (clusters is None or label in clusters)]))
    data = [[x[0], x[1], x[2]] for x in X]
    axis.scatter(*list(zip(*data)), c=labels, alpha=0.5)
    self._axis_equal_3d(axis)
    if ax is None:
        # We own the figure, so display it.
        plt.show()
    return axis
def plot_cluster_evolution(self, cluster_id=None, only_core=False, epochs=range(7500, 8400, 100), skip=100, alpha=0.3):
    """
    Plots a cluster evolution at 9 prefixed epochs.

    - cluster_id: which cluster to follow through time
    - only_core: restrict to core members of the cluster
    - epochs: exactly 9 epochs (MJD2000), one per 3x3 subplot
    - skip: draw only every `skip`-th asteroid orbit as background
    - alpha: transparency of the cluster-member markers
    Returns the matplotlib figure, or None on invalid input.
    """
    if self.n_clusters < 1:
        print("No clusters have been found yet")
        return
    if cluster_id >= self.n_clusters or cluster_id < 0:
        print(
            "cluster_id should be larger than 0 and smaller than the number of clusters (-1)")
        return
    if len(epochs) != 9:
        print("The epochs requested must be exactly 9 as to assemble 3x3 subplots")
        return
    import matplotlib.pylab as plt
    from mpl_toolkits.mplot3d import Axes3D
    from pykep.orbit_plots import plot_planet
    from pykep import epoch
    if only_core:
        ids = self.core_members[cluster_id]
    else:
        ids = self.members[cluster_id]
    fig = plt.figure()
    for i, ep in enumerate(epochs):
        axis = fig.add_subplot(3, 3, i + 1, projection='3d')
        plt.axis('off')
        # First 11 chars of the epoch repr give a compact date title.
        plt.title(epoch(ep).__repr__()[:11])
        # Faint background: a thinned-out sample of all asteroid orbits.
        for pl in self._asteroids[::skip]:
            axis = plot_planet(pl, axes=axis, alpha=0.05, s=0)
        # Highlighted cluster members at this epoch.
        for cluster_member in ids:
            r, _ = self._asteroids[cluster_member].eph(epoch(ep))
            axis.scatter([r[0]], [r[1]], [r[2]], marker='o', alpha=alpha)
    plt.draw()
    plt.show()
    return fig
| gpl-3.0 |
spgarbet/pysb | pysb/examples/paper_figures/fig6.py | 4 | 9163 | """Produce contact map for Figure 5D from the PySB publication"""
import pysb.integrate
import pysb.util
import numpy as np
import scipy.optimize
import scipy.interpolate
import matplotlib.pyplot as plt
import os
import sys
import inspect
from earm.lopez_embedded import model
# List of model observables and corresponding data file columns for
# point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
var_names = ['nrm_var_ICRP', 'nrm_var_ECRP']
# Load experimental data file (CSV with named columns, incl. 'Time')
data_path = os.path.join(os.path.dirname(__file__), 'fig6_data.csv')
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)
# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_data = np.array([9810.0, 180.0, 1.0])
momp_var = np.array([7245000.0, 3600.0, 1e-9])
# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)
# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space (in log10 units)
bounds_radius = 2
def objective_func(x, rate_mask, lb, ub):
    """Annealing objective: chi-squared error of the model against the data.

    x is the log10-transformed vector of rate parameters; lb/ub are hard
    log-space bounds.  Returns a scalar error (np.inf outside the bounds).
    NOTE: inspects the *caller's* stack frame to print annealing progress,
    so it must be called directly by scipy's anneal machinery.
    """
    caller_frame, _, _, caller_func, _, _ = inspect.stack()[1]
    if caller_func in {'anneal', '_minimize_anneal'}:
        # Peek at the annealer's loop counter and best/current state to
        # emit a progress line once per annealing iteration.
        caller_locals = caller_frame.f_locals
        if caller_locals['n'] == 1:
            print caller_locals['best_state'].cost, caller_locals['current_state'].cost
    # Apply hard bounds
    if np.any((x < lb) | (x > ub)):
        print "bounds-check failed"
        return np.inf
    # Simulate model with rates taken from x (which is log transformed)
    param_values = np.array([p.value for p in model.parameters])
    param_values[rate_mask] = 10 ** x
    solver.run(param_values)
    # Calculate error for point-by-point trajectory comparisons
    e1 = 0
    for obs_name, data_name, var_name in zip(obs_names, data_names, var_names):
        # Get model observable trajectory (this is the slice expression
        # mentioned above in the comment for tspan)
        ysim = solver.yobs[obs_name][::tmul]
        # Normalize it to 0-1
        ysim_norm = ysim / np.nanmax(ysim)
        # Get experimental measurement and variance
        ydata = exp_data[data_name]
        yvar = exp_data[var_name]
        # Compute error between simulation and experiment (chi-squared)
        e1 += np.sum((ydata - ysim_norm) ** 2 / (2 * yvar)) / len(ydata)
    # Calculate error for Td, Ts, and final value for IMS-RP reporter
    # =====
    # Normalize trajectory
    ysim_momp = solver.yobs[momp_obs]
    ysim_momp_norm = ysim_momp / np.nanmax(ysim_momp)
    # Build a spline to interpolate it
    st, sc, sk = scipy.interpolate.splrep(solver.tspan, ysim_momp_norm)
    # Use root-finding to find the point where trajectory reaches 10% and 90%
    t10 = scipy.interpolate.sproot((st, sc-0.10, sk))[0]
    t90 = scipy.interpolate.sproot((st, sc-0.90, sk))[0]
    # Calculate Td as the mean of these times
    td = (t10 + t90) / 2
    # Calculate Ts as their difference
    ts = t90 - t10
    # Get yfinal, the last element from the trajectory
    yfinal = ysim_momp_norm[-1]
    # Build a vector of the 3 variables to fit
    momp_sim = [td, ts, yfinal]
    # Perform chi-squared calculation against mean and variance vectors
    e2 = np.sum((momp_data - momp_sim) ** 2 / (2 * momp_var)) / 3
    # Calculate error for final cPARP value (ensure all PARP is cleaved)
    cparp_final = model.parameters['PARP_0'].value
    cparp_final_var = .01
    cparp_final_sim = solver.yobs['cPARP'][-1]
    e3 = (cparp_final - cparp_final_sim) ** 2 / (2 * cparp_final_var)
    error = e1 + e2 + e3
    return error
def estimate(start_values=None):
    """Estimate parameter values by fitting to data.
    Parameters
    ==========
    start_values : numpy array of floats, optional
        Starting parameter values. Taken from model's nominal parameter values
        if not specified.
    Returns
    =======
    numpy array of floats, containing fitted parameter values.
    """
    # Set starting position to nominal parameter values if not specified
    if start_values is None:
        start_values = nominal_values
    else:
        assert start_values.shape == nominal_values.shape
    # Log-transform the starting position
    x0 = np.log10(start_values[rate_mask])
    # Displacement size for annealing moves
    dx = .02
    # The default 'fast' annealing schedule uses the 'lower' and 'upper'
    # arguments in a somewhat counterintuitive way. See
    # http://projects.scipy.org/scipy/ticket/1126 for more information. This is
    # how to get the search to start at x0 and use a displacement on the order
    # of dx (note that this will affect the T0 estimation which *does* expect
    # lower and upper to be the absolute expected bounds on x).
    lower = x0 - dx / 2
    upper = x0 + dx / 2
    # Log-transform the rate parameter values
    xnominal = np.log10(nominal_values[rate_mask])
    # Hard lower and upper bounds on x
    lb = xnominal - bounds_radius
    ub = xnominal + bounds_radius
    # Perform the annealing
    args = [rate_mask, lb, ub]
    (xmin, Jmin, Tfinal, feval, iters, accept, retval) = \
        scipy.optimize.anneal(objective_func, x0, full_output=True,
                              maxiter=4000, quench=0.5,
                              lower=lower, upper=upper,
                              args=args)
    # Construct vector with resulting parameter values (un-log-transformed)
    params_estimated = start_values.copy()
    params_estimated[rate_mask] = 10 ** xmin
    # Display annealing results.  NOTE: this loop reads the *local variable
    # names* via locals(), so the names unpacked above must not be renamed.
    for v in ('xmin', 'Jmin', 'Tfinal', 'feval', 'iters', 'accept', 'retval'):
        print "%s: %s" % (v, locals()[v])
    return params_estimated
def display(params_estimated):
    """Plot data and simulations (nominal vs. fitted) in a 3-panel figure."""
    # Simulate model with nominal parameters and construct a matrix of the
    # trajectories of the observables of interest, normalized to 0-1.
    solver.run()
    obs_names_disp = ['mBid', 'aSmac', 'cPARP']
    obs_totals = [model.parameters[n].value for n in ('Bid_0', 'Smac_0', 'PARP_0')]
    sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
    sim_obs_norm = (sim_obs / obs_totals).T
    # Do the same with the estimated parameters
    solver.run(params_estimated)
    sim_est_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
    sim_est_obs_norm = (sim_est_obs / obs_totals).T
    # Plot data with simulation trajectories both before and after fitting
    color_data = '#C0C0C0'
    color_orig = '#FAAA6A'
    color_est = '#83C98E'
    # Panel 1: IC-RP/Bid cleavage vs. data (error bars are std devs).
    plt.subplot(311)
    plt.errorbar(exp_data['Time'], exp_data['norm_ICRP'],
                 yerr=exp_data['nrm_var_ICRP']**0.5, c=color_data, linewidth=2,
                 elinewidth=0.5)
    plt.plot(solver.tspan, sim_obs_norm[0], color_orig, linewidth=2)
    plt.plot(solver.tspan, sim_est_obs_norm[0], color_est, linewidth=2)
    plt.ylabel('Fraction of\ncleaved IC-RP/Bid', multialignment='center')
    plt.axis([0, 20000, -0.2, 1.2])
    # Panel 2: Smac release, with the measured MOMP delay time as a vline.
    plt.subplot(312)
    plt.vlines(momp_data[0], -0.2, 1.2, color=color_data, linewidth=2)
    plt.plot(solver.tspan, sim_obs_norm[1], color_orig, linewidth=2)
    plt.plot(solver.tspan, sim_est_obs_norm[1], color_est, linewidth=2)
    plt.ylabel('Td / Fraction of\nreleased Smac', multialignment='center')
    plt.axis([0, 20000, -0.2, 1.2])
    # Panel 3: EC-RP/PARP cleavage vs. data.
    plt.subplot(313)
    plt.errorbar(exp_data['Time'], exp_data['norm_ECRP'],
                 yerr=exp_data['nrm_var_ECRP']**0.5, c=color_data, linewidth=2,
                 elinewidth=0.5)
    plt.plot(solver.tspan, sim_obs_norm[2], color_orig, linewidth=2)
    plt.plot(solver.tspan, sim_est_obs_norm[2], color_est, linewidth=2)
    plt.ylabel('Fraction of\ncleaved EC-RP/PARP', multialignment='center')
    plt.xlabel('Time (s)')
    plt.axis([0, 20000, -0.2, 1.2])
    plt.show()
if __name__ == '__main__':
    # Prefer previously fitted parameters shipped alongside the earm
    # package; fall back to running the (slow) annealing fit.
    params_estimated = None
    try:
        earm_path = sys.modules['earm'].__path__[0]
        fit_file = os.path.join(earm_path, '..', 'EARM_2_0_M1a_fitted_params.txt')
        # Second column of the file holds the fitted values.
        params_estimated = np.genfromtxt(fit_file)[:,1].copy()
    except IOError:
        pass
    if params_estimated is None:
        # Fixed seed so the annealing run is reproducible.
        np.random.seed(1)
        params_estimated = estimate()
    display(params_estimated)
| bsd-2-clause |
timothydmorton/qa_explorer | explorer/plots.py | 1 | 17724 | from functools import partial
import param
import numpy as np
import pandas as pd
import holoviews as hv
import datashader as ds
import colorcet as cc
from param import ParameterizedFunction, ParamOverrides
from holoviews.core.operation import Operation
from holoviews.streams import Stream, BoundsXY, LinkedStream
from holoviews.plotting.bokeh.callbacks import Callback
from holoviews.operation.datashader import datashade, dynspread
from holoviews.operation import decimate
decimate.max_samples = 5000
import parambokeh
from bokeh.palettes import Greys9
# Define Stream class that stores filters for various Dimensions
class FilterStream(Stream):
    """
    Stream to apply arbitrary filtering on a Dataset.
    Many of the plotting functions accept a `FilterStream` object;
    the utility of this is that you can define a single `FilterStream`,
    and if you connect the same one to all your plots, then all of the
    selections/flag selections/etc. can be linked.
    See the demo notebooks for an example of usage.
    """
    # The param declarations below are harvested by the param metaclass
    # and become the stream's contents.
    filter_range = param.Dict(default={}, doc="""
        Ranges of parameters to select.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
class FlagSetter(Stream):
    """Stream for setting flags
    Most useful in context of a parambokeh widget, e.g.:
    from explorer.plots import FlagSetter
    import parambokeh
    flag_setter = FlagSetter(filter_stream=filter_stream, flags=data.flags, bad_flags=data.flags)
    parambokeh.Widgets(flag_setter, callback=flag_setter.event, push=False, on_init=True)
    Where `filter_stream` has been previously defined and connected to other plots
    for which you want to see points with certain flags shown/hidden/etc.
    """
    flags = param.ListSelector(default=[], objects=[], doc="""
        Flags to select""")
    bad_flags = param.ListSelector(default=[], doc="""
        Flags to ignore""")
    def __init__(self, filter_stream, **kwargs):
        super(FlagSetter, self).__init__(**kwargs)
        # Shared stream that linked plots subscribe to.
        self.filter_stream = filter_stream
    def event(self, **kwargs):
        # Forward flag changes to the shared FilterStream so every plot
        # connected to it updates together.
        self.filter_stream.event(**kwargs)
class SkyFlags(Stream):
    """Experimental; not currently used for anything.

    Holds flag selections plus a colormap name for a datashaded sky plot,
    regenerating the output plot when the colormap changes and forwarding
    all other events to the shared FilterStream.
    """
    flags = param.ListSelector(default=[], objects=[])
    bad_flags = param.ListSelector(default=[], doc="""
        Flags to ignore""")
    cmap = param.String(default='coolwarm')  # make this a list to select from
    output = parambokeh.view.Plot()
    def __init__(self, dset, vdim, filter_stream, **kwargs):
        # BUG FIX: the original called super(FlagSetter, self) (copy-paste
        # from FlagSetter), skipping this class in the MRO.
        super(SkyFlags, self).__init__(**kwargs)
        self.dset = dset
        self.filter_stream = filter_stream
        self.vdim = vdim
    def points(self, *args, **kwargs):
        # BUG FIX: hv.util.Dyanmic was a typo and `data.ds` an undefined
        # name; use this instance's dataset instead.
        return hv.util.Dynamic(self.dset, operation=skypoints,
                               streams=[self.filter_stream])
    def event(self, **kwargs):
        if not self.output or any(k in kwargs for k in ['cmap']):
            # Fall back to the current cmap when the event carries none
            # (the original raised KeyError on the first event without it).
            cmap_name = kwargs.get('cmap', self.cmap)
            self.output = dynspread(datashade(self.points, cmap=cc.palette[cmap_name]))
        else:
            self.filter_stream.event(**kwargs)
        # super(SkyFlags, self).event(**kwargs)
#######################################################################################
# All this enables bokeh "reset" button to also reset a stream (such as FilterStream) #
# Not sure if some of this should be updated for newer version of HV, as this was put #
# together circa v1.9.0, I think
class ResetCallback(Callback):
    """Bokeh callback that listens for the toolbar 'reset' event on a plot."""
    models = ['plot']      # attach to the top-level bokeh plot model
    on_events = ['reset']  # fire when the user presses the reset tool
class Reset(LinkedStream):
    """Linked stream that fires when the bokeh plot is reset."""
    def __init__(self, *args, **params):
        # NOTE(review): `self` is also passed positionally to the parent
        # initializer here; this mirrors the original wiring -- confirm
        # against the holoviews LinkedStream API before changing.
        super(Reset, self).__init__(self, *args, **dict(params, transient=True))
# Register the callback so bokeh reset events are routed to Reset streams.
Stream._callbacks['bokeh'][Reset] = ResetCallback
#######################################################################################
class filter_dset(Operation):
    """Process a dataset based on FilterStream state (filter_range, flags, bad_flags)
    This is used in many applications to define dynamically selected `holoviews.Dataset`
    objects.
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # Combine range filters with boolean flag selections: selected
        # flags must be True, bad flags must be False.
        filter_dict = self.p.filter_range.copy()
        filter_dict.update({f:True for f in self.p.flags})
        filter_dict.update({f:False for f in self.p.bad_flags})
        # filter_range defaults to {} (never None), so this select
        # normally always runs.
        if self.p.filter_range is not None:
            dset = dset.select(**filter_dict)
        return dset
# Define Operation that filters based on FilterStream state (which provides the filter_range)
class filterpoints(Operation):
    """Process a dataset based on FilterStream state (filter_range, flags, bad_flags)
    This is used in many applications to define dynamically selected `holoviews.Points`
    objects.
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    # BUG FIX: the doc strings for xdim/ydim were passed positionally after
    # the `default` keyword argument, which is a SyntaxError in Python;
    # they must be passed as the `doc` keyword.
    xdim = param.String(default='x', doc="Name of x-dimension")
    ydim = param.String(default='y', doc="Name of y-dimension")
    set_title = param.Boolean(default=False, doc="""
        Whether to relabel the Points with a mean/std summary title.""")
    def _process(self, dset, key=None):
        # Apply range/flag filtering, then promote the chosen x/y columns
        # to key dimensions of a Points element; everything else becomes
        # a value dimension.
        dset = filter_dset(dset, flags=self.p.flags, bad_flags=self.p.bad_flags,
                           filter_range=self.p.filter_range)
        kdims = [dset.get_dimension(self.p.xdim), dset.get_dimension(self.p.ydim)]
        vdims = [dim for dim in dset.dimensions() if dim.name not in kdims]
        pts = hv.Points(dset, kdims=kdims, vdims=vdims)
        if self.p.set_title:
            # Summarize the y-column directly in the element label.
            ydata = dset.data[self.p.ydim]
            title = 'mean = {:.3f}, std = {:.3f} ({:.0f})'.format(ydata.mean(),
                                                                  ydata.std(),
                                                                  len(ydata))
            pts = pts.relabel(title)
        return pts
class summary_table(Operation):
    """Build a count/mean/std summary Table of the filtered dataset."""
    ydim = param.String(default=None)  # summarize only this column when set
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # NOTE: local `ds` shadows the module-level datashader alias here.
        ds = filter_dset(dset, filter_range=self.p.filter_range,
                         flags=self.p.flags, bad_flags=self.p.bad_flags)
        if self.p.ydim is None:
            # Summarize every value dimension of the (unfiltered) input.
            cols = [dim.name for dim in dset.vdims]
        else:
            cols = [self.p.ydim]
        df = ds.data[cols]
        return hv.Table(df.describe().loc[['count', 'mean', 'std']])
def notify_stream(bounds, filter_stream, xdim, ydim):
    """Subscriber for a BoundsXY stream: push a box selection into a FilterStream.

    The (left, bottom, right, top) selection box is translated into
    per-dimension (low, high) ranges which replace any previous selection
    on those two dimensions.  (To intersect with the previous selection
    instead, combine the old and new bounds before assigning.)
    """
    left, bottom, right, top = bounds
    # Copy so the stream's current state is not mutated in place.
    updated = dict(filter_stream.filter_range)
    updated[xdim] = (left, right)
    updated[ydim] = (bottom, top)
    filter_stream.event(filter_range=updated)
def reset_stream(filter_stream):
    """Clear every selection and flag choice held by *filter_stream*."""
    cleared = dict(filter_range={}, flags=[], bad_flags=[])
    filter_stream.event(**cleared)
class scattersky(ParameterizedFunction):
    """
    Creates two datashaded views from a Dataset.
    First plot is an x-y scatter plot, with colormap according to density
    of points; second plot is a sky plot where the colormap corresponds
    to the average y values of the first plot in each datashaded pixel.
    """
    xdim = param.String(default='x', doc="""
        Dimension of the dataset to use as x-coordinate""")
    ydim = param.String(default='y0', doc="""
        Dimension of the dataset to use as y-coordinate""")
    scatter_cmap = param.String(default='fire', doc="""
        Colormap to use for the scatter plot""")
    sky_cmap = param.String(default='coolwarm', doc="""
        Colormap to use for the sky plot""")
    height = param.Number(default=300, doc="""
        Height in pixels of the combined layout""")
    width = param.Number(default=900, doc="""
        Width in pixels of the combined layout""")
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream,
                                        doc="Stream to which selection ranges get added.")
    show_rawsky = param.Boolean(default=False, doc="""
        Whether to show the "unselected" sky points in greyscale when there is a selection.""")
    def __call__(self, dset, **params):
        self.p = ParamOverrides(self, params)
        if self.p.ydim not in dset.dimensions():
            raise ValueError('{} not in Dataset.'.format(self.p.ydim))
        # Set up scatter plot (density-coloured x-y view)
        scatter_filterpoints = filterpoints.instance(xdim=self.p.xdim, ydim=self.p.ydim)
        scatter_pts = hv.util.Dynamic(dset, operation=scatter_filterpoints,
                                      streams=[self.p.filter_stream])
        scatter_opts = dict(plot={'height':self.p.height, 'width':self.p.width - self.p.height},
                            # 'tools':['box_select']},
                            norm=dict(axiswise=True))
        scatter_shaded = datashade(scatter_pts, cmap=cc.palette[self.p.scatter_cmap])
        scatter = dynspread(scatter_shaded).opts(**scatter_opts)
        # Set up sky plot (ra/dec view coloured by mean ydim per pixel)
        sky_filterpoints = filterpoints.instance(xdim='ra', ydim='dec', set_title=False)
        sky_pts = hv.util.Dynamic(dset, operation=sky_filterpoints,
                                  streams=[self.p.filter_stream])
        sky_opts = dict(plot={'height':self.p.height, 'width':self.p.height},
                        # 'tools':['box_select']},
                        norm=dict(axiswise=True))
        sky_shaded = datashade(sky_pts, cmap=cc.palette[self.p.sky_cmap],
                               aggregator=ds.mean(self.p.ydim), height=self.p.height,
                               width=self.p.width)
        sky = dynspread(sky_shaded).opts(**sky_opts)
        # Set up summary table (count/mean/std of the selected points)
        table = hv.util.Dynamic(dset, operation=summary_table.instance(ydim=self.p.ydim),
                                streams=[self.p.filter_stream])
        table = table.opts(plot={'width':200})
        # Set up BoundsXY streams to listen to box_select events and notify FilterStream
        scatter_select = BoundsXY(source=scatter)
        scatter_notifier = partial(notify_stream, filter_stream=self.p.filter_stream,
                                   xdim=self.p.xdim, ydim=self.p.ydim)
        scatter_select.add_subscriber(scatter_notifier)
        sky_select = BoundsXY(source=sky)
        sky_notifier = partial(notify_stream, filter_stream=self.p.filter_stream,
                               xdim='ra', ydim='dec')
        sky_select.add_subscriber(sky_notifier)
        # Reset: clear all filters when the bokeh reset tool is pressed
        reset = Reset(source=scatter)
        reset.add_subscriber(partial(reset_stream, self.p.filter_stream))
        # Greyscale background of *all* points behind the selection.
        raw_scatter = datashade(scatter_filterpoints(dset), cmap=Greys9[::-1][:5])
        if self.p.show_rawsky:
            raw_sky = datashade(sky_filterpoints(dset), cmap=Greys9[::-1][:5])
            return (table + raw_scatter*scatter + raw_sky*sky)
        else:
            return (table + raw_scatter*scatter + sky)
class multi_scattersky(ParameterizedFunction):
    """Layout of multiple scattersky plots, one for each vdim in dset,
    all linked through a single shared FilterStream.
    """
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream)
    ignored_dimensions = param.List(default=['x', 'ra', 'dec', 'label', 'ccdId', 'patchId'])
    height = param.Number(default=300)
    width = param.Number(default=900)
    def _get_ydims(self, dset):
        # Get dimensions from first Dataset type found in input
        return [dim.name for dim in dset.traverse(lambda x: x, [hv.Dataset])[0].vdims]
        # return [dim.name for dim in dset.traverse(lambda x: x, [hv.Dataset])[0].dimensions()
        #         if dim.name not in self.p.ignored_dimensions]
    def __call__(self, dset, **params):
        self.p = param.ParamOverrides(self, params)
        # One scattersky row per value dimension, three columns wide.
        return hv.Layout([scattersky(dset, filter_stream=self.p.filter_stream,
                                     ydim=ydim, height=self.p.height, width=self.p.width)
                          for ydim in self._get_ydims(dset)]).cols(3).opts(plot={'merge_tools':False})
class skypoints(Operation):
    """Creates Points with ra, dec as kdims, and interesting stuff as vdims
    """
    filter_range = param.Dict(default={}, doc="""
        Dictionary of filter bounds.""")
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def _process(self, dset, key=None):
        # Filter first, then expose every value dimension (plus 'label')
        # on the resulting ra/dec Points.
        dset = filter_dset(dset, filter_range=self.p.filter_range,
                           flags=self.p.flags, bad_flags=self.p.bad_flags)
        return hv.Points(dset, kdims=['ra', 'dec'], vdims=dset.vdims + ['label'])
class skyplot(ParameterizedFunction):
    """Datashaded + decimated RA/dec plot, with colormap of third dimension
    """
    cmap = param.String(default='coolwarm', doc="""
        Colormap to use.""")
    aggregator = param.ObjectSelector(default='mean', objects=['mean', 'std', 'count'], doc="""
        Aggregator for datashading.""")
    vdim = param.String(default=None, doc="""
        Dimension to use for colormap.""")
    width = param.Number(default=None)
    height = param.Number(default=None)
    decimate_size = param.Number(default=5, doc="""
        Size of (invisible) decimated points.""")
    filter_stream = param.ClassSelector(default=FilterStream(), class_=FilterStream)
    flags = param.List(default=[], doc="""
        Flags to select.""")
    bad_flags = param.List(default=[], doc="""
        Flags to ignore""")
    def __call__(self, dset, **params):
        self.p = ParamOverrides(self, params)
        # Default to the dataset's first value dimension for the colormap.
        if self.p.vdim is None:
            vdim = dset.vdims[0].name
        else:
            vdim = self.p.vdim
        pts = hv.util.Dynamic(dset, operation=skypoints,
                              streams=[self.p.filter_stream])
        # Map the aggregator name onto the corresponding datashader reduction.
        if self.p.aggregator == 'mean':
            aggregator = ds.mean(vdim)
        elif self.p.aggregator == 'std':
            aggregator = ds.std(vdim)
        elif self.p.aggregator == 'count':
            aggregator = ds.count()
        kwargs = dict(cmap=cc.palette[self.p.cmap],
                      aggregator=aggregator)
        if self.p.width is not None:
            kwargs.update(width=self.p.width, height=self.p.height)
            # streams=[hv.streams.RangeXY])
        # Invisible decimated points carry hover/box_select tools on top of
        # the datashaded image.
        decimate_opts = dict(plot={'tools':['hover', 'box_select']},
                             style={'alpha':0, 'size':self.p.decimate_size,
                                    'nonselection_alpha':0})
        decimated = decimate(pts).opts(**decimate_opts)
        sky_shaded = datashade(pts, **kwargs)
        return dynspread(sky_shaded) * decimated
class skyplot_layout(ParameterizedFunction):
    """Layout of skyplots with linked crosshair
    """
    crosshair = param.Boolean(default=True)
    def __call__(self, skyplots, **params):
        self.p = param.ParamOverrides(self, params)
        # Single pointer stream shared by every plot so the crosshair
        # tracks the mouse across the whole layout.
        pointer = hv.streams.PointerXY(x=0, y=0)
        cross_opts = dict(style={'line_width':1, 'color':'black'})
        cross_dmap = hv.DynamicMap(lambda x, y: (hv.VLine(x).opts(**cross_opts) *
                                                 hv.HLine(y).opts(**cross_opts)), streams=[pointer])
        plots = []
        for s in skyplots:
            if self.p.crosshair:
                # Overlay the crosshair but keep the original label.
                plot = (s*cross_dmap).relabel(s.label)
            else:
                plot = s
            plots.append(plot)
        return hv.Layout(plots)
class skyshade(Operation):
    """Experimental: datashade an element with a selectable aggregator."""
    cmap = param.String(default='coolwarm')
    aggregator = param.ObjectSelector(default='mean', objects=['mean', 'std', 'count'])
    width = param.Number(default=None)
    height = param.Number(default=None)
    vdim = param.String(default='y')  # dimension driving the colormap
    decimate_size = param.Number(default=5)
    max_samples = param.Number(default=10000)
    def _process(self, element, key=None):
        vdim = self.p.vdim
        # Translate the aggregator name into a datashader reduction.
        if self.p.aggregator == 'mean':
            aggregator = ds.mean(vdim)
        elif self.p.aggregator == 'std':
            aggregator = ds.std(vdim)
        elif self.p.aggregator == 'count':
            aggregator = ds.count()
        kwargs = dict(cmap=cc.palette[self.p.cmap],
                      aggregator=aggregator)
        if self.p.width is not None:
            kwargs.update(width=self.p.width, height=self.p.height,
                          streams=[hv.streams.RangeXY])
        datashaded = dynspread(datashade(element, **kwargs))
        # decimate_opts = dict(plot={'tools':['hover', 'box_select']},
        #                      style={'alpha':0, 'size':self.p.decimate_size,
        #                             'nonselection_alpha':0})
        # decimated = decimate(element, max_samples=self.p.max_samples).opts(**decimate_opts)
        return datashaded #* decimated
| mit |
DTMilodowski/EO_data_processing | src/plot_EO_data/plot_forest_loss_maps.py | 1 | 2311 | # This function plots up a series of maps illustrating forest loss through time,
# as modelled based on global forest watch annual forest loss distributed temporally
# based on the "seasonality" of FORMA
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap, shiftgrid
import numpy as np
import sys
sys.path.append('/home/dmilodow/DataStore_DTM/FOREST2020/EOdata/EO_data_processing/src/plot_EO_data/colormap/')
import colormaps as cmaps
plt.register_cmap(name='viridis', cmap=cmaps.viridis)
plt.set_cmap(cmaps.viridis)
# Some basic info for maps: bounding box (deg) and grid resolution.
N = 33.
S = 14.
E = -86.
W = -118.
dY = 0.125
dX = 0.125
lat = np.arange(S,N,dY)#+dY/2. # shifting to cell centre
lon = np.arange(W,E,dX)#+dX/2. # shifting to cell centre
lon_grid,lat_grid = np.meshgrid(lon,lat)
# Load in GFW (Global Forest Watch) monthly loss; axis 0 is the month.
GFW = np.load('GFW_monthly.npz')['arr_0']
N_months=GFW.shape[0]
cum_GFW = np.cumsum(GFW,axis=0)
# NOTE(review): ForestLoss is computed but never used below.
ForestLoss=np.ma.masked_where(GFW<=0,GFW)
# Now make the plots: one two-panel figure (monthly + cumulative) per month.
for i in range(0,N_months):
    # Figure number 1 is reused every iteration, so panels are redrawn in place.
    fig = plt.figure(1, facecolor='White',figsize=[5,8])
    ax1a= plt.subplot2grid((2,1),(0,0))
    ax1a.set_title('Monthly forest loss')
    m1a = Basemap(projection='aea', lat_0=(N+S)/2., lon_0=(E+W)/2., llcrnrlat=S, urcrnrlat=N,llcrnrlon=W, urcrnrlon=E, resolution='i')
    m1a.ax = ax1a
    x,y = m1a(lon_grid,lat_grid)
    im1 = m1a.pcolormesh(x,y,GFW[i,:,:],vmin=0.0,vmax=0.004, rasterized=True, edgecolor='0.6', linewidth=0)
    cbar = m1a.colorbar(im1)
    cbar.solids.set_edgecolor("face")
    cbar.set_ticks([0,0.002,0.004])
    m1a.drawcountries(color='0.6',linewidth=1)
    m1a.drawcoastlines(color='0.5',linewidth=1)
    # Lower panel: cumulative loss up to and including this month.
    ax1b= plt.subplot2grid((2,1),(1,0))
    ax1b.set_title('Cumulative forest loss')
    m1b = Basemap(projection='aea', lat_0=(N+S)/2., lon_0=(E+W)/2., llcrnrlat=S, urcrnrlat=N,llcrnrlon=W, urcrnrlon=E, resolution='i')
    m1b.ax = ax1b
    x,y = m1b(lon_grid,lat_grid)
    im2=m1b.pcolormesh(x,y,cum_GFW[i,:,:],vmin=0.0,vmax=1, rasterized=True, edgecolor='0.6', linewidth=0)
    m1b.drawcountries(color='0.6',linewidth=1)
    m1b.drawcoastlines(color='0.5',linewidth=1)
    cbar = m1b.colorbar(im2)
    cbar.solids.set_edgecolor("face")
    cbar.set_ticks([0,0.5,1])
    plt.savefig('plot_EO_data/ForestLossMonthly_tstep'+str(i).zfill(3)+'.png')
| gpl-3.0 |
hendrikTpl/deeppy | examples/siamese_mnist.py | 9 | 2485 | #!/usr/bin/env python
"""
Siamese networks
================
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Generate image pairs
n_pairs = 100000
x1 = np.empty((n_pairs, 28*28), dtype=dp.float_)
x2 = np.empty_like(x1, dtype=dp.float_)
y = np.empty(n_pairs, dtype=dp.int_)
n_imgs = x_train.shape[0]
n = 0
while n < n_pairs:
i = random.randint(0, n_imgs-1)
j = random.randint(0, n_imgs-1)
if i == j:
continue
x1[n, ...] = x_train[i]
x2[n, ...] = x_train[j]
if y_train[i] == y_train[j]:
y[n] = 1
else:
y[n] = 0
n += 1
# Prepare network inputs
train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
siamese_layers=[
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=2,
weights=dp.Parameter(dp.AutoFiller(w_gain)),
),
],
loss=dp.ContrastiveLoss(margin=1.0),
)
# Train network
trainer = dp.StochasticGradientDescent(
max_epochs=15,
learn_rule=dp.RMSProp(learn_rate=0.01),
)
trainer.train(net, train_input)
# Plot 2D embedding
test_input = dp.Input(x_test)
x_test = np.reshape(x_test, (-1,) + dataset.img_shape)
feat = net.features(test_input)
feat -= np.min(feat, 0)
feat /= np.max(feat, 0)
plt.figure()
ax = plt.subplot(111)
shown_images = np.array([[1., 1.]])
for i in range(feat.shape[0]):
dist = np.sum((feat[i] - shown_images)**2, 1)
if np.min(dist) < 6e-4:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [feat[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),
xy=feat[i], frameon=False
)
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
plt.title('Embedding from the last layer of the network')
| mit |
RayMick/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    """Exercise pairwise_distances across metrics, input types and sparsity."""
    # Test the pairwise_distance helper function.
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    S = pairwise_distances(X, metric="euclidean")
    S2 = euclidean_distances(X)
    assert_array_almost_equal(S, S2)
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((2, 4))
    S = pairwise_distances(X, Y, metric="euclidean")
    S2 = euclidean_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with tuples as X and Y
    X_tuples = tuple([tuple([v for v in row]) for row in X])
    Y_tuples = tuple([tuple([v for v in row]) for row in Y])
    S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
    assert_array_almost_equal(S, S2)
    # "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
    S = pairwise_distances(X, metric="cityblock")
    S2 = pairwise_distances(X, metric=cityblock)
    assert_equal(S.shape[0], S.shape[1])
    assert_equal(S.shape[0], X.shape[0])
    assert_array_almost_equal(S, S2)
    # The manhattan metric should be equivalent to cityblock.
    S = pairwise_distances(X, Y, metric="manhattan")
    S2 = pairwise_distances(X, Y, metric=cityblock)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Low-level function for manhattan can divide in blocks to avoid
    # using too much memory during the broadcasting
    S3 = manhattan_distances(X, Y, size_threshold=10)
    assert_array_almost_equal(S, S3)
    # Test cosine as a string metric versus cosine callable
    # "cosine" uses sklearn metric, cosine (function) is scipy.spatial
    S = pairwise_distances(X, Y, metric="cosine")
    S2 = pairwise_distances(X, Y, metric=cosine)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)
    # Test with sparse X and Y,
    # currently only supported for Euclidean, L1 and cosine.
    X_sparse = csr_matrix(X)
    Y_sparse = csr_matrix(Y)
    S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
    S2 = euclidean_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
    S2 = cosine_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    # Mixed sparse formats (csc / bsr / coo) should be handled transparently.
    S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
    S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
    assert_array_almost_equal(S, S2)
    S2 = manhattan_distances(X, Y)
    assert_array_almost_equal(S, S2)
    # Test with scipy.spatial.distance metric, with a kwd
    kwds = {"p": 2.0}
    S = pairwise_distances(X, Y, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # same with Y = None
    kwds = {"p": 2.0}
    S = pairwise_distances(X, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)
    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")
    # Test that a value error is raised if the metric is unknown
    assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
    """Check the 'precomputed' metric: shape validation and pass-through."""
    for func in [pairwise_distances, pairwise_kernels]:
        # Test correct shape
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), metric='precomputed')
        # with two args
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 4)),
                             metric='precomputed')
        # even if shape[1] agrees (although thus second arg is spurious)
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 3)),
                             metric='precomputed')
        # Test not copied (if appropriate dtype)
        S = np.zeros((5, 5))
        S2 = func(S, metric="precomputed")
        assert_true(S is S2)
        # with two args
        S = np.zeros((5, 3))
        S2 = func(S, np.zeros((3, 3)), metric="precomputed")
        assert_true(S is S2)
        # Test always returns float dtype
        S = func(np.array([[1]], dtype='int'), metric='precomputed')
        assert_equal('f', S.dtype.kind)
        # Test converts list to array-like
        S = func([[1]], metric='precomputed')
        assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
    """Assert that n_jobs=2 reproduces the n_jobs=1 result (dense and sparse)."""
    rng = np.random.RandomState(0)
    for make_data in (np.array, csr_matrix):
        X = make_data(rng.random_sample((5, 4)))
        Y = make_data(rng.random_sample((3, 4)))
        try:
            S = func(X, metric=metric, n_jobs=1, **kwds)
        except (TypeError, ValueError) as exc:
            # Not all metrics support sparse input
            # ValueError may be triggered by bad callable
            if make_data is csr_matrix:
                # The parallel path must fail the same way as the serial one.
                assert_raises(type(exc), func, X, metric=metric,
                              n_jobs=2, **kwds)
                continue
            else:
                raise
        S2 = func(X, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
        S = func(X, Y, metric=metric, n_jobs=1, **kwds)
        S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
    """Yield parallel-consistency checks for a selection of metrics/kernels."""
    wmink = {'w': np.arange(1, 5).astype('double'), 'p': 1}
    cases = (
        (pairwise_distances, 'euclidean', {}),
        (pairwise_distances, wminkowski, wmink),
        (pairwise_distances, 'wminkowski', wmink),
        (pairwise_kernels, 'polynomial', {'degree': 1}),
        (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
    )
    for case in cases:
        # nose-style generator test: yields (check, func, metric, kwds).
        yield (check_pairwise_parallel,) + case
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
    # Knowing that the callable is a strict metric would allow the diagonal to
    # be left uncalculated and set to 0.
    assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
    """Wrap pairwise.rbf_kernel so it can be used as a callable metric."""
    return rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
def test_pairwise_kernels():
    """Check pairwise_kernels against every entry in PAIRWISE_KERNEL_FUNCTIONS."""
    # Test the pairwise_kernels helper function.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
    test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
                    "additive_chi2"]
    for metric in test_metrics:
        function = PAIRWISE_KERNEL_FUNCTIONS[metric]
        # Test with Y=None
        K1 = pairwise_kernels(X, metric=metric)
        K2 = function(X)
        assert_array_almost_equal(K1, K2)
        # Test with Y=Y
        K1 = pairwise_kernels(X, Y=Y, metric=metric)
        K2 = function(X, Y=Y)
        assert_array_almost_equal(K1, K2)
        # Test with tuples as X and Y
        X_tuples = tuple([tuple([v for v in row]) for row in X])
        Y_tuples = tuple([tuple([v for v in row]) for row in Y])
        K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
        assert_array_almost_equal(K1, K2)
        # Test with sparse X and Y
        X_sparse = csr_matrix(X)
        Y_sparse = csr_matrix(Y)
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
        K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
        assert_array_almost_equal(K1, K2)
    # Test with a callable function, with given keywords.
    metric = callable_rbf_kernel
    kwds = {}
    kwds['gamma'] = 0.1
    K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=Y, **kwds)
    assert_array_almost_equal(K1, K2)
    # callable function, X=Y
    K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=X, **kwds)
    assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
    """filter_params=True must drop kwargs the kernel does not accept."""
    rng = np.random.RandomState(0)
    data_a = rng.random_sample((5, 4))
    data_b = rng.random_sample((2, 4))
    expected = rbf_kernel(data_a, data_b, gamma=0.1)
    params = {"gamma": 0.1, "blabla": ":)"}
    # With filtering on, the bogus keyword is silently discarded.
    actual = pairwise_kernels(data_a, data_b, metric="rbf",
                              filter_params=True, **params)
    assert_array_almost_equal(expected, actual)
    # Without filtering, the unexpected keyword must raise.
    assert_raises(TypeError, pairwise_kernels, data_a, data_b, "rbf", **params)
def test_paired_distances():
    """Check paired_distances against each entry in PAIRED_DISTANCES."""
    # Test the pairwise_distance helper function.
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((5, 4))
    for metric, func in iteritems(PAIRED_DISTANCES):
        S = paired_distances(X, Y, metric=metric)
        S2 = func(X, Y)
        assert_array_almost_equal(S, S2)
        S3 = func(csr_matrix(X), csr_matrix(Y))
        assert_array_almost_equal(S, S3)
        if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value
            distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
            distances = np.diag(distances)
            assert_array_almost_equal(distances, S)
    # Check the callable implementation
    S = paired_distances(X, Y, metric='manhattan')
    S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
    assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y should not
    # differ
    Y = rng.random_sample((3, 4))
    assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
    """Check pairwise minimum-distance computation for a range of metrics."""
    # Check pairwise minimum distances computation for any metric
    X = [[0], [1]]
    Y = [[-1], [2]]
    Xsp = dok_matrix(X)
    Ysp = csr_matrix(Y, dtype=np.float32)
    # euclidean metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    # NOTE(review): the next line duplicates the first assertion above.
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # sparse matrix case
    Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
    assert_array_equal(Dsp, D)
    assert_array_equal(Esp, E)
    # We don't want np.matrix here
    assert_equal(type(Dsp), np.ndarray)
    assert_equal(type(Esp), np.ndarray)
    # Non-euclidean sklearn metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
    D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
    D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (callable)
    D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (string)
    D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Compare with naive implementation
    rng = np.random.RandomState(0)
    X = rng.randn(97, 149)
    Y = rng.randn(111, 149)
    dist = pairwise_distances(X, Y, metric="manhattan")
    dist_orig_ind = dist.argmin(axis=0)
    dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
    dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
        X, Y, axis=0, metric="manhattan", batch_size=50)
    np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
    np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """Check euclidean_distances, including the {X,Y}_norm_squared shortcuts."""
    # Check the pairwise Euclidean distances computation
    X = [[0]]
    Y = [[1], [2]]
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    X = csr_matrix(X)
    Y = csr_matrix(Y)
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((20, 4))
    X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
    Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
    # check that we still get the right answers with {X,Y}_norm_squared
    D1 = euclidean_distances(X, Y)
    D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
    D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
    D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
                             Y_norm_squared=Y_norm_sq)
    assert_array_almost_equal(D2, D1)
    assert_array_almost_equal(D3, D1)
    assert_array_almost_equal(D4, D1)
    # check we get the wrong answer with wrong {X,Y}_norm_squared
    X_norm_sq *= 0.5
    Y_norm_sq *= 0.5
    wrong_D = euclidean_distances(X, Y,
                                  X_norm_squared=np.zeros_like(X_norm_sq),
                                  Y_norm_squared=np.zeros_like(Y_norm_sq))
    assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
    """Elementwise Euclidean distance between corresponding rows."""
    left = [[0], [0]]
    right = [[1], [2]]
    result = paired_euclidean_distances(left, right)
    assert_array_almost_equal(result, [1., 2.])
def test_paired_manhattan_distances():
    """Elementwise Manhattan distance between corresponding rows."""
    left = [[0], [0]]
    right = [[1], [2]]
    result = paired_manhattan_distances(left, right)
    assert_array_almost_equal(result, [1., 2.])
def test_chi_square_kernel():
    """Check chi2/additive_chi2 kernels: values, dtypes and input validation."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((10, 4))
    K_add = additive_chi2_kernel(X, Y)
    gamma = 0.1
    K = chi2_kernel(X, Y, gamma=gamma)
    assert_equal(K.dtype, np.float)
    # Compare every entry against a direct evaluation of the kernel formula.
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            chi2 = -np.sum((x - y) ** 2 / (x + y))
            chi2_exp = np.exp(gamma * chi2)
            assert_almost_equal(K_add[i, j], chi2)
            assert_almost_equal(K[i, j], chi2_exp)
    # check diagonal is ones for data with itself
    K = chi2_kernel(Y)
    assert_array_equal(np.diag(K), 1)
    # check off-diagonal is < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
    # check that float32 is preserved
    X = rng.random_sample((5, 4)).astype(np.float32)
    Y = rng.random_sample((10, 4)).astype(np.float32)
    K = chi2_kernel(X, Y)
    assert_equal(K.dtype, np.float32)
    # check integer type gets converted,
    # check that zeros are handled
    X = rng.random_sample((10, 4)).astype(np.int32)
    K = chi2_kernel(X, X)
    assert_true(np.isfinite(K).all())
    assert_equal(K.dtype, np.float)
    # check that kernel of similar things is greater than dissimilar ones
    X = [[.3, .7], [1., 0]]
    Y = [[0, 1], [.9, .1]]
    K = chi2_kernel(X, Y)
    assert_greater(K[0, 0], K[0, 1])
    assert_greater(K[1, 1], K[1, 0])
    # test negative input
    assert_raises(ValueError, chi2_kernel, [[0, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
    # different n_features in X and Y
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
    # sparse matrices
    assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
    assert_raises(ValueError, additive_chi2_kernel,
                  csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
    """Every valid kernel matrix K(X, X) must equal its own transpose."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    kernels = (linear_kernel, polynomial_kernel, rbf_kernel,
               sigmoid_kernel, cosine_similarity)
    for make_kernel in kernels:
        gram = make_kernel(data, data)
        assert_array_almost_equal(gram, gram.T, 15)
def test_kernel_sparse():
    """Sparse input must give the same kernel matrix as dense input."""
    rng = np.random.RandomState(0)
    dense = rng.random_sample((5, 4))
    sparse = csr_matrix(dense)
    kernels = (linear_kernel, polynomial_kernel, rbf_kernel,
               sigmoid_kernel, cosine_similarity)
    for make_kernel in kernels:
        assert_array_almost_equal(make_kernel(dense, dense),
                                  make_kernel(sparse, sparse))
def test_linear_kernel():
    """Diagonal of a linear kernel equals the squared norms of the rows."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    gram = linear_kernel(data, data)
    # gram.flat[::6] walks the diagonal of the 5x5 Gram matrix.
    expected = [linalg.norm(row) ** 2 for row in data]
    assert_array_almost_equal(gram.flat[::6], expected)
def test_rbf_kernel():
    """Diagonal of an RBF kernel is identically one."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    gram = rbf_kernel(data, data)
    # gram.flat[::6] walks the diagonal of the 5x5 Gram matrix.
    assert_array_almost_equal(gram.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
    """cosine_similarity(dense_output=False) must return a sparse matrix."""
    rng = np.random.RandomState(0)
    left = csr_matrix(rng.random_sample((5, 4)))
    right = csr_matrix(rng.random_sample((3, 4)))
    sparse_sim = cosine_similarity(left, right, dense_output=False)
    assert_true(issparse(sparse_sim))
    # Values must agree with the dense pairwise_kernels path.
    dense_sim = pairwise_kernels(left, Y=right, metric="cosine")
    assert_array_almost_equal(sparse_sim.todense(), dense_sim)
def test_cosine_similarity():
    """Cosine kernel equals a linear kernel on L2-normalized data."""
    # Test the cosine_similarity.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    for X_, Y_ in ((X, None), (X, Y),
                   (Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
        K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
        X_ = normalize(X_)
        if Y_ is not None:
            Y_ = normalize(Y_)
        K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
        assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
    """check_pairwise_arrays with XB=None returns XA for both outputs."""
    data = np.resize(np.arange(40), (5, 8))
    checked_a, checked_b = check_pairwise_arrays(data, None)
    # Both outputs should alias the same validated array.
    assert_true(checked_a is checked_b)
    assert_array_equal(data, checked_a)
def test_check_XB_returned():
    """Valid (XA, XB) pairs pass through the array checks unchanged."""
    XA = np.resize(np.arange(40), (5, 8))
    XB = np.resize(np.arange(32), (4, 8))
    checked_a, checked_b = check_pairwise_arrays(XA, XB)
    assert_array_equal(XA, checked_a)
    assert_array_equal(XB, checked_b)
    # Paired checks additionally require matching first dimensions.
    XB = np.resize(np.arange(40), (5, 8))
    checked_a, checked_b = check_paired_arrays(XA, XB)
    assert_array_equal(XA, checked_a)
    assert_array_equal(XB, checked_b)
def test_check_different_dimensions():
    """Arrays whose dimensions disagree must be rejected with ValueError."""
    left = np.resize(np.arange(45), (5, 9))
    right = np.resize(np.arange(32), (4, 8))
    # 9 features vs 8 features: the pairwise check fails.
    assert_raises(ValueError, check_pairwise_arrays, left, right)
    # Same feature count but 5 vs 4 samples: the paired check fails.
    right = np.resize(np.arange(4 * 9), (4, 9))
    assert_raises(ValueError, check_paired_arrays, left, right)
def test_check_invalid_dimensions():
    """Incompatible 2D shapes must raise ValueError."""
    # Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyways
    XA = np.arange(45).reshape(9, 5)
    XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    # NOTE(review): the block below repeats the identical check above — likely
    # a leftover from the original 1D version of this test.
    XA = np.arange(45).reshape(9, 5)
    XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
    """check_pairwise_arrays must pass sparse inputs through as sparse."""
    # Ensures that checks return valid sparse matrices.
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_sparse = csr_matrix(XA)
    XB = rng.random_sample((5, 4))
    XB_sparse = csr_matrix(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
    # compare their difference because testing csr matrices for
    # equality with '==' does not work as expected.
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XB_checked))
    assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
    # Passing the same sparse matrix twice must also round-trip cleanly.
    XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XA_2_checked))
    assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert an n-dimensional numpy array into nested tuples."""
    if len(X.shape) > 1:
        # Multi-dimensional: convert each sub-array recursively.
        return tuple(map(tuplify, X))
    # 1-D base case: a flat tuple of the elements.
    return tuple(X)
def test_check_tuple_input():
    """Tuple-of-tuples input is converted to valid arrays by the checks."""
    rng = np.random.RandomState(0)
    first = tuplify(rng.random_sample((5, 4)))
    second = tuplify(rng.random_sample((5, 4)))
    checked_a, checked_b = check_pairwise_arrays(first, second)
    assert_array_equal(first, checked_a)
    assert_array_equal(second, checked_b)
def test_check_preserve_type():
    """float32 inputs stay float32; mixed dtypes are upcast to float64."""
    # Ensures that type float32 is preserved.
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)
    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)
    # mismatched A
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
                                                   XB)
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)
    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA,
                                                   XB.astype(np.float))
    assert_equal(XA_checked.dtype, np.float)
    assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
ChayaSt/Torsions | torsionfit/plots.py | 4 | 23291 | """
Plotting module and data exploration for torsionfit
This module contains functions to simplify exploration of torsionfit output in addition to general plotting functions
"""
__author__ = 'Chaya D. Stern'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pymbar
# global parameter
# Default multiplicity terms sampled for each torsion; n=5 is included only
# when callers pass n5=True (see the helpers below, which use range(1, 7)).
multiplicities = (1, 2, 3, 4, 6)
def get_parameter_names(model, db, n5=False):
    """
    Map each torsion in the model to the names of all of its sampled parameters.

    :param model: torsionfit.TorsionFitModel; its ``parameters_to_optimize``
        lists torsions as 4-tuples of atom types.
    :param db: pymc.database (kept for interface compatibility; not read here)
    :param n5: if True, use multiplicities 1-6; otherwise (1, 2, 3, 4, 6)
    :return: dict mapping 'A_B_C_D' torsion names to their parameter-name lists
        (the multiplicity bitstring followed by K/Phase names per multiplicity)
    """
    terms = tuple(range(1, 7)) if n5 else (1, 2, 3, 4, 6)
    torsion_parameters = {}
    for atoms in model.parameters_to_optimize:
        key = '_'.join((atoms[0], atoms[1], atoms[2], atoms[3]))
        params = [key + '_multiplicity_bitstring']
        for m in terms:
            params.append('%s_%d_K' % (key, m))
            params.append('%s_%d_Phase' % (key, m))
        torsion_parameters[key] = params
    return torsion_parameters
def get_multiplicity_traces(torsion_parameters, db, n5=False):
    """
    Expand each torsion's multiplicity bitstring trace into per-term (0, 1)
    indicator traces.

    :param torsion_parameters: a single torsion name (str) or a dict keyed by
        torsion names (as produced by ``get_parameter_names``)
    :param db: pymc.database (or sampler) providing ``trace``
    :param n5: if True, use multiplicities 1-6; otherwise (1, 2, 3, 4, 6)
    :return: dict mapping '<torsion>_<m>' to a list of 0/1 values, one per
        MCMC sample, indicating whether bit m-1 was set in the bitstring
    """
    terms = tuple(range(1, 7)) if n5 else (1, 2, 3, 4, 6)
    if isinstance(torsion_parameters, str):
        names = [torsion_parameters]
    else:
        names = list(torsion_parameters.keys())
    multiplicity_traces = {}
    for torsion_name in names:
        bitstring_trace = db.trace(torsion_name + '_multiplicity_bitstring')[:]
        for m in terms:
            mask = 2 ** (m - 1)
            multiplicity_traces['%s_%d' % (torsion_name, m)] = [
                1 if mask & int(value) else 0 for value in bitstring_trace
            ]
    return multiplicity_traces
def get_statistics(db, torsion_parameters):
    """
    Compute equilibration statistics for every parameter trace.

    Uses ``pymbar.timeseries.detectEquilibration`` to obtain the equilibration
    time, statistical inefficiency and number of effective samples per trace.

    :param db: pymc.database (can also use pymc.sampler)
    :param torsion_parameters: dict mapping torsion name to associated parameters
    :return: dict mapping each parameter name to its detectEquilibration result
    """
    return {
        param: pymbar.timeseries.detectEquilibration(db.trace(param)[:])
        for params in torsion_parameters.values()
        for param in params
    }
def trace_plots(name, db, markersize, statistics=False, multiplicity_traces=False, continuous=False, filename=None):
    """
    Generate trace plots for all parameters of a given torsion on a 9x2 grid:
    multiplicities 1-3 in the left column, 4 and 6 in the right, each with a
    K panel, a Phase panel and a multiplicity on/off panel.

    :param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
    :param db: pymc.database (can also use pymc.sampler)
    :param markersize: int.
    :param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
    :param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
    :param continuous: bool. If True, phase axes span 0-180 instead of the binary (0, 1) choice.
    :param filename: str or None. Output file name; defaults to '<name>_traces.pdf'.
    """
    if not multiplicity_traces:
        try:
            multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db)
        except KeyError:
            pass
    pp = PdfPages('%s_traces.pdf' % name)
    fig = plt.figure()
    # --- multiplicity n=1 (left column): K trace ---
    axes_k = plt.subplot(9, 2, 1)
    plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
    plt.title(name, fontweight='bold')
    if statistics:
        # Red line marks the pre-computed equilibration point.
        axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(0, 20)
    plt.ylabel('kJ/mole')
    plt.xticks([])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([0, 20])
    # Phase trace for n=1
    axes_phase = plt.subplot(9, 2, 3)
    plt.plot(db.trace(name + '_' + str(1) + '_Phase')[:], '.', markersize=markersize, label='Phase')
    if continuous:
        plt.ylim(-1.0, 181)
        plt.yticks([1, 180])
    else:
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    # Multiplicity on/off trace for n=1 (skipped silently if unavailable)
    axes_n = plt.subplot(9, 2, 5)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.xticks([])
    except:
        pass
    # --- multiplicity n=2 (left column) ---
    axes_k = plt.subplot(9, 2, 7)
    plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(0, 20)
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([0, 20])
    axes_phase = plt.subplot(9, 2, 9)
    plt.plot(db.trace(name + '_' + str(2) + '_Phase')[:], '.', markersize=markersize, label='Phase')
    if continuous:
        plt.ylim(-1.0, 181)
        plt.yticks([1, 180])
    else:
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    axes_n = plt.subplot(9, 2, 11)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.xticks([])
    except:
        pass
    # --- multiplicity n=3 (left column) ---
    axes_k = plt.subplot(9, 2, 13)
    plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(0, 20)
    plt.ylabel('kJ/mole')
    plt.xticks([])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([0, 20])
    axes_phase = plt.subplot(9, 2, 15)
    plt.plot(db.trace(name + '_' + str(3) + '_Phase')[:], '.', markersize=markersize, label='Phase')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    if continuous:
        plt.ylim(-1.0, 181)
        plt.yticks([1, 180])
    else:
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
    axes_n = plt.subplot(9, 2, 17)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.xlabel('mcmc steps')
    except:
        pass
    # --- multiplicity n=4 (right column; y tick labels suppressed) ---
    axes_k = plt.subplot(9, 2, 2)
    plt.title(name, fontweight='bold')
    plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(0, 20)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([])
    axes_phase = plt.subplot(9, 2, 4)
    plt.plot(db.trace(name + '_' + str(4) + '_Phase')[:], '.', markersize=markersize, label='Phase')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    if continuous:
        plt.ylim(-1.0, 181)
        plt.yticks([1, 180])
    else:
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
    plt.xticks([])
    plt.yticks([])
    try:
        axes_n = plt.subplot(9, 2, 6)
        plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([])
        plt.xticks([])
    except:
        pass
    # --- multiplicity n=6 (right column) ---
    axes_k = plt.subplot(9, 2, 8)
    plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(0, 20)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([])
    axes_phase = plt.subplot(9, 2, 10)
    plt.plot(db.trace(name + '_' + str(6) + '_Phase')[:], '.', markersize=markersize, label='Phase')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    if continuous:
        plt.ylim(-1.0, 181)
        plt.yticks([1, 180])
    else:
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
    plt.xticks([])
    plt.yticks([])
    axes_n = plt.subplot(9, 2, 12)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([])
        plt.xlabel('mcmc steps')
    except:
        pass
    # NOTE(review): the figure is written twice — once via fig.savefig and
    # once via pp.savefig into the PdfPages handle opened above (both default
    # to '<name>_traces.pdf' when filename is None).
    if filename is None:
        fig.savefig('%s_traces.pdf' % name)
    else:
        fig.savefig(filename)
    pp.savefig(fig, dpi=80)
    pp.close()
def trace_no_phase(name, db, markersize, statistics=False, multiplicity_traces=False, ymin=-20, ymax=20, filename=None):
    """
    Generate trace plot for all parameters of a given torsion
    :param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
    :param db: pymc.database (can also use pymc.sampler)
    :param markersize: int.
    :param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
    :param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
    :param ymin: lower y-axis limit for the K panels. Default: -20
    :param ymax: upper y-axis limit for the K panels. Default: 20
    :param filename: str. output filename; if None, '<name>_traces.pdf' is used.
    """
    if not multiplicity_traces:
        try:
            multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db)
        except KeyError:
            pass
    pp = PdfPages('%s_traces.pdf' % name)
    fig = plt.figure()
    # K panel, multiplicity term 1
    axes_k = plt.subplot(5, 2, 1)
    plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
    plt.title(name, fontweight='bold')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.xticks([])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([ymin, 0, ymax])
    axes_n = plt.subplot(5, 2, 2)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.xticks([])
    except Exception:
        # Multiplicity trace missing or not a dict -- leave panel blank.
        pass
    axes_k = plt.subplot(5, 2, 3)
    plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([ymin, 0, ymax])
    axes_n = plt.subplot(5, 2, 4)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.xticks([])
    except Exception:
        pass
    axes_k = plt.subplot(5, 2, 5)
    plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([ymin, 0, ymax])
    plt.xlabel('mcmc steps')
    axes_n = plt.subplot(5, 2, 6)
    plt.title(name, fontweight='bold')
    try:
        plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([])
        plt.xticks([])
    except Exception:
        pass
    axes_k = plt.subplot(5, 2, 7)
    plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(ymin, ymax)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([ymin, 0, ymax])
    axes_n = plt.subplot(5, 2, 8)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([])
        plt.xticks([])
    except Exception:
        pass
    axes_k = plt.subplot(5, 2, 9)
    plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
    if statistics:
        axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
    else:
        axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
                       lw=1)
    plt.ylim(ymin, ymax)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    # BUG FIX: tick arguments were swapped in the original
    # (yticks([]) / xticks([ymin, 0, ymax])); every other K panel puts
    # the ymin/0/ymax ticks on the y axis.
    plt.yticks([ymin, 0, ymax])
    plt.xticks([])
    axes_n = plt.subplot(5, 2, 10)
    try:
        plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
        plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
        plt.ylim(-0.1, 1.1)
        plt.yticks([])
        plt.xlabel('mcmc steps')
    except Exception:
        pass
    if not filename:
        fig.savefig('%s_traces.pdf' % name)
    else:
        fig.savefig(filename)
    pp.savefig(fig, dpi=80)
    pp.close()
def trace_no_phase_n5(name, db, markersize, statistics=False, equil=True, multiplicity_traces=False, ymin=-20, ymax=20, filename=None):
    """
    Generate trace plot for all parameters of a given torsion
    :param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
    :param db: pymc.database (can also use pymc.sampler)
    :param markersize: int.
    :param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
    :param equil: bool. If True, draw the equilibration point on each K panel. Default: True
    :param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
    :param ymin: lower y-axis limit for the K panels. Default: -20
    :param ymax: upper y-axis limit for the K panels. Default: 20
    :param filename: str. output filename; if None, '<name>_traces.pdf' is used.
    """
    if not multiplicity_traces:
        multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db, n5=True)
    pp = PdfPages('%s_traces.pdf' % name)
    fig = plt.figure()
    axes_k = plt.subplot(6, 2, 1)
    plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
    plt.title(name, fontweight='bold')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.xticks([])
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([ymin, 0, ymax])
    axes_n = plt.subplot(6, 2, 2)
    plt.title(name, fontweight='bold')
    plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xticks([])
    axes_k = plt.subplot(6, 2, 3)
    plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([ymin, 0, ymax])
    axes_n = plt.subplot(6, 2, 4)
    plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xticks([])
    axes_k = plt.subplot(6, 2, 5)
    plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.yticks([ymin, 0, ymax])
    plt.xticks([])
    axes_n = plt.subplot(6, 2, 6)
    plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xticks([])
    axes_k = plt.subplot(6, 2, 7)
    plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([ymin, 0, ymax])
    # CONSISTENCY FIX: label was 'KJ/mol' here while the upper panels use
    # 'kJ/mole'; one spelling is now used throughout the figure.
    plt.ylabel('kJ/mole')
    axes_n = plt.subplot(6, 2, 8)
    plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xticks([])
    axes_k = plt.subplot(6, 2, 9)
    plt.plot(db.trace(name + '_' + str(5) + '_K')[:], 'k.', markersize=markersize, label='K')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '5' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(5) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xticks([])
    plt.yticks([ymin, 0, ymax])
    plt.ylabel('kJ/mole')
    axes_n = plt.subplot(6, 2, 10)
    plt.plot(multiplicity_traces[name + '_' + str(5)], 'k.', markersize=markersize, label='5')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xticks([])
    axes_k = plt.subplot(6, 2, 11)
    plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
    if equil:
        if statistics:
            axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
        else:
            axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
                           lw=1)
    plt.ylim(ymin, ymax)
    plt.yticks([ymin, 0, ymax])
    plt.ylabel('kJ/mole')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.xlabel('mcmc steps')
    axes_n = plt.subplot(6, 2, 12)
    plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
    plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.xlabel('mcmc steps')
    if not filename:
        fig.savefig('%s_traces.pdf' % name)
    else:
        fig.savefig(filename)
    pp.savefig(fig, dpi=80)
    pp.close()
def marg_mult(model, db, samples, burn=0, filename=None, n5=False):
    """
    generates histogram for marginal distribution of posterior multiplicities.
    :param model: TorsionFitModel
    :param db: pymc.database for model
    :param samples: length of trace
    :param burn: int. number of steps to skip
    :param filename: filename for plot to save
    :param n5: bool. If True, include multiplicity term 5 (terms 1-6).
    """
    if n5:
        multiplicities = tuple(range(1, 7))
    else:
        multiplicities = (1, 2, 3, 4, 6)
    mult_bitstring = []
    for i in model.pymc_parameters.keys():
        if i.split('_')[-1] == 'bitstring':
            mult_bitstring.append(i)
    # BUG FIX: the histogram needs one column per multiplicity term
    # (6 when n5 is True, else 5). The original allocated 5 columns in
    # both branches, which raised an IndexError for the n5 case.
    histogram = np.zeros((len(mult_bitstring), samples, len(multiplicities)))
    for m, torsion in enumerate(mult_bitstring):
        for i, j in enumerate(db.trace('%s' % torsion)[burn:]):
            for k, l in enumerate(multiplicities):
                # Each sampled bitstring encodes the "on" multiplicity
                # terms; bit (l - 1) marks term l.
                if 2**(l-1) & int(j):
                    histogram[m][i][k] = 1
    # Extent follows the actual data dimensions instead of the
    # hard-coded 5 x 20 of the original.
    plt.matshow(histogram.sum(1), cmap='Blues',
                extent=[0, len(multiplicities), 0, len(mult_bitstring)])
    plt.colorbar()
    plt.yticks([])
    plt.xlabel('multiplicity term')
    plt.ylabel('torsion')
    if filename:
        plt.savefig(filename)
| gpl-2.0 |
Sohojoe/damon | damon1/irt_tools.py | 1 | 70783 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 11:03:10 2016
@author: Mark H. Moulton, Educational Data Systems, Inc.
Dependencies: Numpy, Pandas
Stand-alone utility (not integrated with the main Damon workflow) for creating
raw-to-scale score conversion tables from item parameters. Supports Rasch,
2PL, 3PL, and GPC models, including mixed. Requires an item-by-parameter
input file with information about model, number of categories, scale
multiplication factor (1.0 or 1.7), and relevant item parameters.
Workflow Example
----------------
import sys
import numpy as np
    import damon1.irt_tools as rs
# Get file information
path = ('/Users/markhmoulton/anaconda/lib/python2.7/site-packages/damon1'
'/tests/play/')
data = 'raw_K1_RD_ITM.txt'
# Specify parameters for load_item_params()
sep = '\t'
index_col = 'name'
# {file_label:internal_field_name}
cols = {'aparam':rs.A,
'bparam':rs.B,
'cparam':rs.C,
'step1':rs.D1,
'step2':rs.D2,
'step3':rs.D3,
'model':rs.MODEL,
'name':rs.ITEM,
'ncat':rs.NCAT,
'scale':rs.SCALE,
'test':rs.TEST
}
# {model_label:internal_model_type}
models = {'L3':rs.PL3,
'PC1':rs.GPC}
params = rs.load_item_params(data, sep, index_col, cols, models)
thetas = np.arange(-4.0, 4.1, 0.10)
title = data[:-4]
conv_tab = rs.build_conversion_table(thetas, params, title, path)
"""
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Constants
# Internal field names used when loading and renaming input columns.
TEST = 'test'
ITEM = 'item'
STUDENT = 'student'
MODEL = 'model'
NCAT = 'ncat'
# Item-parameter field names: discrimination (a), difficulty (b),
# pseudo-guessing (c), and GPC step parameters (d1-d6).
A = 'a'
B = 'b'
C = 'c'
D1 = 'd1'
D2 = 'd2'
D3 = 'd3'
D4 = 'd4'
D5 = 'd5'
D6 = 'd6'
# Supported IRT model codes (1PL/2PL/3PL dichotomous, GPC polytomous).
PL1 = '1PL'
PL2 = '2PL'
PL3 = '3PL'
GPC = 'GPC'
# Scale multiplication factor field (1.0 logistic, 1.7 normal metric).
SCALE = 'scale_factor'
THETA_SS = 'theta_ss'
THETA_SE = 'theta_se'
# Default charting options for test characteristic curve (TCC) plots;
# see the build_conversion_table() docstring for each key's meaning.
TCC = {'pct':False, 'score_line':'b-', 'err_line':'r-', 'title':None,
       'score_label':None, 'err_label':None, 'xlabel':'Theta',
       'ylabel':'True Score', 'ylabel_err':'Standard Error',
       'adj_box':None, 'score_legend':{'loc':'upper left'},
       'err_legend':{'loc':'upper center'}, 'theta_se':True}
def load_thetas(data, sep, index_col, cols):
    """Load theta values and format as pandas dataframe.

    Parameters
    ----------
    "data" is the path (or buffer) of a delimited file of theta values.
    "sep" is the column delimiter.
    "index_col" is the column (label or integer position) holding the
        index values.
    "cols" is a dict mapping file column labels (or integer positions)
        to internal field names; only these columns are loaded.
    """
    # list() instead of .keys() so the result is indexable on Python 3.
    usecols = list(cols)
    # Integer keys mean the file has no header row.
    header = None if isinstance(usecols[0], int) else 0
    # sep is keyword-only in pandas >= 2.0, so pass it by name.
    df = pd.read_csv(data, sep=sep,
                     index_col=index_col,
                     header=header,
                     usecols=usecols)
    df.rename(columns=cols, inplace=True)
    df.index.name = cols[index_col]
    return df
def load_item_params(data, sep, index_col, cols, models, select=None):
    """Load and format item parameters file as pandas dataframe.

    Returns
    -------
    Item parameters and information formatted as a Pandas dataframe.

    Comments
    --------
    The item-by-parameter file needs a header row.  Required fields (in
    no particular order, more allowed) should include:

        * item id
        * model type
        * number of score categories
        * scale multiplication factor (1.0 for logistic, 1.7 for normal)
        * item parameters

    You don't need to load the standard errors of the item parameters.

    Example Data

    name,model,ncat,scale,aparam,bparam,cparam,step1,step2,step3
    sf_rd1,L3,2,1.7,1.009,-0.584,0.400,,,
    sf_rd2,L3,2,1.7,0.829,0.010,0.316,,,
    sf_rd10,PC1,4,1.7,1.851,,,-1.118,-0.864,-0.776
    sf_rd11,PC1,4,1.7,2.410,,,-1.114,-0.923,-0.742
    ...

    Parameters
    ----------
        "data" is either the name or path of the item parameter file or it
        is an already loaded pandas dataframe.

            data = 'my_params.txt'
            data = my_params    # pandas dataframe

        --------------
        "sep" is the column delimiter.

            sep = ','     =>  columns are comma-delimited.
            sep = '\t'    =>  columns are tab-delimited.

        --------------
        "index_col" is the column containing unique item ids.  It should be
        a field name if there is a header row, otherwise an integer index.

            index_col = 0       =>  item ids are in the 0'th column
            index_col = 'name'  =>  item ids are in the 'name' column

        --------------
        "cols" is a dictionary matching existing field names to Damon's
        constant names as given in the irt_tools module.  In IRT parlance,
        A means the 'a' parameter, B the 'b' parameter, C the 'c' parameter.
        The D parameters are the step parameters; if D is specified, B is
        not.  Only the columns specified in your cols dictionary are
        loaded; cols entries that are not present in the data header are
        ignored, saving you the trouble of building a new cols list for
        every test.

            import irt_tools as it
            cols = {'test':it.TEST,    =>  if selecting items for a test
                    'aparam':it.A,
                    'bparam':it.B,
                    'cparam':it.C,
                    'step1':it.D1,
                    'step2':it.D2,
                    'step3':it.D3,
                    'model':it.MODEL,
                    'name':it.ITEM,
                    'ncat':it.NCAT,
                    'scale':it.SCALE   =>  scale factor 1.0 or 1.7
                    }

        --------------
        "models" is a dictionary for renaming model names to Damon constant
        names.  Like the column names, model names are stored in the
        irt_tools module.

            import irt_tools as it
            models = {'L3':it.PL3,     =>  3PL model
                      'PC1':it.GPC}    =>  Generalized Partial Credit Model

        --------------
        "select" allows you to select a subset of rows (items) that belong
        to a particular test in the column corresponding to irt_tools.TEST.

            select = None (default)  =>  Use all rows.
            select = 'test_LI_3-5'   =>  Use only items that have
                                         'test_LI_3-5' in the TEST field.
    """
    if isinstance(data, pd.DataFrame):
        if select:
            df = data.loc[data[TEST] == select]
        else:
            df = data
        return df
    elif isinstance(data, str):
        # Get usecols, header parameters.  list() instead of .keys() so
        # the result is indexable on Python 3.
        usecols = list(cols)
        if isinstance(usecols[0], int):
            header = None
        elif isinstance(usecols[0], str):
            header = 0
        # Read the header only to drop cols labels absent from the file.
        # sep is keyword-only in pandas >= 2.0, so pass it by name.
        df_ = pd.read_csv(data, sep=sep,
                          index_col=index_col,
                          header=header,
                          usecols=None)
        usecols_ = [c for c in usecols if c in df_.columns.values]
        # BUG FIX: list.append() returns None; the original assigned that
        # None back to usecols, silently loading every column.
        if index_col not in usecols_:
            usecols_.append(index_col)
        usecols = usecols_
        df = pd.read_csv(data, sep=sep,
                         index_col=index_col,
                         header=header,
                         usecols=usecols)
        df.rename(columns=cols, inplace=True)
        df.index.name = cols[index_col]
        # Rename models
        df.replace({MODEL:models}, inplace=True)
        # Select subset of rows
        if select:
            df = df.loc[df[TEST] == select]
        return df
    else:
        exc = 'data must be a string path/file name or a Pandas dataframe.'
        raise ValueError(exc)
def build_conversion_table(thetas, params, title, dir_path, sep=',',
                           nraw=None, min_max=None, theta_se=True,
                           tcc=None):
    """Build raw-to-scale-score conversion table with TCC chart.

    Returns
    -------
    A dict with two Pandas dataframes:

        out = build_conversion_table(...)
        conv_tab = out['ctab']      =>  raw-to-scale-score conversion table
        true_scores = out['true']   =>  theta scale scores vs. IRT
                                        "true scores" (input to
                                        compare_tccs())

    Also writes the conversion table to a delimited text file and, when
    "tcc" is given, a test characteristic curve chart to a .png file.

    Comments
    --------
    With unidimensional models there is a one-to-one monotonic mapping
    between summed raw scores and scale scores, so scoring only needs a
    lookup table.  The accompanying test characteristic curve (theta vs.
    expected "true score") is used to validate that two tests' scales
    are comparable -- non-parallel TCCs mean non-comparable scales.

    Parameters
    ----------
        "thetas" is a numpy array of equally spaced ability values in the
        same metric as the item difficulty/step parameters, e.g.
        np.arange(-3.0, 3.1, 0.1) or np.arange(200, 601, 1).

        "params" is the item-parameter dataframe from load_item_params().

        "title" labels the TCC chart and the output file names.

        "dir_path" is the output directory.

        "sep" is the output delimiter; ',' yields a '.csv' file, anything
        else a '.txt' file.

        "nraw" <int> is the maximum raw score (None => inferred).

        "min_max" <[min, max]> clips scale scores to this range
        (None => no clipping).

        "theta_se" <bool> reports conditional standard errors per theta.

        "tcc" <None, dict> holds charting options; keys you omit fall
        back to the module-level TCC defaults (print irt_tools.TCC to see
        them).  tcc=None suppresses the chart.

    Paste Function
    --------------
        build_conversion_table(thetas,
                               params,
                               title,
                               dir_path,
                               sep=',',
                               nraw=None,
                               min_max=None,
                               theta_se=True,
                               tcc=None)
    """
    # Theta-by-item expected values and information
    expected, information = estimates(thetas, params)
    tables = conversion_table(expected, information, nraw, min_max,
                              theta_se)
    ctab, true = tables['ctab'], tables['true']

    # Write the conversion table
    ext = '.csv' if sep == ',' else '.txt'
    export(ctab, dir_path + title + '_convtab' + ext, sep)

    # Write the TCC chart, filling unspecified options from the defaults
    if tcc is not None:
        merged = TCC.copy()
        merged.update(tcc)
        chart_tcc(true, merged, dir_path + title + '_tcc.png')

    return {'ctab':ctab, 'true':true}
def estimates(thetas, params):
    """Compute expected values and information for every theta/item pair.

    Returns
    -------
    Two thetas-by-items dataframes -- cell estimates (expected values)
    and cell information:

        est, info = estimates(thetas, params)

    Comments
    --------
    Used by build_conversion_table() to derive raw-to-scale-score
    conversion tables with standard errors plus test characteristic and
    standard error curves.  The per-item math lives in est_pl() and
    est_gpc(), i.e. the 1PL/2PL/3PL and Generalized Partial Credit
    probability models (de Ayala, 2009).

    Parameters
    ----------
        "thetas" is a list or array of possible theta values (person
        scale scores) in a transformed or untransformed metric.

        "params" is the item-parameter dataframe from load_item_params().

    References
    ----------
    de Ayala, R.J. (2009). "The Theory and Practice of Item Response
    Theory". The Guilford Press.
    """
    item_ids = params.index.values
    expected = pd.DataFrame(index=thetas, columns=item_ids)
    information = pd.DataFrame(index=thetas, columns=item_ids)

    # Fill one column (item) at a time across all thetas
    for item_id in item_ids:
        e, i = estimates_for_item(thetas, params, item_id, pcats=None)
        expected.loc[:, item_id] = e
        information.loc[:, item_id] = i

    return expected, information
def estimates_for_item(thetas, params, item, pcats=False):
    """Expected values and information across thetas for one item;
    when pcats is truthy, category probabilities are returned as well."""
    model = params.loc[item, MODEL]
    row = params.loc[item, :]
    expected, information, cat_probs = est_func[model](thetas, row)
    if pcats:
        return expected, information, cat_probs
    return expected, information
# Model-specific expected value (cell estimate) functions
def est_pl(thetas, params):
    """Calculate expected value for a dichotomous 1-, 2- or 3PL item.

    Returns (est, info, cat_probs): arrays of expected values and
    information across thetas, plus a {0: P(0), 1: P(1)} dict of
    category probabilities.

    References
    ----------
    de Ayala, R.J. (2009). "The Theory and Practice of Item Response
    Theory". The Guilford Press.

    See p. 124, eq 6.3, for 3PL probability formula.
    """
    mod = params[MODEL]

    # Define parameters
    if mod == PL3:
        # Missing or NaN guessing parameter defaults to 0.  Unlike the
        # original, this no longer mutates the caller's params Series.
        c = params.get(C, 0)
        if np.isnan(c):
            c = 0
        a, b = params[A], params[B]
    elif mod == PL2:
        a, b, c = params[A], params[B], 0
    elif mod == PL1:
        a, b, c = 1, params[B], 0
    else:
        exc = 'Unable to figure out the model.'
        raise ValueError(exc)
    k, th = params[SCALE], thetas

    # Calculate cell estimates across thetas
    y = np.exp(a*k*(th - b))
    est = p = c + ((1 - c) * (y / (1 + y)))

    # Calculate cell information across thetas
    if mod == PL3:
        info = k**2 * (a**2 * (p - c)**2 * (1 - p)) / ((1 - c)**2 * p)
    elif mod == PL2:
        info = k**2 * a**2 * p * (1 - p)
    else:  # PL1
        info = k**2 * p * (1 - p)

    cat_probs = {0:(1 - p), 1:p}
    return est, info, cat_probs
def est_gpc(thetas, params):
    """Calculate expected value for a polytomous Generalized Partial
    Credit item.

    Returns (est, info, cat_probs): arrays of expected values and
    information across thetas, plus a {category: P(category)} dict.

    References
    ----------
    de Ayala, R.J. (2009). "The Theory and Practice of Item Response
    Theory". The Guilford Press.

    See p. 200, eq 7.6, for information function.
    """
    ncat = params[NCAT]
    n = len(thetas)
    a, th, k = params[A], thetas, params[SCALE]
    step_cols = [D1, D2, D3, D4, D5, D6][:ncat - 1]
    st = params[step_cols]
    cats = range(ncat)

    # Numerators per category and their shared denominator
    numer = {}
    denom = np.zeros(n)
    for c in cats:
        if c == 0:
            numer[c] = 1.0
        else:
            numer[c] = np.exp(a*k*(c*th - np.sum(st.values[:c])))
        denom += numer[c]

    # Category probabilities
    cat_probs = dict((c, numer[c] / denom) for c in cats)

    # Expected values: E[c]
    est = np.zeros(n)
    for c in cats:
        est += c * cat_probs[c]

    # Information: k^2 a^2 (E[c^2] - E[c]^2)
    second_moment = np.zeros(n)
    first_moment = np.zeros(n)
    for c in cats:
        second_moment += c**2 * cat_probs[c]
        first_moment += c * cat_probs[c]
    info = k**2 * a**2 * (second_moment - first_moment**2)

    return est, info, cat_probs
# Dictionary to store model estimation functions
# Maps each supported model code to the function that computes its
# expected values, information, and category probabilities.
est_func = {PL3:est_pl,
            PL2:est_pl,
            PL1:est_pl,
            GPC:est_gpc}
def conversion_table(estimates, info, nraw=None, min_max=None, theta_se=True):
    """Get nearest raw score for each theta.

    Returns {'ctab': raw-to-theta conversion dataframe,
             'true': theta vs. true-score (and optional SE) dataframe}.

    "estimates" and "info" are the thetas-by-items dataframes from
    estimates(); "nraw" caps the maximum raw score; "min_max" clips the
    reported scale scores; "theta_se" adds conditional standard errors.
    """
    # Load "true scores" and scale score se into dataframe
    true_ = estimates.sum(axis=1)
    if theta_se:
        se_ = 1.0 / np.sqrt(info.sum(axis=1))
    true = true_.to_frame('true_score')
    true.index.name = 'theta'
    if theta_se:
        true['se'] = se_

    # Get range of raw scores.  BUG FIX: round only the true-score
    # column; the original rounded true.values, which includes the 'se'
    # column when theta_se is True and distorts the min/max.
    rounded = np.around(true['true_score'].values)
    if nraw:
        min_raw, max_raw = np.amin(rounded), nraw
    else:
        min_raw, max_raw = np.amin(rounded), np.amax(rounded)
    raws = np.arange(max_raw + 1)

    # Initialize conversion table
    columns = ['theta', 'se'] if theta_se else ['theta']
    ctab = pd.DataFrame(index=raws, columns=columns)
    ctab.index.name = 'raw_score'
    if min_max is not None:
        min_, max_ = min_max

    # Get scale score corresponding to true score nearest each raw score
    for raw_score in ctab.index:
        if min_max is not None:
            if raw_score < min_raw:
                ss = min_
            elif raw_score > max_raw:
                ss = max_
            else:
                ss = np.clip([find_nearest(true.loc[:, 'true_score'],
                                           raw_score)], min_, max_)[0]
        else:
            # BUG FIX: the original passed the whole 'true' dataframe
            # (including the 'se' column) to find_nearest.
            ss = find_nearest(true.loc[:, 'true_score'], raw_score)
        if theta_se:
            se = se_[ss]
            ctab.loc[raw_score] = [ss, se]
        else:
            se = None
            ctab.loc[raw_score] = ss

    return {'ctab':ctab, 'true':true}
def find_nearest(array, value):
    """Return the index of the element of array nearest to value.

    Used by conversion_table() to map each integer raw score to the
    theta whose true score is closest.  For pandas objects, idxmin()
    returns the index *label* (the theta value), which is what callers
    such as se_[ss] require; modern pandas argmin() returns a positional
    offset instead, so it is only used as a fallback for plain arrays.
    """
    diffs = np.abs(array - value)
    try:
        return diffs.idxmin()
    except AttributeError:
        return diffs.argmin()
def export(data, save_as, sep='\t'):
    """Write a dataframe to disk as a delimited text file; used by
    build_conversion_table()."""
    data.to_csv(save_as, sep=sep)
def chart_tcc(true, tcc, savefig):
    """Chart 'true scores' against theta for build_conversion_table()
    and compare_tccs().

    "true" is either a single theta-vs-true-score dataframe or a dict of
    them keyed by test name; "tcc" follows the same single/nested shape
    (see the TCC module constant for option keys); "savefig" is the .png
    output path.
    """
    if isinstance(true, dict):
        # list() so tests is indexable on Python 3 (dict views are not).
        tests = list(true.keys())
        trues = true
        # next(iter(...)) replaces tcc.keys()[0], which fails on Python 3.
        if next(iter(tcc)) not in trues:
            exc = ("Unable to figure out tcc parameter. It should be nested "
                   "by test, e.g., {'test1':{tcc1...}, 'test2':{tcc2...}}")
            raise ValueError(exc)
        else:
            tccs = tcc
    else:
        tests = ['test']
        trues = {}
        trues['test'] = true
        tccs = {}
        tccs['test'] = tcc
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax2 = ax1.twinx()
    thetas = trues[tests[0]].index.values
    for test in tests:
        tcc = tccs[test]
        true = trues[test]
        if not tcc['theta_se']:
            errs = None
        else:
            try:
                errs = true.loc[:, 'se']
            except KeyError:
                errs = None
        scores = true.loc[:, 'true_score']
        if tcc['pct']:
            # float() instead of np.float, which was removed in NumPy 1.24.
            scores = 100 * scores / float(max(scores))

        # Main plot
        slabel, elabel = tcc['score_label'], tcc['err_label']
        slabel = test+' score' if slabel is None else slabel
        elabel = test+' SE' if elabel is None else elabel
        ax1.plot(thetas, scores, tcc['score_line'], label=slabel)
        ax2.plot(thetas, errs, tcc['err_line'], label=elabel)

    # Add formatting
    ax1.set_xlabel(tcc['xlabel'])
    ax1.set_ylabel(tcc['ylabel'])
    ax1.set_xlim((min(thetas) - 1, max(thetas) + 1))
    ax1.set_ylim((0, max(scores) + 3))
    if errs is not None:
        errs_ylim = (min(errs) - 1, max(errs) + 1)
        ax2.set_ylim(errs_ylim)
        ax2.set_ylabel(tcc['ylabel_err'])

    # Adjust size of charting box to make room for legend
    if tcc['adj_box'] is not None:
        box = ax1.get_position()
        start_x = box.x0
        start_y = box.y0 + box.height * (1.0 - tcc['adj_box'][1])
        stop_x = box.width * tcc['adj_box'][0]
        stop_y = box.height * tcc['adj_box'][1]
        ax1.set_position([start_x, start_y, stop_x, stop_y])
        ax2.set_position([start_x, start_y, stop_x, stop_y])

    # Add legend
    if tcc['score_legend'] is not None:
        ax1.legend(**tcc['score_legend'])
    if tcc['err_legend'] is not None and errs is not None:
        ax2.legend(**tcc['err_legend'])

    # Format and save
    if tcc['title'] is not None:
        plt.title(tcc['title'])
    fig.savefig(savefig)
def compare_tccs(trues, tccs=None, savefig='compare_tccs.png'):
    """Compare multiple TCC's with standard errors on one chart.

    Returns
    -------
        Outputs a .png chart containing multiple test characteristic
        curves with their standard errors.

    Comments
    --------
        The build_conversion_table() function returns a raw to
        scale score conversion table with a chart of a single
        test characteristic curve (TCC).  TCC's compare each
        possible person ability theta with the corresponding
        "true score" (like continuous raw scores) and are generally
        S-curves or ogives.

        Test characteristic curves are used in IRT to determine whether
        the scales associated with two tests behave the same way.  When
        data fit the Rasch (1PL) model, the TCCs will necessarily be
        comparable.  They will be parallel and have the same upper and
        lower asymptotes.

        However, because the 2PL and 3PL models permit varying item
        discrimination parameters, which yield different slopes per item,
        there is no guarantee, when their item characteristic curves are
        combined to create test characteristic curves (TCCs), that the TCCs
        will be parallel.  Variation in the c-parameter also means there
        is no guarantee that all curves will have the same lower asymptote.

        Since comparability of scales is only possible when the TCCs are
        parallel, use of the 2PL and 3PL models requires selecting items
        specifically for their capacity, when combined, to yield parallel
        TCCs.

        Thus, with Rasch, achieving comparability is done up front by
        using analysis of fit and other diagnostics to selecting items that
        fit the model and thus have parallel TCCs.  With 2PL and 3PL,
        comparability is achieved on the back end by selecting items that,
        when combined, yield parallel TCCs.

        This generally requires a visual inspection -- placing two or
        more TCCs side by side on a graph and seeing whether they are
        parallel.  That's what compare_tccs() is used for.

        compare_tccs() is usually run following build_conversion_table(),
        since it relies on one of the outputs of that function.  However,
        that output (theta with true score equivalents) can be pulled
        from any source.  Here is the expected workflow:

    Workflow
    --------
        import irt_tools as it

        trues = {}
        for test in tests:
            params = it.load_item_params(...)
            conv_tab = it.build_conversion_table(...)

            # Here is the conversion table
            ctab = conv_tab['ctab']

            # Here is the theta/true_score table, saved in the trues dict
            trues[test] = conv_tab['true']

        # Now, having collected multiple true_score tables, compare TCCs
        it.compare_tccs(trues, ...)

    Parameters
    ----------
        "trues" is a Python dictionary of test labels and the
        "theta/true-score" dataframes associated with each test.  These
        dataframes are one of the outputs of the build_conversion_table()
        function, so this will generally (though not necessarily) be run
        first.

            trues = {'test_2015':df_2015, 'test_2016':df_2016}

                        =>  We are comparing the TCCs for the 2015 and
                            2016 versions of a given test.  The thetas
                            and true scores for each year are contained
                            in the "true score" dataframe output by
                            build_conversion_table().

        ----------
        "tccs" is a nested dictionary of parameters for building the test
        characteristic and error curves.  It is just like the tcc parameter
        in build_conversion_table() except that you specify a separate
        dictionary for each set of test curves.  Parameters that are
        not specified fall back on the default irt_tools.TCC dictionary.
        None, in this case, tells the function to use only the defaults.

            tccs = {'test_2015':{'pct':True,
                                 'xlabel':'thetas for 2015'},
                    'test_2016':{'pct':True,
                                 'xlabel':'thetas for 2016'}}

            tccs = None      =>  Use default parameters

        For more documentation, go to:

            import irt_tools as it
            help(it.build_conversion_table)

            import matplotlib.pyplot as plt
            help(plt.plot)
            help(plt.legend)

        ----------
        "savefig" (default = 'compare_tcc.png') is the name or path to
        which the output TCC chart is to be saved.

            save_fig = '/my/path/compare_2015_16_tcc.png'
                             =>  Save the chart to the specified path.

            save_fig = 'compare_2015_16_tcc.png'
                             =>  Saves to the current working directory.

    Paste Function
    --------------
        compare_tccs(trues,
                     savefig='compare_tccs.png',
                     tccs=None)
    """
    tests = list(trues.keys())
    tccs_ = {}
    score_lines = ['b-', 'g-', 'k-']
    err_lines = ['r-', 'y-', 'm-']
    if tccs is None:
        # Use defaults.  Cycle through the available line styles so that
        # more than three tests no longer raises IndexError (styles are
        # reused from the start of the list).
        for i, test in enumerate(tests):
            tccs_[test] = TCC.copy()
            tccs_[test]['score_line'] = score_lines[i % len(score_lines)]
            tccs_[test]['err_line'] = err_lines[i % len(err_lines)]
    else:
        # Overwrite defaults with specified params
        for test in tests:
            tccs_[test] = TCC.copy()
            for k in tccs[test]:
                tccs_[test][k] = tccs[test][k]
    chart_tcc(trues, tccs_, savefig)
###############################################################################
def dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
              student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
              group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
              raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
              items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
              stats = 'All', # [<'All', [see list in docs]> => desired statistics]
              strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
              getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
              getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
              delimiter = '\t', # [<',', '\t'> => column delimiter]
              ):
    """Calculate DIF stats for each in a range of items.

    Returns
    -------
        dif() returns an item by statistic Damon object with
        a column containing number of score categories.  Display
        results using:

        >>> print tabulate(dif(...).whole, 'firstrow')

    Comments
    --------
        "dif" (DIF) stands for "differential item functioning" and reflects
        the degree to which items have different difficulties for two
        groups of persons, a "focal" and a "reference" group, after
        adjusting for the ability of each person.  It is used to flag
        items that "play favorites" with student groups, e.g., that are
        easy for girls and hard for boys even though the two groups
        otherwise have similar ability.

        There are a profusion of DIF statistics, organized mainly by whether
        they are intended for dichotomous or polytomous items.  The Rasch
        model has its own way of estimating DIF (not included in this
        function) which yields similar results.  dif() supports three
        categories of DIF statistics plus related variances, z-scores,
        chi-squares and so on.  Any number of combinations of these
        statistics have been proposed for flagging DIF items.

            'MH'  =>  Mantel-Haenszel, for dichotomous data
            'M'   =>  Mantel, for dichotomous and polytomous data
            'SMD' =>  standardized mean difference, usually for polytomous

        Formulas are pulled from Zwick & Thayer (1996) and Wood (2011).

        A commonly used statistic is the 'Flag' statistic, which gives a
        code for whether an item should be flagged.  ETS's a, b, c DIF
        flags are reported numerically as 0, 1, 2.  See discussion below.

        The dif_stats() function applies only to unidimensional data.
        Multidimensional DIF can be evaluated in Damon to a limited
        degree using the "stability" statistic in conjunction with
        coord()'s seed parameters.

        dif() requires a student-by-item data file or array with a group
        membership column and a column of student raw scores.  Thus, column
        headers should contain a student id column, a group column, a raw
        score column, and a series of item columns.  Any other columns in
        your dataset should be filtered out using the getcols parameter.

    References
    ----------
        Zwick, R., Thayer, D. (Autumn, 1996). "Evaluating the Magnitude of
        Differential Item Functioning in Polytomous Items". Journal of
        Educational and Behavioral Statistics, Vol. 21, No. 3, pp 187-201.
        http://www.jstor.org/stable/1165267

        Wood, S. W. (2011). "Differential item functioning procedures for
        polytomous items when examinee sample sizes are small." doctoral
        PhD diss, University of Iowa, 2011.
        http://ir.uiowa.edu/etd/1110.

    Parameters
    ----------
        "filename" is the string name of a person x item file containing
        integer scores of how each student did on each item, a column
        containing test-level raw scores for each student, and a column
        assigning each student to a group.  All non-numerical cells are
        treated as missing.  All numerical scores are treated as valid.
        Numerical scores must be integers whose minimum value is zero.
        Data must be tabular and field-delimited.

            filename = '/path/to/my_file.txt'
                                    =>  file is 'my_file.txt'

        -----------
        "student_id' is the header label of the column containing unique
        student identifiers.

            student_id = 'Student_ID'   =>  Student identifiers are in the
                                            column labels 'Student_ID'.

        -----------
        "group" contains the header label of the group column and
        assigns one group to be "focal" and the other to be the
        "reference".

            group = ['Sex', {'focal':'female', 'ref':'male'}]

                                    =>  Student gender identifiers are
                                        in the column labeled 'Sex'.
                                        Students labeled "female" will
                                        be the focal group.  Students
                                        labeled "male" will be the
                                        reference group.

            Note:  As is typical with DIF statistics, while there can be
            more than two groups, only two are compared at a time.

        -----------
        "raw_score" is the header label of the raw score column.

            raw_score = 'RawScore'  =>  Test-level student raw scores
                                        are in the column labeled
                                        'RawScore'

        -----------
        "items" is the list of items for which DIF statistics should be
        calculated.

            items = 'All'           =>  Calculate DIF for all items
                                        in the dataset.

            items = ['item1', 'item5']
                                    =>  Calculate DIF for only items
                                        1 and 5.

        -----------
        "stats" is the list of DIF stats to be calculated for each
        item.  If a given statistic cannot be calculated for a given
        item, the cell is left blank.

            stats = 'All'           =>  Calculate all possible DIF
                                        statistics for all items (see
                                        list below).

            stats = ['MH_d-dif', 'MH_z', 'M_z', 'SMD_z']

                                    =>  Calculate just the Mantel-Haenszel
                                        delta-DIF (defined by ETS), the
                                        Mantel-Haenszel z statistic (both
                                        for dichotomous items), the Mantel
                                        z-statistic (for dichotomous and
                                        polytomous items), and the
                                        standardized mean difference
                                        z-statistic.

        List of available DIF-related statistics ("MH" means Mantel-
        Haenszel, "M" means Mantel, "SMD" means standardized mean
        difference.

        Mantel-Haenszel (dichotomous data)
            'MH_alpha'  =>  odds ratio, dich, 0 -> +inf
            'MH_dif'    =>  log-odds ratio, dich, -inf -> +inf
            'MH_d-dif'  =>  delta-DIF = -2.35*log-odds, dich, -inf -> +inf,
                            negative implies bias toward reference group.
                            (d-dif > 1.5 implies DIF)
            'MH_var'    =>  variance of MH_dif (SE = sqrt(var))
            'MH_d-var'  =>  variance of MH_d-dif
            'MH_z'      =>  absolute z-statistic (dif/sqrt(var)),
                            z > 2.0 => p < 0.05
            'MH_pval'   =>  p-value associated with z,
                            pval < 0.05 => significance
            'MH_chisq'  =>  chi-square = z^2. chisq > 3.84 => p < 0.05
            'MH_chisq_pval' =>  p-value associated with chisq,
                            pval < 0.05 => significance

        Mantel (dichotomous and polytomous data)
            'M_dif'     =>  observed - expected frequencies
            'M_var'     =>  variance of M_diff (SE = sqrt(var))
            'M_z'       =>  signed z-statistic, dif/sqrt(var),
                            z > 2.0 => p < 0.05
            'M_pval'    =>  p-value associated with z,
                            pval < 0.05 => significance
            'M_chisq'   =>  chi-square = z^2. chisq > 3.84 => p < 0.05
            'M_chisq_pval'  =>  p-value associated with chisq,
                            pval < 0.05 => significance

        Standardized mean difference (mainly for polytomous data)
            'SMD_dif'   =>  difference between reference and focal groups
            'SMD_var'   =>  variance of SMD_dif (SE = sqrt(var))
            'SMD_z'     =>  signed z-statistic, dif/sqrt(var),
                            z > 2.0 => p < 0.05
            'SMD_pval'  =>  p-value associated with z,
                            pval < 0.05 => significance
            'SMD_chisq' =>  chi-square = z^2. chisq > 3.84 => p < 0.05
            'SMD_chisq_pval'=>  p-value associated with chisq,
                            pval < 0.05 => significance

        Other stats
            'SD'        =>  standard deviation of person scores for that
                            item
            'SMD/SD'    =>  absolute SMD/SD > 0.25 implies DIF if
                            SMD_chisq_pval < 0.05
            'Flag'      =>  flag a DIF item based on the rules described
                            below.
            'Counts'    =>  Count valid scores for each item, overall and
                            by group.

        As mentioned, all statistics that are dependent on sample size
        (e.g., z, chi-square) will show larger values as sample size
        increases and their standard errors go to zero.  Therefore, DIF
        decisions should be based on other considerations.

        One useful rule suggested by Zwick, Thayer, and Mazzeo and used by
        ETS is as follows.  Flag DIF:

            for dichotomous items:
                Flag = 2 if:
                    'MH_d-dif' is greater than 1.5 and significantly
                    greater than 1.0.
                Flag = 0 if:
                    'MH_d-dif' is less than 1.0 or the p-value is greater
                    than 0.05.
                Flag = 1, otherwise.

                These correspond to ETS a, b, c DIF flags:
                    'a'=>0, 'b'=>1, 'c'=>2

            for polytomous items:
                Flag = 2 if:
                    'SMD/SD' is greater than 0.25 and 'M_chisq_pval' is
                    less than 0.05.
                Flag = 0, otherwise.

                There is no flag = 1 here.

                (Note: Zwick refers to this as a Mantel-Haenszel chi-square
                p-value but the formula resembles the polytomous Mantel
                chi-square p-value, which is what is used here.)

        -----------
        "strata" is the number of ability strata or levels into which
        to divide student test raw scores for purposes of matching
        students of similar abilities.  If the number of strata do
        not divide evenly into the number of potential raw scores,
        the remainder are stuck in the lowest stratum.  "strata" requires
        a backup strata specification in case the primary specification
        leads to a count of one or less for a given item:

            strata = (primary, backup)

        Examples:

            strata = ('all_scores', 4)  =>  Let each possible raw
                                            score be its own stratum.
                                            This is desirable so long as
                                            the sample of persons is large
                                            enough that all cells in
                                            the resulting stratum x score
                                            table have fairly large counts.
                                            If 'all_scores' yields
                                            insufficient data for a given
                                            item, use a stratum of 4 for
                                            that item.

            strata = (20, 10)           =>  Divide the raw scores into
                                            20 strata and match students
                                            who belong to the same stratum.
                                            If this leads to insufficient
                                            data, use 10 for that item.

        Some DIF programs allow no more than five or so stratification
        levels in order to avoid insufficient counts.  This degrades the
        DIF statistics a little, but not generally enough to be a problem.

        -----------
        "getrows" controls the rows that are loaded from the datafile,
        making it possible to filter out unneeded rows, e.g., to get a
        student subsample.  The syntax is drawn from Damon's extract()
        method and can be a bit fancy.  To get a full description of
        what you can do with getrows, see:

            >>> help(core.Damon.extract)

        Simple examples:

            getrows = None  =>  Retain all rows as they are.
                                Non-intuitively, this really means
                                "get all rows".

            getrows = {'Get':'AllExcept','Labels':'key','Rows':['row_x', 'row_y']}
                            =>  Extract all rows except those
                                labeled 'row_x' and 'row_y'.

            getrows = {'Get':'NoneExcept','Labels':'index','Rows':[range(1, 20, 2)]}
                            =>  Extract only row 1 up to, but not
                                including, row 20.  2 is a step parameter,
                                and means get every other row within the
                                range.  Counting starts from 0.  The
                                'index' parameter means 'Rows' refers to
                                positions, not 'keys'.

        -----------
        "getcols" controls the columns that are loaded from the datafile,
        making it possible to filter out unneeded columns, e.g., data
        columns that are not items or the student raw score.  The syntax
        is drawn from Damon's extract() method and can be a bit fancy.
        To get a full description of what you can do with getcols, see:

            >>> help(core.Damon.extract)

        Simple examples:

            getcols = None  =>  Retain all columns as they are.
                                Non-intuitively, this really means
                                "get all columns".

            getcols = {'Get':'AllExcept','Labels':'key','Cols':['col_x', 'col_y']}
                            =>  Extract all columns except those
                                labeled 'col_x' and 'col_y'.

            getcols = {'Get':'NoneExcept','Labels':'index','Cols':[range(2, 41)]}
                            =>  Extract only columns 2 up to, but not
                                including, 41.  Counting starts from 0.
                                Note the 'index' parameter.

        -----------
        "delimiter" is the character used to delimit columns in
        the dataset.

            delimiter = ','     =>  File is comma-delimited.

            delimiter = '\t'    =>  File is tab-delimited.

    Examples
    --------
        [under construction]

    Paste Function
    --------------
        dif_stats(filename, # [<'my/file.txt',...> => name of scored data file]
                  student_id = 'Student_ID', # [<'Student_ID', ...> => student id column label]
                  group = ['Sex', {'focal':0, 'ref':1}], # [<e.g.'Sex', {'focal':'female', 'ref':'male'}]> => column label with assignment to focal and reference]
                  raw_score = 'RawScore', # [<'RawScore',...> => raw score column label]
                  items = 'All', # [<'All', ['item1', 'item3',...]> => items for which to get stats]
                  stats = 'All', # [<'All', [see list in docs]> => desired statistics]
                  strata = ('all_scores', 4), # [(<'all_scores', int>, int) => number of raw score strata, with backup if insufficient]
                  getrows = None, # [<None, {'Get':_,'Labels':_,'Rows':_}> => select rows using extract() syntax]
                  getcols = None, # [<None, {'Get':_,'Labels':_,'Cols':_}> => select cols using extract() syntax]
                  delimiter = '\t', # [<',', '\t'> => column delimiter]
                  )
    """
    # Snapshot the caller's arguments *before* any new local name is
    # bound: locals() here contains exactly the parameters above, so the
    # ordering of these two statements is deliberate and must not change.
    args = locals()
    # Deferred import: the implementation lives in the dif module (the
    # imported name intentionally shadows this wrapper inside this scope).
    from dif import dif_stats
    return dif_stats(**args)
###############################################################################
def classify(ss_file, # [<'my/scorefile.csv'> scores, errors, counts]
             cut_file, # [<'my/cutfile.csv'> => cut-points and labels]
             grade, # [<int> => grade as a number]
             domain, # [<str> => domain label]
             at_cuts=False, # [<bool> => report stats at each cut-point]
             load_ss=None, # [<func> => function for loading ss_file]
             load_cuts=None, # [<func> => function for loading cut_file]
             sep=',' # [<str> column delimiter]
             ):
    """Calculation classification accuracy and consistency statistics.

    Returns
    -------
        classify() returns a nested dictionary giving classification
        and accuracy statistics for each cut-point and overall, e.g.:

            {'summary':{'acc':0.68,
                        'consist':0.56,
                        'kappa':0.55},
             'Basic':{'acc':0.60,
                      'consist':0.43,
                      'kappa':0.51},
             'Proficient':{'acc':0.73,
                           'consist':0.69,
                           'kappa':0.65}
             }

    Comments
    --------
        classify() answers the questions:

            1) How accurate is the test overall in assigning students to
               their "true" performance level?
            2) How consistent is that assignment, i.e., how often would
               students be assigned to the same performance level on
               repeated testing?
            3) What is the accuracy and consistency with regard to a
               particular performance level cut-point?

        The psychometric literature offers a variety of methods to answer
        these questions, some based on classical test theory, some on item
        response theory, with variations that differ according to the
        item parameter estimation program.  classify() uses the IRT-based
        method proposed by Lawrence Rudner (2001).  In this method, the
        standard error associated with each scale score, which is assumed
        to be normally distributed, is used to calculate the probability
        that a student with that scale score will fall into each of the
        performance levels.  The resulting probabilities are used to
        compute the desired accuracy, consistency, and Cohen's kappa
        statistics (Cohen's kappa is an alternative measure of
        consistency).  The method is fast, reliable, and (relatively) easy
        to understand.

        Note that Cohen's kappa as computed here is calculated using a
        traditional unweighted formula, but the results look quite
        different from those calculated using the CTT-based Livingston &
        Lewis method.  I believe that's because the L&L method applies the
        kappa formula to a hypothetical test whose length is adjusted to
        account for test reliability.  In the IRT-based Rudner method,
        there is no need to create such a hypothetical test.

    Parameters
    ----------
        "ss_file" is a path or filename containing fields for grade,
        domain, raw score, scale score, standard error of measure,
        performance level, and count of students.  Thus for a given grade
        with a test of 25 items, you will have 25 rows.  Each row will
        have a raw score from 0 to 25, the corresponding scale score and
        standard error, and the number of students who got that score.
        The raw score, scale score, and standard error of measurement
        will be an output of your IRT package.  You may have to get the
        counts yourself.  Here is an example:

            Grade	Domain	RS	SS	SEM	PL	N
            0	L	0	220	117	1	166
            0	L	1	245	110	1	174
            0	L	2	255	105	1	417
            0	L	3	262	101	1	743
            etc.

        Gotcha
        ------
        Loading grades as a mix of string and int ('k', 1, 2) creates
        problems.  Either make them all int (0, 1, 2) or all string
        ('k', 'g1', 'g2').

        This file format is here termed the "EDS format", since it is
        the format we use at Educational Data Sytems, but classify()
        supports other file formats if you're willing to write the
        requisite load function (see "load_ss" below).

        -------------
        "cut_file" is a file of cut-scores, the scale scores used to
        demarcate adjoining performance level.  Here is an example in
        EDS format:

            Grade	Domain	B	EI	I	EA	A	Max
            0	L	220	362	409	455	502	570
            0	S	140	353	405	457	509	630
            0	R	220	232	300	380	468	570
            0	W	220	255	327	383	430	600
            1	L	220	362	409	455	502	570
            1	S	140	353	405	457	509	630
            1	R	220	357	393	468	570	570
            1	W	220	372	406	444	518	600
            etc.

        Gotcha
        ------
        Loading grades as a mix of string and int ('k', 1, 2) creates
        problems.  Either make them all int (0, 1, 2) or all string
        ('k', 'g1', 'g2').

        Each scale score is the lowest scale score for that performance
        level.  So 220 is the lowest possible score in kindergarten
        Listening as well as the lowest in the 'B'asic performance level.
        Note the "Max" column.  This is the maximum possible scale score,
        at the top of the highest performance level.

        As with ss_file, you can read cut files stored in alternative
        formats by specifying your own load function (see "load_cuts"
        below).

        -------------
        "grade" is an integer representing the grade (kindergarten is
        "0").  If your load method doesn't need grade, you can set
        grade=None.

        -------------
        "domain" is a string representing the content domain.  If your
        load method doesn't need domain, you can set domain=None.

        -------------
        "at_cuts" <bool>, if True, adds a separate set of accuracy,
        consistency, and kappa statistics for each cut-point.

        -------------
        "load_ss" allows you to specify a (function, kwargs) tuple for
        loading a file of scale scores and frequencies that are in a
        different format than the "EDS" format assumed here.  Whatever
        the inputs, the output should be a Pandas Dataframe consisting
        minimally of:

            RS	SS	SEM	N
            0	220	117	166
            1	245	110	174
            2	255	105	417
            3	262	101	743
            etc.

        in which the RS column (Raw Score) is the index.  Open the
        classify.py module to see two examples of load functions:
        load_ss() and load_ss_eds().  If your function returns similar
        dataframes, classify() can analyze them.

        Examples:
            load_ss = None  =>  Use the default classify.load_ss_eds()
                                load function.

            import damon1.classify as cl
            load_ss = (cl.load_ss, {'filename':_, 'names':_, etc})
                            =>  Use the classify.load_ss() function
                                with the accompanying keyword
                                arguments.  (Positional arguments
                                won't work.)

            load_ss = (my_func, {my_kwargs})
                            =>  To load your file, use my_func
                                and accompanying keyword arguments.

        -------------
        "load_cuts" allows you to specify a (function, kwargs) tuple for
        loading a file of cut-scores (with maximum score) that are in a
        different format that the "EDS" format assumed here.  Whatever
        the inputs, the output should be a pandas Series consisting
        minimally of:

            B	EI	I	EA	A	Max
            220	362	409	455	502	570

        that is, a scale score cut-point giving the start of each
        performance level cut-point, plus a maximum scale score.
        Open the classify.py module for an example.

        -------------
        "sep" is the column delimiter for both the ss_file and cut_file
        files.

            sep = ','   =>  Both files are comma-delimited
            sep = '\t'  =>  Both files are tab-delimited

    References
    ----------
        Cohen's kappa. (2016, October 4). In Wikipedia, The Free
        Encyclopedia.  Retrieved 13:39, October 4, 2016, from
        https://en.wikipedia.org/w/index.php?title=Cohen%27s_kappa&oldid=742569319

        Livingston, Samuel A., & Lewis, Charles (1993). Estimating the
        Consistency and Accuracy of Classifications Based on Test Scores.
        Education Testing Service, Research Report.
        https://www.ets.org/Media/Research/pdf/RR-93-48.pdf

        Rudner, Lawrence M. (2001). Computing the expected proportions
        of misclassified examinees. Practical Assessment, Research &
        Evaluation, 7(14).
        http://PAREonline.net/getvn.asp?v=7&n=14.

    Paste Function
    --------------
        classify(ss_file, # [<'my/scorefile.csv'> scores, errors, counts]
                 cut_file, # [<'my/cutfile.csv'> => cut-points and labels]
                 grade, # [<int> => grade as a number]
                 domain, # [<str> => domain label]
                 at_cuts=False, # [<bool> => report stats at each cut-point]
                 load_ss=None, # [<func> => function for loading ss_file]
                 load_cuts=None, # [<func> => function for loading cut_file]
                 sep=',' # [<str> column delimiter]
                 )
    """
    import damon1.classify as cl
    # Get function to load raw and scale scores
    if load_ss is None:
        load_ss = cl.load_ss_eds
        load_ss_args = {'filename':ss_file, 'grade':grade, 'domain':domain,
                        'names':cl.SS_COLS_EDS, 'usecols':cl.SS_COLS_EDS,
                        'index_col':[cl.GRADE, cl.DOMAIN], 'sep':sep}
    else:
        # BUGFIX: unpack the (function, kwargs) tuple in one step.  The
        # previous code rebound load_ss to the function first and then
        # tried to take the kwargs from the *function* (load_ss[1]),
        # which raised TypeError for any custom loader.
        load_ss, load_ss_args = load_ss
    # Get function to load cutpoints
    if load_cuts is None:
        load_cuts = cl.load_cuts_eds
        load_cuts_args = {'filename':cut_file, 'grade':grade, 'domain':domain,
                          'sep':sep}
    else:
        # BUGFIX: same tuple-unpack ordering fix as for load_ss above.
        load_cuts, load_cuts_args = load_cuts
    # Load scores and cuts
    ss_se = load_ss(**load_ss_args)
    cuts = load_cuts(**load_cuts_args)
    # Get summary stats (across all cutpoints)
    out = {}
    out['summary'] = cl.acc_consist(ss_se, cuts)
    # Get stats at each cut: evaluate each interior cut-point against
    # the overall minimum (index 0) and the 'Max' score (last index).
    if at_cuts:
        max_ = len(cuts) - 1
        for i in range(1, max_):
            label = cuts.index[i]
            cuts_ = cuts.iloc[[0, i, max_]]
            out[label] = cl.acc_consist(ss_se, cuts_)
    return out
###############################################################################
def fit_for_celdt(scored, thetas, params, skipna=True):
    """Calculate item fit statistics using a formula specific to the EDS
    CELDT contract.

    This procedure is based on the following explanation in the CELDT Tech
    Report and an email by Brad Mulder at ETS.

        8.6.1 IRT Model Fit Analyses. Because the CELDT makes use of item
        response theory (IRT) to equate successive forms of the test,
        evaluating the extent to which the model is appropriate for the
        CELDT data is an important part of evaluating the validity of the
        test. Goodness-of-fit statistics were computed for each item to
        examine how closely an item's data conform to the item response
        models. For each item, a comparison of the observed proportions of
        examinees in each response category with the expected proportion
        based on the model parameters yields a chi-square-like
        goodness-of-fit test (with degrees of freedom equal to mj - 1, one
        less than the number of response categories for an item) for each
        item, the Q statistic.

        This statistic is directly dependent on sample size, and for the
        large samples of the CELDT, the Q values need to be modified to
        take this dependency into account. Consistent with past practice,
        we calculated a Z statistic as

            Z[j] = (Q[j] - df[Qj]) / sqrt(2(df))

        where df = mj - 1.

        This statistic is useful for flagging items that fit relatively
        poorly. Zj is sensitive to sample size, and cutoff values for
        flagging an item based on Zj have been developed and were used to
        identify items for the item review. The cutoff value is
        (N/1,500 x 4) for a given test, where N is the sample size.

    Brad's explanation:

        Here are the steps for evaluating fit:
        1) Multilog provides [from Speaking K-2]:
            OBSERVED AND EXPECTED COUNTS/PROPORTIONS IN
            CATEGORY(K): 1 2
            OBS. FREQ.   3721 66165
            OBS. PROP.   0.0532 0.9468
            EXP. PROP.   0.0651 0.9349
        2) Get the expected frequency from the expected proportions.
        3) Calculate Pearson chi-square using only those 4 counts
           (I get 161.4102 for item 5 in Listening K-2)
        4) Calculate fit value=(_PCHI_-DF_PCHI)/(sqrt(2*DF_PCHI))
           (I get 113.4272)
        5) Calculate cutoff=N/1500*4; (I get 186.3627)
        6) If fit>=cutoff then Fit_Flag='Y';

        Because this isn't evaluating anything beyond that the item
        calibration recovering the number of examinees earning each score
        point, it isn't going to match something that is picking up
        deviations in particular ranges of theta.  We'd have replaced them
        were we using them to choose items.

        Brad

    Mark Moulton's Opinion:  I don't believe this procedure offers a sound
    measurement of fit:

        1)  The Multilog expected values (probabilities) assume complete
            data.  When the observed array contains missing cells, this
            the percentage of missing biases the fit statistics.
        2)  In my understanding of the Rasch-based joint maximum
            likelihood algorithm, iteration occurs until the sum of
            observed values equals the sum of expected values.  If jMetrik
            or Multilog use the same iterative approximations, we would
            then expect the difference between the observed and expected
            sums to approach zero.  Misfit, in that case, would be driven
            by number of iterations.
        3)  These fit statistics don't look anything like those calculated
            by jMetrik or Rasch programs.

    Therefore, these fit stats are reported at the request of the client
    to maintain continuity with the past.  They are not used to make
    decisions about items.
    """
    thetas_ = thetas.loc[:, THETA_SS].values
    items = scored.columns
    fit = {'fit':[], 'flag':[], 'cut':[], 'xsq':[], 'n':[]}
    cat0 = []
    cat1 = []
    for item in items:
        obs = scored.loc[:, item]
        obs_freq = obs.value_counts().sort_index()
        # Model-expected category probabilities per examinee theta.
        pcats = estimates_for_item(thetas_, params, item, pcats=True)[2]
        pcats = pd.DataFrame(pcats, index=thetas.index.values)
        # Sample size is the count of valid (non-missing) observations.
        n = np.sum(obs_freq)
        if skipna is True:
            # Blank out probabilities for examinees with missing scores
            # so expected frequencies come from the same sample as the
            # observed frequencies, then sum them directly.
            nix = obs.isnull()
            pcats.iloc[nix.values, :] = np.nan
            exp_freq = pcats.sum(axis=0, skipna=skipna).sort_index()
        else:
            # Assume complete data: scale mean probabilities by n.
            exp_freq = (pcats.mean(axis=0, skipna=skipna) * n).sort_index()
        # Pearson chi-square across score categories, then the
        # sample-size-adjusted Z (see docstring) and the ETS-style cutoff.
        xsq = np.sum((obs_freq - exp_freq)**2 / exp_freq)
        df = len(obs_freq) - 1
        fit_ = (xsq - df) / np.sqrt(2 * df)
        cut = (n / 1500.0) * 4.0
        flag = fit_ >= cut
        fit['xsq'].append(xsq)
        fit['n'].append(n)
        fit['fit'].append(fit_)
        fit['cut'].append(cut)
        fit['flag'].append(flag)
        # Mean expected probabilities for the first two score categories,
        # reported for diagnostic purposes.
        cat0.append(pcats.mean(axis=0, skipna=skipna).sort_index()[0])
        cat1.append(pcats.mean(axis=0, skipna=skipna).sort_index()[1])
    results = pd.DataFrame(fit, index=items).round(2)
    results['cat0'] = np.around(cat0, 3)
    results['cat1'] = np.around(cat1, 3)
    return results
###############################################################################
def rasch_fit(scored, fit='in', samp=300, n_samps=5, cut=1.2):
    """Calculate item fit statistics using a Rasch model.

    Draws `n_samps` random person samples of size `samp` from `scored`,
    runs a Rasch analysis on each, and returns a DataFrame with the mean
    infit ('in') or outfit ('out') per item plus a boolean 'flag' column
    marking items whose mean fit >= `cut`.
    """
    import damon1.core as dmn
    import damon1.tools as dmnt
    # Map the `fit` choice onto (rasch_out key, output column name).
    t = {'in':['fac1_infit', 'infit'],
         'out':['fac1_outfit', 'outfit']}
    infits = []
    for i in range(n_samps):
        # random_state=i makes each draw deterministic and repeatable.
        scored_ = scored.sample(samp, replace=False, random_state=i, axis=0)
        # list() keeps the py2 semantics (Damon presumably expects a
        # real list of valid integer scores, not a lazy range object).
        d = dmn.Damon(scored_, 'dataframe',
                      validchars=['All', list(range(0, 10)), 'Num', 'Guess'],
                      verbose=None)
        d.extract_valid(5, 5, 0.1)
        # Define groups. Each non-dich item gets its own group (GPC)
        vcdict = d.extract_valid_out['validchars'][1]
        groups = {'dich':[]}
        gpc = 0
        if isinstance(vcdict, list):
            groups['dich'] = dmnt.getkeys(d, 'Col', 'Core')
        elif isinstance(vcdict, dict):
            for item in vcdict:
                if len(vcdict[item]) == 2:
                    groups['dich'].append(item)
                else:
                    groups['gpc' + str(gpc)] = [item]
                    gpc += 1
        d.rasch(['key', groups])
        infits.append(d.to_dataframe(d.rasch_out[t[fit][0]]))
    # Average the per-sample fits item-by-item and flag misfits.
    df = pd.concat(infits, axis=1)
    mean_fits = df.mean(axis=1).to_frame(t[fit][1])
    flag = mean_fits.loc[:, t[fit][1]] >= cut
    mean_fits['flag'] = flag
    return mean_fits
| apache-2.0 |
ruymanengithub/vison | vison/flat/FLAT0Xaux.py | 1 | 5248 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Auxiliary Functions and resources to FLAT0X.
Created on Tue Jan 30 17:39:00 2018
:author: Ruyman Azzollini
"""
# IMPORT STUFF
from pdb import set_trace as stop
import numpy as np
import os
from collections import OrderedDict
import string as st
from matplotlib import cm
from vison.datamodel import cdp
from vison.plot import figclasses
from vison.plot import trends
# END IMPORT
def get_CDP_lib():
    """Build the library of calibration data products (CDPs) for FLAT0X.

    :return: dict mapping CDP keys to pre-configured CDP objects.
    """
    table_cdp = cdp.Tables_CDP()
    # Root name is filled in later with (test name, wavelength in nm).
    table_cdp.rootname = '%s_%snm_PRNU_TB'
    return dict(PRNU_TB=table_cdp)
def gt_check_offsets_dict(test):
    """Return the plot recipe for the offsets-vs-time check figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: use a raw string; '\_' is an invalid escape sequence and raises a
    # DeprecationWarning (future SyntaxError) on modern Python. The produced
    # string (backslash + underscore) is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(stats=['offset_pre', 'offset_ove'],
                trendaxis='time',
                figname='%s_offset_vs_time.png' % (test,),
                caption='%s: offset vs. time.' % (ntest,),
                meta=dict(doLegend=True,
                          doNiceXDate=True,
                          suptitle='%s-checks: offsets' % ntest),
                ylim=trends.offset_lims)
def gt_check_deltaoff_dict(test):
    """Return the plot recipe for the delta-offset-vs-time check figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: raw strings avoid the invalid escape sequences '\_' and '\d'
    # (DeprecationWarning, future SyntaxError); produced text is unchanged.
    # Also re-formatted: an auto-formatter had mangled this literal badly.
    ntest = test.replace('_', r'\_')
    return dict(
        stats=['deltaoff_pre', 'deltaoff_ove'],
        trendaxis='time',
        figname='%s_deltaoff_vs_time.png' % (test,),
        caption=r'%s: $\delta$offset vs. time. Offset value in each frame minus the average value.' %
        (ntest,),
        meta=dict(doLegend=True,
                  doNiceXDate=True,
                  suptitle='%s-checks: delta-offsets' % ntest),
        ylim=[-10., 10.])
def gt_check_std_dict(test):
    """Return the plot recipe for the std-vs-time check figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    # NOTE(review): 'ylim' sits inside meta here, but at the top level in
    # gt_check_offsets_dict -- confirm which placement the figure class expects.
    return dict(stats=['std_pre', 'std_ove'],
                trendaxis='time',
                figname='%s_std_vs_time.png' % test,
                caption='%s: std vs. time.' % ntest,
                meta=dict(doLegend=True,
                          doNiceXDate=True,
                          suptitle='%s-checks: std' % ntest,
                          ylim=trends.RON_lims))
def gt_check_img_flu_dict(test):
    """Return the plot recipe for the fluence-vs-time check figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(stats=['flu_med_img'],
                figname='%s_flu_vs_time.png' % test,
                caption='%s: Fluence vs. time.' % ntest,
                meta=dict(doLegend=False,
                          doNiceXDate=True,
                          suptitle='%s-checks: Fluence' % ntest,
                          ylabel='[ADU]'))
def gt_check_img_std_dict(test):
    """Return the plot recipe for the image-area STD-vs-time check figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(stats=['flu_std_img'],
                figname='%s_imgstd_vs_time.png' % test,
                caption='%s: Image area STD vs. time.' % ntest,
                meta=dict(doLegend=False,
                          doNiceXDate=True,
                          suptitle='%s-checks: Image-Area STD' % ntest,
                          ylabel='[ADU]'))
def gt_indiv_prof1Dhor_dict(test):
    """Return the plot recipe for per-OBSID 1D horizontal (across-columns) profiles.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters (figname is assigned per
        OBSID by the caller, hence no 'figname' key here).
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(
        #figname='%s_profs1D_hor_allOBSIDs.png' % test,
        caption='%s: Average profiles across columns, PLACEHOLDER.' % ntest,
        meta=dict(doLegend=False,
                  ylabel='ADU',
                  xlabel='Column [pix]',
                  suptitle='%s: Profiles across columns, PLACEHOLDER.' % ntest)
    )
def gt_indiv_prof1Dver_dict(test):
    """Return the plot recipe for per-OBSID 1D vertical (across-rows) profiles.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters (figname is assigned per
        OBSID by the caller, hence no 'figname' key here).
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(
        #figname='%s_profs1D_ver_allOBSIDs.png' % test,
        caption='%s: Average profiles across rows, PLACEHOLDER.' % ntest,
        meta=dict(doLegend=False,
                  ylabel='ADU',
                  xlabel='Row [pix]',
                  suptitle='%s: Profiles across rows, PLACEHOLDER.' % ntest)
    )
def gt_meta_MFF2D_dict(test):
    """Return the plot recipe for the 2D Master FlatField image figure.

    :param test: str, test name. Underscores are TeX-escaped for captions.
    :return: dict of figure-building parameters.
    """
    # FIX: raw string avoids the invalid escape '\_' (DeprecationWarning);
    # produced text is unchanged.
    ntest = test.replace('_', r'\_')
    return dict(
        figname='%s_MASTERFLATFIELD_2Dimgshow_PLACEHOLDER.png' % test,
        caption='%s: Master FlatField for the CCDs [PLACEHOLDER]. Smoothed with gaussian kernel and displayed using histogram equalization to highlight structure.' % ntest,
        meta=dict(doLegend=False,
                  doColorbar=False,
                  suptitle='%s/Master:Quadrant Images [PLACEHOLDER].' % ntest,
                  # NOTE(review): matplotlib's imshow expects origin='lower'
                  # or 'upper'; 'lower left' looks invalid -- confirm how the
                  # figure class consumes corekwargs before changing it.
                  corekwargs=dict(cmap=cm.gray, aspect='auto', # norm=None,
                                  origin='lower left'))
    )
def gt_FL0Xfigs(test):
    """Assemble the figure registry for a FLAT0X test.

    :param test: str, test name, forwarded to every recipe builder.
    :return: dict mapping figure keys to [figure class, recipe dict] pairs.
    """
    # (key, figure class, recipe builder) for every test-dependent figure.
    recipes = [
        ('FL0Xchecks_offsets', trends.Fig_Basic_Checkstat, gt_check_offsets_dict),
        ('FL0Xchecks_deltaoff', trends.Fig_Basic_Checkstat, gt_check_deltaoff_dict),
        ('FL0Xchecks_stds', trends.Fig_Basic_Checkstat, gt_check_std_dict),
        ('FL0Xchecks_flu', trends.Fig_Basic_Checkstat, gt_check_img_flu_dict),
        ('FL0Xchecks_imgstd', trends.Fig_Basic_Checkstat, gt_check_img_std_dict),
        ('FL0Xindiv_prof1D_hor_generic', figclasses.Fig_Beam2DPlot, gt_indiv_prof1Dhor_dict),
        ('FL0Xindiv_prof1D_ver_generic', figclasses.Fig_Beam2DPlot, gt_indiv_prof1Dver_dict),
        ('FL0Xmeta_MFF_2D', figclasses.Fig_BeamImgShow, gt_meta_MFF2D_dict),
    ]
    FL0Xfigs = {key: [figclass, builder(test)]
                for key, figclass, builder in recipes}
    # Fallback figure shown when a plot cannot be produced.
    FL0Xfigs['BlueScreen'] = [figclasses.BlueScreen, dict()]
    return FL0Xfigs
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/ipykernel/ipkernel.py | 5 | 13493 | """The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from traitlets import Instance, Type, Any, List
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
class IPythonKernel(KernelBase):
    """Kernel that executes requests via an embedded IPython InteractiveShell."""

    # The embedded shell; created in __init__ from `shell_class`.
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
    shell_class = Type(ZMQInteractiveShell)

    user_module = Any()
    def _user_module_changed(self, name, old, new):
        # Keep the shell's user module in sync with this trait.
        if self.shell is not None:
            self.shell.user_module = new

    user_ns = Instance(dict, args=None, allow_none=True)
    def _user_ns_changed(self, name, old, new):
        # Keep the shell's user namespace in sync with this trait.
        if self.shell is not None:
            self.shell.user_ns = new
            self.shell.init_user_ns()

    # A reference to the Python builtin 'raw_input' function.
    # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
    _sys_raw_input = Any()
    _sys_eval_input = Any()

    def __init__(self, **kwargs):
        """Create the shell instance and wire its I/O to the kernel sockets."""
        super(IPythonKernel, self).__init__(**kwargs)

        # Initialize the InteractiveShell subclass
        self.shell = self.shell_class.instance(parent=self,
            profile_dir = self.profile_dir,
            user_module = self.user_module,
            user_ns     = self.user_ns,
            kernel      = self,
        )
        # Route displayhook / display publisher output through the iopub socket.
        self.shell.displayhook.session = self.session
        self.shell.displayhook.pub_socket = self.iopub_socket
        self.shell.displayhook.topic = self._topic('execute_result')
        self.shell.display_pub.session = self.session
        self.shell.display_pub.pub_socket = self.iopub_socket

        self.comm_manager = CommManager(parent=self, kernel=self)

        self.shell.configurables.append(self.comm_manager)
        comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
        for msg_type in comm_msg_types:
            self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)

    # Links advertised to frontends in the kernel-info reply.
    help_links = List([
        {
            'text': "Python",
            'url': "http://docs.python.org/%i.%i" % sys.version_info[:2],
        },
        {
            'text': "IPython",
            'url': "http://ipython.org/documentation.html",
        },
        {
            'text': "NumPy",
            'url': "http://docs.scipy.org/doc/numpy/reference/",
        },
        {
            'text': "SciPy",
            'url': "http://docs.scipy.org/doc/scipy/reference/",
        },
        {
            'text': "Matplotlib",
            'url': "http://matplotlib.org/contents.html",
        },
        {
            'text': "SymPy",
            'url': "http://docs.sympy.org/latest/index.html",
        },
        {
            'text': "pandas",
            'url': "http://pandas.pydata.org/pandas-docs/stable/",
        },
    ]).tag(config=True)

    # Kernel info fields
    implementation = 'ipython'
    implementation_version = release.version
    language_info = {
        'name': 'python',
        'version': sys.version.split()[0],
        'mimetype': 'text/x-python',
        'codemirror_mode': {
            'name': 'ipython',
            'version': sys.version_info[0]
        },
        'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
        'nbconvert_exporter': 'python',
        'file_extension': '.py'
    }

    @property
    def banner(self):
        """Banner string shown to frontends (delegates to the shell)."""
        return self.shell.banner

    def start(self):
        self.shell.exit_now = False
        super(IPythonKernel, self).start()

    def set_parent(self, ident, parent):
        """Overridden from parent to tell the display hook and output streams
        about the parent message.
        """
        super(IPythonKernel, self).set_parent(ident, parent)
        self.shell.set_parent(parent)

    def init_metadata(self, parent):
        """Initialize metadata.

        Run at the beginning of each execution request.
        """
        md = super(IPythonKernel, self).init_metadata(parent)
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required for ipyparallel < 5.0
        md.update({
            'dependencies_met' : True,
            'engine' : self.ident,
        })
        return md

    def finish_metadata(self, parent, metadata, reply_content):
        """Finish populating metadata.

        Run after completing an execution request.
        """
        # FIXME: remove deprecated ipyparallel-specific code
        # This is required by ipyparallel < 5.0
        metadata['status'] = reply_content['status']
        if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
            metadata['dependencies_met'] = False

        return metadata

    def _forward_input(self, allow_stdin=False):
        """Forward raw_input and getpass to the current frontend.

        via input_request
        """
        self._allow_stdin = allow_stdin

        if PY3:
            self._sys_raw_input = builtin_mod.input
            builtin_mod.input = self.raw_input
        else:
            self._sys_raw_input = builtin_mod.raw_input
            self._sys_eval_input = builtin_mod.input
            builtin_mod.raw_input = self.raw_input
            # Py2 input() == eval(raw_input()); preserve that semantics.
            builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
        self._save_getpass = getpass.getpass
        getpass.getpass = self.getpass

    def _restore_input(self):
        """Restore raw_input, getpass"""
        if PY3:
            builtin_mod.input = self._sys_raw_input
        else:
            builtin_mod.raw_input = self._sys_raw_input
            builtin_mod.input = self._sys_eval_input

        getpass.getpass = self._save_getpass

    @property
    def execution_count(self):
        # Mirror the shell's counter rather than keeping our own.
        return self.shell.execution_count

    @execution_count.setter
    def execution_count(self, value):
        # Ignore the incrememnting done by KernelBase, in favour of our shell's
        # execution counter.
        pass

    def do_execute(self, code, silent, store_history=True,
                   user_expressions=None, allow_stdin=False):
        """Run `code` in the shell and build the execute_reply content."""
        shell = self.shell # we'll need this a lot here

        self._forward_input(allow_stdin)

        reply_content = {}
        try:
            res = shell.run_cell(code, store_history=store_history, silent=silent)
        finally:
            self._restore_input()

        if res.error_before_exec is not None:
            err = res.error_before_exec
        else:
            err = res.error_in_exec

        if res.success:
            reply_content[u'status'] = u'ok'
        else:
            reply_content[u'status'] = u'error'

            reply_content.update({
                u'traceback': shell._last_traceback or [],
                u'ename': unicode_type(type(err).__name__),
                u'evalue': safe_unicode(err),
            })

            # FIXME: deprecate piece for ipyparallel:
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
                          method='execute')
            reply_content['engine_info'] = e_info

        # Return the execution counter so clients can display prompts
        reply_content['execution_count'] = shell.execution_count - 1

        if 'traceback' in reply_content:
            self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))

        # At this point, we can tell whether the main code execution succeeded
        # or not. If it did, we proceed to evaluate user_expressions
        if reply_content['status'] == 'ok':
            reply_content[u'user_expressions'] = \
                shell.user_expressions(user_expressions or {})
        else:
            # If there was an error, don't even try to compute expressions
            reply_content[u'user_expressions'] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and always clear the payload system.
        reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        return reply_content

    def do_complete(self, code, cursor_pos):
        """Return completion matches for `code` at `cursor_pos`."""
        # FIXME: IPython completers currently assume single line,
        # but completion messages give multi-line context
        # For now, extract line from cell, based on cursor_pos:
        if cursor_pos is None:
            cursor_pos = len(code)
        line, offset = line_at_cursor(code, cursor_pos)
        line_cursor = cursor_pos - offset

        txt, matches = self.shell.complete('', line, line_cursor)
        return {'matches' : matches,
                'cursor_end' : cursor_pos,
                'cursor_start' : cursor_pos - len(txt),
                'metadata' : {},
                'status' : 'ok'}

    def do_inspect(self, code, cursor_pos, detail_level=0):
        """Return object introspection text for the token at `cursor_pos`."""
        name = token_at_cursor(code, cursor_pos)
        info = self.shell.object_inspect(name)

        reply_content = {'status' : 'ok'}
        reply_content['data'] = data = {}
        reply_content['metadata'] = {}
        reply_content['found'] = info['found']
        if info['found']:
            info_text = self.shell.object_inspect_text(
                name,
                detail_level=detail_level,
            )
            data['text/plain'] = info_text

        return reply_content

    def do_history(self, hist_access_type, output, raw, session=0, start=0,
                   stop=None, n=None, pattern=None, unique=False):
        """Query the shell's history manager (tail / range / search)."""
        if hist_access_type == 'tail':
            hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
                                                       include_latest=True)

        elif hist_access_type == 'range':
            hist = self.shell.history_manager.get_range(session, start, stop,
                                                        raw=raw, output=output)

        elif hist_access_type == 'search':
            hist = self.shell.history_manager.search(
                pattern, raw=raw, output=output, n=n, unique=unique)
        else:
            hist = []

        return {
            'status': 'ok',
            'history' : list(hist),
        }

    def do_shutdown(self, restart):
        self.shell.exit_now = True
        return dict(status='ok', restart=restart)

    def do_is_complete(self, code):
        """Check whether `code` forms a complete statement; suggest indent if not."""
        status, indent_spaces = self.shell.input_transformer_manager.check_complete(code)
        r = {'status': status}
        if status == 'incomplete':
            r['indent'] = ' ' * indent_spaces
        return r

    def do_apply(self, content, bufs, msg_id, reply_metadata):
        """Deserialize a function call from `bufs`, run it in the user
        namespace, and return the serialized result (ipyparallel protocol).
        """
        from .serialize import serialize_object, unpack_apply_message
        shell = self.shell
        try:
            working = shell.user_ns

            # Unique per-request prefix so temporaries don't collide.
            prefix = "_"+str(msg_id).replace("-","")+"_"

            f,args,kwargs = unpack_apply_message(bufs, working, copy=False)

            fname = getattr(f, '__name__', 'f')

            fname = prefix+"f"
            argname = prefix+"args"
            kwargname = prefix+"kwargs"
            resultname = prefix+"result"

            ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
            # print ns
            working.update(ns)
            code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
            try:
                exec(code, shell.user_global_ns, shell.user_ns)
                result = working.get(resultname)
            finally:
                # Always scrub the temporaries from the user namespace.
                for key in ns:
                    working.pop(key)

            result_buf = serialize_object(result,
                buffer_threshold=self.session.buffer_threshold,
                item_threshold=self.session.item_threshold,
            )

        except BaseException as e:
            # invoke IPython traceback formatting
            shell.showtraceback()
            reply_content = {
                u'traceback': shell._last_traceback or [],
                u'ename': unicode_type(type(e).__name__),
                u'evalue': safe_unicode(e),
            }
            # FIXME: deprecate piece for ipyparallel:
            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
            reply_content['engine_info'] = e_info

            self.send_response(self.iopub_socket, u'error', reply_content,
                               ident=self._topic('error'))
            self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
            result_buf = []
            reply_content['status'] = 'error'
        else:
            reply_content = {'status' : 'ok'}

        return reply_content, result_buf

    def do_clear(self):
        """Reset the user namespace (soft reset, keeps history)."""
        self.shell.reset(False)
        return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
    """Deprecated alias kept for backwards compatibility; use IPythonKernel."""

    def __init__(self, *args, **kwargs):
        import warnings
        message = 'Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel'
        warnings.warn(message, DeprecationWarning)
        super(Kernel, self).__init__(*args, **kwargs)
| gpl-3.0 |
chakki-works/seqeval | tests/test_metrics.py | 1 | 9057 | """
Evaluation test is performed for the following dataset.
https://www.clips.uantwerpen.be/conll2000/chunking/output.html
"""
import os
import random
import subprocess
import unittest
import numpy as np
import pytest
from sklearn.exceptions import UndefinedMetricWarning
from seqeval.metrics import (accuracy_score, classification_report, f1_score,
performance_measure, precision_score,
recall_score)
from seqeval.metrics.sequence_labeling import get_entities
from seqeval.scheme import IOB2
class TestF1score:
    """Warning, error and basic-value tests for seqeval's f1_score."""

    @pytest.mark.parametrize(
        'mode, scheme',
        [
            (None, None),
            ('strict', IOB2),
        ]
    )
    def test_undefined_metric_warning(self, mode, scheme):
        # Empty sequences give zero support -> micro average is undefined.
        with pytest.warns(UndefinedMetricWarning):
            f1_score([[]], [[]], average='micro', mode=mode, scheme=scheme)

    @pytest.mark.parametrize(
        'mode, scheme',
        [
            (None, None),
            ('strict', IOB2)
        ]
    )
    def test_runtime_warning(self, mode, scheme):
        # Macro average over no classes triggers a mean-of-empty warning.
        with pytest.warns(RuntimeWarning):
            f1_score([[]], [[]], average='macro', mode=mode, scheme=scheme)

    @pytest.mark.parametrize(
        'y_true, y_pred',
        [
            ([['O']], [[]]),
            ([[]], [['O']])
        ]
    )
    def test_value_error(self, y_true, y_pred):
        # Mismatched sequence lengths must be rejected.
        with pytest.raises(ValueError):
            f1_score(y_true, y_pred)

    @pytest.mark.parametrize(
        'average, expected',
        [
            (None, np.array([1])),
            ('micro', 1),
            ('macro', 1),
            ('weighted', 1)
        ]
    )
    def test_conll_f1score(self, average, expected):
        # Default (conlleval-style) mode: I-ORG starting an entity still matches.
        y_true = [['B-ORG', 'I-ORG']]
        y_pred = [['I-ORG', 'I-ORG']]
        f = f1_score(y_true, y_pred, average=average)
        assert f == expected

    @pytest.mark.parametrize(
        'average, expected',
        [
            (None, np.array([0])),
            ('micro', 0),
            ('macro', 0),
            ('weighted', 0)
        ]
    )
    def test_strict_f1score(self, average, expected):
        # Strict IOB2 mode: an entity must start with B-, so this is a miss.
        y_true = [['B-ORG', 'I-ORG']]
        y_pred = [['I-ORG', 'I-ORG']]
        f = f1_score(y_true, y_pred, average=average, mode='strict', scheme=IOB2)
        assert f == expected
class TestMetrics(unittest.TestCase):
    """Compare seqeval's metrics against the reference conlleval.pl script.

    Requires perl and the conlleval.pl script on the working directory for
    the ground-truth comparisons.
    """

    def setUp(self):
        # Ground-truth CoNLL-2000 output files shipped with the test data.
        self.file_name = os.path.join(os.path.dirname(__file__), 'data/ground_truth.txt')
        self.y_true, self.y_pred = self.load_labels(self.file_name)
        # Same data with suffix-style tags (e.g. 'PER-B' instead of 'B-PER').
        self.inv_file_name = os.path.join(os.path.dirname(__file__), 'data/ground_truth_inv.txt')
        self.y_true_inv, self.y_pred_inv = self.load_labels(self.inv_file_name)

    def test_get_entities(self):
        y_true = ['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'B-PER', 'I-PER']
        self.assertEqual(get_entities(y_true), [('MISC', 3, 5), ('PER', 7, 8)])

    def test_get_entities_with_suffix_style(self):
        y_true = ['O', 'O', 'O', 'MISC-B', 'MISC-I', 'MISC-I', 'O', 'PER-B', 'PER-I']
        self.assertEqual(get_entities(y_true, suffix=True), [('MISC', 3, 5), ('PER', 7, 8)])

    def test_get_entities_with_non_NE_input(self):
        # Tags without a B/I prefix should be warned about, not crash.
        y_true = ['O', 'O', 'O', 'MISC', 'MISC', 'MISC', 'O', 'PER', 'PER']
        with self.assertWarns(UserWarning):
            get_entities(y_true)
        with self.assertWarns(UserWarning):
            get_entities(y_true, suffix=True)

    def test_get_entities_with_only_IOB(self):
        # Bare IOB tags (no type) become the placeholder type '_'.
        y_true = [['O', 'O', 'O', 'B', 'I', 'I', 'O'], ['B', 'I', 'O']]
        entities = get_entities(y_true)
        self.assertEqual(entities, [('_', 3, 5), ('_', 8, 9)])

    def test_performance_measure(self):
        y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'O', 'B-ORG'], ['B-PER', 'I-PER', 'O', 'B-PER']]
        y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O', 'O'], ['B-PER', 'I-PER', 'O', 'B-MISC']]
        performance_dict = performance_measure(y_true, y_pred)
        self.assertDictEqual(performance_dict, {
            'FN': 1, 'FP': 3, 'TN': 4, 'TP': 3})

    def test_classification_report(self):
        # Smoke test: just verify the report renders without error.
        print(classification_report(self.y_true, self.y_pred))

    def test_inv_classification_report(self):
        print(classification_report(self.y_true_inv, self.y_pred_inv, suffix=True))

    def test_by_ground_truth(self):
        # Reference values come from the official conlleval.pl perl script.
        with open(self.file_name) as f:
            output = subprocess.check_output(['perl', 'conlleval.pl'], stdin=f).decode('utf-8')
            acc_true, p_true, r_true, f1_true = self.parse_conlleval_output(output)

            acc_pred = accuracy_score(self.y_true, self.y_pred)
            p_pred = precision_score(self.y_true, self.y_pred)
            r_pred = recall_score(self.y_true, self.y_pred)
            f1_pred = f1_score(self.y_true, self.y_pred)

            self.assertLess(abs(acc_pred - acc_true), 1e-4)
            self.assertLess(abs(p_pred - p_true), 1e-4)
            self.assertLess(abs(r_pred - r_true), 1e-4)
            self.assertLess(abs(f1_pred - f1_true), 1e-4)

    def test_metrics_for_inv_data(self):
        # Suffix-style tags must yield the same scores as prefix-style.
        with open(self.file_name) as f:
            acc_pred = accuracy_score(self.y_true, self.y_pred)
            p_pred = precision_score(self.y_true, self.y_pred)
            r_pred = recall_score(self.y_true, self.y_pred)
            f1_pred = f1_score(self.y_true, self.y_pred)

            acc_pred_inv = accuracy_score(self.y_true_inv, self.y_pred_inv)
            p_pred_inv = precision_score(self.y_true_inv, self.y_pred_inv, suffix=True)
            r_pred_inv = recall_score(self.y_true_inv, self.y_pred_inv, suffix=True)
            f1_pred_inv = f1_score(self.y_true_inv, self.y_pred_inv, suffix=True)

            self.assertLess(abs(acc_pred - acc_pred_inv), 1e-4)
            self.assertLess(abs(p_pred - p_pred_inv), 1e-4)
            self.assertLess(abs(r_pred - r_pred_inv), 1e-4)
            self.assertLess(abs(f1_pred - f1_pred_inv), 1e-4)

    def test_statistical_tests(self):
        # Fuzz test: many random datasets compared against conlleval.pl.
        # NOTE: 2 x 10000 iterations, each spawning perl -- this is very slow.
        filepath = 'eval_data.txt'
        for prefix in ['BIO', 'EIO']:
            for i in range(10000):
                print('Iteration: {}'.format(i))
                self.generate_eval_data(filepath, prefix)
                y_true, y_pred = self.load_labels(filepath)
                with open(filepath) as f:
                    output = subprocess.check_output(['perl', 'conlleval.pl'], stdin=f).decode('utf-8')
                    acc_true, p_true, r_true, f1_true = self.parse_conlleval_output(output)

                    acc_pred = accuracy_score(y_true, y_pred)
                    p_pred = precision_score(y_true, y_pred)
                    r_pred = recall_score(y_true, y_pred)
                    f1_pred = f1_score(y_true, y_pred)

                    self.assertLess(abs(acc_pred - acc_true), 1e-4)
                    self.assertLess(abs(p_pred - p_true), 1e-4)
                    self.assertLess(abs(r_pred - r_true), 1e-4)
                    self.assertLess(abs(f1_pred - f1_true), 1e-4)
        os.remove(filepath)

    def load_labels(self, filename):
        """Read a conlleval-format file into parallel (y_true, y_pred) lists
        of per-sentence tag sequences (blank lines separate sentences)."""
        y_true, y_pred = [], []
        with open(filename) as f:
            tags_true, tags_pred = [], []
            for line in f:
                line = line.rstrip()
                if len(line) == 0:
                    # Sentence boundary: flush the accumulated tags.
                    if len(tags_true) != 0:
                        y_true.append(tags_true)
                        y_pred.append(tags_pred)
                        tags_true, tags_pred = [], []
                else:
                    # Columns: token, POS, true tag, predicted tag.
                    _, _, tag_true, tag_pred = line.split(' ')
                    tags_true.append(tag_true)
                    tags_pred.append(tag_pred)
            else:
                # Flush the trailing sentence when the file has no final blank line.
                y_true.append(tags_true)
                y_pred.append(tags_pred)

        return y_true, y_pred

    @staticmethod
    def parse_conlleval_output(text):
        """Extract (accuracy, precision, recall, f1) as fractions from the
        summary line of conlleval.pl's output."""
        eval_line = text.split('\n')[1]
        items = eval_line.split(' ')
        accuracy, precision, recall = [item[:-2] for item in items if '%' in item]
        f1 = items[-1]

        accuracy = float(accuracy) / 100
        precision = float(precision) / 100
        recall = float(recall) / 100
        f1 = float(f1) / 100

        return accuracy, precision, recall, f1

    @staticmethod
    def generate_eval_data(filepath, prefixes='BIO'):
        """Write a random conlleval-format dataset of ~1000 tokens with tag
        prefixes drawn from `prefixes` and types from a fixed set."""
        types = ['PER', 'MISC', 'ORG', 'LOC']
        report = ''
        raw_fmt = '{} {} {} {}\n'
        for i in range(1000):
            type_true = random.choice(types)
            type_pred = random.choice(types)
            prefix_true = random.choice(prefixes)
            prefix_pred = random.choice(prefixes)
            true_out = 'O' if prefix_true == 'O' else '{}-{}'.format(prefix_true, type_true)
            pred_out = 'O' if prefix_pred == 'O' else '{}-{}'.format(prefix_pred, type_pred)
            report += raw_fmt.format('X', 'X', true_out, pred_out)
            # end of sentence
            if random.random() > 0.95:
                report += '\n'
        with open(filepath, 'w') as f:
            f.write(report)
| mit |
kevinpetersavage/BOUT-dev | examples/non-local_1d/analyse_target_heatflux.py | 3 | 6363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Runs the conduction example, produces some output
#
from __future__ import print_function
from builtins import str
from builtins import range
nproc = 1 # Number of processors to use
from boututils import shell, launch, plotdata
from boutdata import collect
import numpy as np
from sys import argv
from math import sqrt, log10, log, pi
from matplotlib import pyplot, ticker, rc
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern']})
#rc('font',**{'family':'serif','serif':['Computer Modern'],'size':16})
# Parse command line: "gamma [end_index] [data_path]" or "gamma [data_path]".
gamma = float(argv[1])
if len(argv)==2:
    end_index = -1            # -1 means "use all timesteps"
    data_path = "data"
elif len(argv)==3:
    # The second argument may be either an integer end_index or a data path.
    try:
        end_index = int(argv[2])
        data_path = "data"
    except ValueError:
        end_index = -1
        data_path = str(argv[2])
elif len(argv)==4:
    end_index = int(argv[2])
    data_path = str(argv[3])
else:
    print("Arguments: 'gamma [end_index] [data_path]' or 'gamma [data_path]'")
    # BUG FIX: the original called Exit(1), an undefined name, which raised a
    # NameError instead of exiting cleanly with a usage message.
    raise SystemExit(1)

# Physical constants [SI units]
electron_mass = 9.10938291e-31    # kg
ion_mass = 3.34358348e-27         # kg (deuteron)
# Collect the data
Te = collect("T_electron", path=data_path, info=True, yguards=True)
Ti = collect("T_ion", path=data_path, info=True, yguards=True)
n = collect("n_ion", path=data_path, info=True, yguards=True)

# Default end_index: process every available timestep.
if end_index<0:
    end_index = len(n[:,0,0,0])

# Per-timestep histories of the heat fluxes at both sheath boundaries.
logtime = []
q_electron_left = []
q_electron_right = []
q_ion_left = []
q_ion_right = []
q_total_left = []
q_total_right = []
q_target_electron_left = []
q_target_electron_right = []
q_target_ion_left = []
q_target_ion_right = []
q_target_total_left = []
q_target_total_right = []

# Index of the first of the two cells straddling the upper (right) boundary.
right_index = len(Te[0,2,:,0])-4

for i in range(len(Te[:end_index,2,0,0])):
    if i>0: logtime.append(log10(i))
    else: logtime.append(0)
    # Lower (left) boundary: average the two cells either side of the sheath edge.
    Te_here = 0.5*(Te[i,2,2,0]+Te[i,2,3,0])
    n_here = 0.5*(n[i,2,2,0]+n[i,2,3,0])
    Ti_here = 0.5*(Ti[i,2,2,0]+Ti[i,2,3,0])
    # Sheath potential drop (log term), in units of Te.
    sheath_potential = 0.5*Te_here*log(2*pi*electron_mass/ion_mass*(1+gamma*Ti_here/Te_here))
    q_electron_left.append((2.0*Te_here-sheath_potential)*n_here*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_ion_left.append(n_here*((2.5+0.5*gamma)*Ti_here+0.5*Te_here)*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_total_left.append(q_electron_left[i]+q_ion_left[i]) # in W/m^2
    q_target_electron_left.append((2.0*Te_here)*n_here*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_target_ion_left.append(n_here*((2.5+0.5*gamma)*Ti_here+0.5*Te_here-sheath_potential)*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_target_total_left.append(q_target_electron_left[i]+q_target_ion_left[i]) # in W/m^2
    # Upper (right) boundary.
    # NOTE(review): unlike the left boundary, there is no 0.5 averaging factor
    # here, so these are twice the cell-average values -- confirm intended.
    Te_here = (Te[i,2,right_index,0]+Te[i,2,right_index+1,0])
    n_here = (n[i,2,right_index,0]+n[i,2,right_index+1,0])
    Ti_here = (Ti[i,2,right_index,0]+Ti[i,2,right_index+1,0])
    sheath_potential = 0.5*Te_here*log(2*pi*electron_mass/ion_mass*(1+gamma*Ti_here/Te_here))
    q_electron_right.append((2.0*Te_here-sheath_potential)*n_here*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_ion_right.append(n_here*((2.5+0.5*gamma)*Ti_here+0.5*Te_here)*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_total_right.append(q_electron_right[i]+q_ion_right[i]) # in W/m^2
    q_target_electron_right.append((2.0*Te_here)*n_here*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_target_ion_right.append(n_here*((2.5+0.5*gamma)*Ti_here+0.5*Te_here-sheath_potential)*1.602176565e-19*sqrt((Te_here+gamma*Ti_here)/3.34358348e-27*1.602176565e-19)) # in W/m^2
    q_target_total_right.append(q_target_electron_right[i]+q_target_ion_right[i]) # in W/m^2
#pyplot.figure(1)
#pyplot.plot(logtime,q_electron_left)
#pyplot.axes().set_aspect('auto')
#pyplot.title("Electron Heat Flux (lower boundary)")
#pyplot.figure(2)
#pyplot.plot(q_electron_right)
#pyplot.axes().set_aspect('auto')
#pyplot.title("Electron Heat Flux (upper boundary)")
#pyplot.figure(3)
#pyplot.plot(q_total_left)
#pyplot.axes().set_aspect('auto')
#pyplot.title("Total Heat Flux (lower boundary)")
#pyplot.figure(4)
#pyplot.plot(q_total_right)
#pyplot.axes().set_aspect('auto')
#pyplot.title("Total Heat Flux (upper boundary)")

# Figure 5: sheath-edge heat fluxes, lower boundary (log time axis).
pyplot.figure(5,dpi=80, facecolor='w')
#pyplot.plot(logtime,q_electron_left,'r',logtime,q_ion_left,'b',logtime,q_total_left,'k')
#pyplot.title("Electron (red), Ion (blue) and Total (black) Sheath Edge Heat Flux vs log(t)")
pyplot.semilogx(q_electron_left,'r',q_ion_left,'b',q_total_left,'k')
pyplot.title("Electron (red), Ion (blue) and Total (black) Sheath Edge Heat Flux\\ \\")
pyplot.xlabel("$t/\mu\mathrm{s}$")
pyplot.ylabel(r"$Q\mathrm{/W.m}^{-2}$")
pyplot.axes().xaxis.set_major_formatter(ticker.FormatStrFormatter("$%g$"))
pyplot.axes().grid(color='grey', which='both')
#pyplot.tight_layout(pad=20)

# Figure 6: sheath-edge heat fluxes, upper boundary (linear time axis).
pyplot.figure(6)
pyplot.plot(q_electron_right,'r',q_ion_right,'b',q_total_right,'k')
pyplot.title("Electron (red), Ion (blue) and Total (black) Sheath Edge Heat Flux\\ \\")
pyplot.xlabel("$t/\mu\mathrm{s}$")
pyplot.ylabel(r"$Q\mathrm{/W.m}^{-2}$")

# Figure 7: target heat fluxes, lower boundary (log time axis, high dpi).
#pyplot.figure(7,dpi=80, facecolor='w')
pyplot.figure(7,dpi=800, facecolor='w')
#pyplot.plot(logtime,q_target_electron_left,'r',logtime,q_target_ion_left,'b',logtime,q_target_total_left,'k')
#pyplot.title("Electron (red), Ion (blue) and Total (black) Target Heat Flux vs log(t)")
#pyplot.semilogx(q_target_electron_left,'r',q_target_ion_left,'b',q_target_total_left,'k')
#pyplot.semilogx(q_target_electron_left,'k',q_target_ion_left,'r',q_target_total_left,'b')
pyplot.semilogx(q_target_electron_left,'r--',q_target_ion_left,'b:',q_target_total_left,'k')
#pyplot.title("Electron (red), Ion (blue) and Total (black) Target Heat Flux\\ \\")
pyplot.xlabel("$t/\mu\mathrm{s}$")
pyplot.ylabel(r"$Q\mathrm{/W.m}^{-2}$")
pyplot.ylim(0,5e9)
pyplot.axes().xaxis.set_major_formatter(ticker.FormatStrFormatter("$%g$"))
pyplot.axes().grid(color='grey', which='both')
pyplot.tight_layout(pad=20)

# Figure 8: target heat fluxes, upper boundary (linear time axis).
pyplot.figure(8)
pyplot.plot(q_target_electron_right,'r',q_target_ion_right,'b',q_target_total_right,'k')
pyplot.rc('text', usetex=True)
pyplot.title("Electron (red), Ion (blue) and Total (black) Target Heat Flux")
pyplot.xlabel("$t/\mu\mathrm{s}$")
pyplot.ylabel(r"$Q\mathrm{/W.m}^{-2}$")

pyplot.show()
| gpl-3.0 |
prabhjyotsingh/incubator-zeppelin | python/src/main/resources/python/mpl_config.py | 41 | 3653 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module provides utitlites for users to configure the inline plotting
# backend through a PyZeppelinContext instance (eg, through z.configure_mpl())
import matplotlib
def configure(**kwargs):
    """Update the inline-plotting configuration.

    Usage: configure(prop1='foo', prop2='bar', ...)

    Zeppelin-specific properties currently understood:

    interactive - show all figures without an explicit show() call,
                  via a post-execute hook.
    angular     - bind figures to the angular display system.
    close       - close every figure once it has been shown.
    width, height - default figure size in pixels.
    fontsize    - font size.
    dpi         - figure dpi.
    fmt         - figure format.
    supported_formats - the figure formats accepted.
    context     - ZeppelinContext instance (requires PY4J).
    """
    _config.update(kwargs)
    # Push any matplotlib-relevant settings into rcParams immediately.
    _on_config_change()
def get(key):
    """Return the configuration value registered under ``key``.

    Raises KeyError for unknown keys.
    """
    return _config[key]
def _on_config_change():
    """Propagate the relevant _config entries into matplotlib's rcParams."""
    # dpi
    dpi = _config['dpi']
    # For older versions of matplotlib, savefig.dpi is not synced with
    # figure.dpi by default
    matplotlib.rcParams['figure.dpi'] = dpi
    if matplotlib.__version__ < '2.0.0':
        matplotlib.rcParams['savefig.dpi'] = dpi

    # Width and height: _config stores pixels, rcParams wants inches.
    width = float(_config['width']) / dpi
    height = float(_config['height']) / dpi
    matplotlib.rcParams['figure.figsize'] = (width, height)

    # Font size
    fontsize = _config['fontsize']
    matplotlib.rcParams['font.size'] = fontsize

    # Default Figure Format
    fmt = _config['format']
    supported_formats = _config['supported_formats']
    if fmt not in supported_formats:
        raise ValueError("Unsupported format %s" %fmt)
    # Very old matplotlib rejects direct item assignment for this key.
    if matplotlib.__version__ < '1.2.0':
        matplotlib.rcParams.update({'savefig.format': fmt})
    else:
        matplotlib.rcParams['savefig.format'] = fmt

    # Interactive mode
    interactive = _config['interactive']
    matplotlib.interactive(interactive)
def _init_config():
    """Seed _config from matplotlib's current rcParams plus zeppelin defaults."""
    dpi = matplotlib.rcParams['figure.dpi']
    # Very old matplotlib needs the format seeded via update().
    if matplotlib.__version__ < '1.2.0':
        matplotlib.rcParams.update({'savefig.format': 'png'})
    fmt = matplotlib.rcParams['savefig.format']
    width, height = matplotlib.rcParams['figure.figsize']
    fontsize = matplotlib.rcParams['font.size']
    _config['dpi'] = dpi
    _config['format'] = fmt
    # Stored in pixels (rcParams uses inches).
    _config['width'] = width*dpi
    _config['height'] = height*dpi
    _config['fontsize'] = fontsize
    _config['close'] = True
    _config['interactive'] = matplotlib.is_interactive()
    _config['angular'] = False
    _config['supported_formats'] = ['png', 'jpg', 'svg']
    _config['context'] = None
# Module-level configuration store, populated from matplotlib defaults at
# import time.
_config = {}
_init_config()
| apache-2.0 |
lagadic/visp | example/calibration/camera_calibration_show_extrinsics.py | 2 | 12636 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ViSP, open source Visual Servoing Platform software.
# Copyright (C) 2005 - 2020 by Inria. All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the names of the copyright holders nor the names of the contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and
# any express or implied warranties, including, but not limited to, the implied
# warranties of merchantability and fitness for a particular purpose are disclaimed.
# In no event shall copyright holders or contributors be liable for any direct,
# indirect, incidental, special, exemplary, or consequential damages
# (including, but not limited to, procurement of substitute goods or services;
# loss of use, data, or profits; or business interruption) however caused
# and on any theory of liability, whether in contract, strict liability,
# or tort (including negligence or otherwise) arising in any way out of
# the use of this software, even if advised of the possibility of such damage.
# Python 2/3 compatibility
from __future__ import print_function
from scipy.spatial.transform import Rotation
import numpy as np
from numpy import linspace
from enum import Enum
import xml.etree.ElementTree as ET
import re
class PatternType(Enum):
    """Calibration target types; values match the text stored in the
    <calibration_pattern_type> node of ViSP calibration XML files."""
    CHESSBOARD = 'Chessboard'
    CIRCLES_GRID = 'Circles grid'
class PoseVector:
    """A 6-element ViSP pose vector (tx, ty, tz, thetaux, thetauy, thetauz)
    parsed from the whitespace-separated text of a <cMo> XML node."""

    def __init__(self, pose_str):
        # Fix: parameter renamed from `str`, which shadowed the builtin.
        # Callers pass it positionally, so this stays backward-compatible.
        self.pose = np.array([float(n) for n in pose_str.split()])

    def __str__(self):
        # transpose() is a no-op on a 1-D array; kept from the original.
        return "Pose: {}".format(self.pose.transpose())
class CameraInfo:
    """Intrinsic parameters, calibration-pattern description and per-image
    extrinsic poses for one calibrated camera, as read from a ViSP XML file."""

    def __init__(self, name, image_width, image_height, px, py, u0, v0,
                 pattern_type, board_width, board_height, square_size, cMo_vec):
        self.name = name
        self.image_width = image_width
        self.image_height = image_height
        # Pinhole intrinsic matrix K = [[px, 0, u0], [0, py, v0], [0, 0, 1]].
        self.intrinsics = np.array([[px, 0, u0], [0, py, v0], [0, 0, 1]], np.float64)
        self.pattern_type = pattern_type
        self.board_width = board_width
        self.board_height = board_height
        self.square_size = square_size
        self.cMo_vec = cMo_vec

    def __str__(self):
        # Fix: local variable renamed from `str`, which shadowed the builtin.
        # The format text is byte-identical to the original output.
        text = ("CameraInfo:\n\t camera name:{}\n\timage: {:d}x{:d}\n\tcamera "
                "intrinsics:\n{}\n\t{}\n\tboard_size: {:d}x{:d}\n\tsquare_size: "
                "{:g}\n\tposes:\n").format(
            self.name, self.image_width, self.image_height, self.intrinsics,
            self.pattern_type, self.board_width, self.board_height, self.square_size)
        for cMo in self.cMo_vec:
            text += "\t\t" + cMo.__str__() + "\n"
        return text
def inverse_homogeneoux_matrix(M):
    """Invert a 4x4 rigid-transform matrix analytically.

    For M = [R t; 0 1] the inverse is [R^T, -R^T t; 0 1].
    (Name kept as-is, typo included, for compatibility with callers.)
    """
    rotation = M[0:3, 0:3]
    translation = M[0:3, 3]
    inverse = np.identity(4)
    inverse[0:3, 0:3] = rotation.T
    inverse[0:3, 3] = -rotation.T.dot(translation)
    return inverse
def transform_to_matplotlib_frame(cMo, X, patternCentric=False):
    """Map a homogeneous point X into matplotlib's display frame.

    When `patternCentric`, the point is first brought back from the camera
    frame to the pattern frame via the inverse of `cMo`, then y and z are
    flipped; otherwise the point is transformed by `cMo` and the axes are
    permuted so the optical axis maps onto -y.
    """
    if patternCentric:
        axis_swap = np.diag([1.0, -1.0, -1.0, 1.0])
        return axis_swap.dot(inverse_homogeneoux_matrix(cMo).dot(X))
    axis_swap = np.identity(4)
    axis_swap[1, 1] = 0
    axis_swap[1, 2] = 1
    axis_swap[2, 1] = -1
    axis_swap[2, 2] = 0
    return axis_swap.dot(cMo.dot(X))
def create_camera_model(camera_matrix, width, height, scale_focal, draw_frame_axis=False):
    """Build a wireframe camera model as a list of 4xN homogeneous point arrays.

    The model consists of the image-plane rectangle, an "up" triangle above
    it, the four rays joining the optical center to the plane corners, and
    optionally the three camera-frame axis segments.
    """
    def homogeneous(*points):
        # Stack 3-D points as the columns of a 4xN homogeneous array.
        pts = np.ones((4, len(points)))
        for col, p in enumerate(points):
            pts[0:3, col] = p
        return pts

    fx = camera_matrix[0, 0]
    fy = camera_matrix[1, 1]
    # Plane depth: scale factor times the inverse average focal length.
    f_scale = scale_focal * (2 / (fx + fy))

    # Closed image-plane rectangle (last point repeats the first).
    image_plane = homogeneous([-width, height, f_scale],
                              [width, height, f_scale],
                              [width, -height, f_scale],
                              [-width, -height, f_scale],
                              [-width, height, f_scale])
    # Triangle above the image plane, marking the camera's "up" direction.
    triangle = homogeneous([-width, -height, f_scale],
                           [0, -2 * height, f_scale],
                           [width, -height, f_scale])
    origin = [0, 0, 0]
    # Rays from the optical center to each plane corner, in the original order.
    corners = [homogeneous(origin, [sx * width, sy * height, f_scale])
               for sx, sy in ((-1, 1), (1, 1), (1, -1), (-1, -1))]

    model = [image_plane, triangle] + corners
    if draw_frame_axis:
        half = f_scale / 2
        model += [homogeneous(origin, [half, 0, 0]),
                  homogeneous(origin, [0, half, 0]),
                  homogeneous(origin, [0, 0, half])]
    return model
def create_board_model(extrinsics, board_width, board_height, square_size, draw_frame_axis=False):
    """Build a wireframe calibration-board model as 4xN homogeneous arrays.

    Returns the closed board rectangle, plus (optionally) three board-frame
    axis segments. `extrinsics` is accepted for signature symmetry with
    `create_camera_model` but is not used here.
    """
    def homogeneous(*points):
        # Stack 3-D points as the columns of a 4xN homogeneous array.
        pts = np.ones((4, len(points)))
        for col, p in enumerate(points):
            pts[0:3, col] = p
        return pts

    width = board_width * square_size
    height = board_height * square_size
    origin = [0, 0, 0]

    # Closed rectangle outlining the board (last point repeats the first).
    board = homogeneous(origin, [width, 0, 0], [width, height, 0],
                        [0, height, 0], origin)
    if not draw_frame_axis:
        return [board]

    half = height / 2  # all three axes use height/2, as in the original
    return [board,
            homogeneous(origin, [half, 0, 0]),
            homogeneous(origin, [0, half, 0]),
            homogeneous(origin, [0, 0, half])]
def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
                       extrinsics, board_width, board_height, square_size,
                       patternCentric):
    """Draw the camera and calibration boards on a 3-D matplotlib axis.

    Depending on `patternCentric`, either the board is static and one camera
    wireframe is drawn per pose, or vice versa. `extrinsics` is an (N, 6)
    array of [rotvec(3), translation(3)] rows.

    Returns (min_values, max_values): per-axis bounds of everything plotted,
    for use in setting equal axis limits.
    """
    from matplotlib import cm

    # Running per-axis bounds; np.minimum/np.maximum broadcast scalars to (3,).
    min_values = np.inf
    max_values = -np.inf

    if patternCentric:
        X_moving = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal)
        X_static = create_board_model(extrinsics, board_width, board_height, square_size, True)
    else:
        X_static = create_camera_model(camera_matrix, cam_width, cam_height, scale_focal, True)
        X_moving = create_board_model(extrinsics, board_width, board_height, square_size, True)

    # One distinct color per pose.
    colors = [cm.jet(x) for x in linspace(0.0, 1.0, extrinsics.shape[0])]

    for i in range(len(X_static)):
        X = np.zeros(X_static[i].shape)
        for j in range(X_static[i].shape[1]):
            X[:, j] = transform_to_matplotlib_frame(np.eye(4), X_static[i][:, j], patternCentric)
        ax.plot3D(X[0, :], X[1, :], X[2, :], color='r')
        min_values = np.minimum(min_values, X[0:3, :].min(1))
        max_values = np.maximum(max_values, X[0:3, :].max(1))

    for idx in range(extrinsics.shape[0]):
        # Fix: Rotation.as_dcm() was removed in scipy >= 1.6; prefer
        # as_matrix() when available, fall back to as_dcm() on old scipy.
        rot = Rotation.from_rotvec(extrinsics[idx, 0:3])
        R = rot.as_matrix() if hasattr(rot, "as_matrix") else rot.as_dcm()
        cMo = np.eye(4, 4)
        cMo[0:3, 0:3] = R
        cMo[0:3, 3] = extrinsics[idx, 3:6]
        for i in range(len(X_moving)):
            X = np.zeros(X_moving[i].shape)
            for j in range(X_moving[i].shape[1]):
                X[0:4, j] = transform_to_matplotlib_frame(cMo, X_moving[i][0:4, j], patternCentric)
            ax.plot3D(X[0, :], X[1, :], X[2, :], color=colors[idx])
            min_values = np.minimum(min_values, X[0:3, :].min(1))
            max_values = np.maximum(max_values, X[0:3, :].max(1))

    return min_values, max_values
def main():
    """Parse a ViSP camera-calibration XML file and show the extrinsic poses
    (camera-centric or pattern-centric) on an equal-aspect 3-D plot."""
    import argparse
    parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--calibration', type=str, default='camera.xml',
                        help='XML camera calibration file.')
    parser.add_argument('--cam_width', type=float, default=0.064/2,
                        help='Width/2 of the displayed camera.')
    parser.add_argument('--cam_height', type=float, default=0.048/2,
                        help='Height/2 of the displayed camera.')
    parser.add_argument('--scale_focal', type=float, default=40,
                        help='Value to scale the focal length.')
    parser.add_argument('--cameraCentric', action='store_true',
                        help='If argument is present, the camera is static and the calibration boards are moving.')
    args = parser.parse_args()

    tree = ET.parse(args.calibration)
    root = tree.getroot()

    def loadCameraInfo(root):
        # Build one CameraInfo per <camera> node, including every recorded pose.
        camera_info_vec = []
        for camera in root.iter('camera'):
            pattern_type = PatternType.CHESSBOARD
            if camera.find('additional_information').find('calibration_pattern_type').text == 'Circles grid':
                pattern_type = PatternType.CIRCLES_GRID
            # board_size is stored as "WxH" text, e.g. "9x6".
            board_size = re.search(r"(\d+)x(\d+)", camera.find('additional_information').find('board_size').text)
            cMo_vec = []
            for cMo in camera.find('additional_information').find('camera_poses').iter('cMo'):
                cMo_vec.append(PoseVector(cMo.text))
            camera_info_vec.append(CameraInfo( \
                camera.find('name').text, int(camera.find('image_width').text), int(camera.find('image_height').text), \
                float(camera.find('model').find('px').text), float(camera.find('model').find('py').text), \
                float(camera.find('model').find('u0').text), float(camera.find('model').find('v0').text), \
                pattern_type, int(board_size.group(1)), int(board_size.group(2)), \
                float(camera.find('additional_information').find('square_size').text), \
                cMo_vec \
                ))
        return camera_info_vec

    camera_info_vec = loadCameraInfo(root)
    for camera_info in camera_info_vec:
        print('\n', camera_info)
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-variable
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_aspect("equal")
        cMo_vec = camera_info.cMo_vec
        # ViSP poses are [t(3); theta-u(3)]; draw_camera_boards expects
        # [rotvec(3); t(3)], so the two halves are swapped here.
        extrinsics = np.empty((len(cMo_vec), 6), np.float64)
        for idx, cMo in enumerate(cMo_vec):
            extrinsics[idx, :3] = cMo.pose[3:]
            extrinsics[idx, 3:] = cMo.pose[:3]
        cam_width = args.cam_width
        cam_height = args.cam_height
        scale_focal = args.scale_focal
        pattern_centric = not args.cameraCentric
        min_values, max_values = draw_camera_boards(ax, camera_info.intrinsics, cam_width, cam_height,
                                                    scale_focal, extrinsics, camera_info.board_width,
                                                    camera_info.board_height, camera_info.square_size,
                                                    pattern_centric)

        # Equal aspect: expand every axis to the largest half-range around the midpoints.
        X_min = min_values[0]
        X_max = max_values[0]
        Y_min = min_values[1]
        Y_max = max_values[1]
        Z_min = min_values[2]
        Z_max = max_values[2]
        max_range = np.array([X_max-X_min, Y_max-Y_min, Z_max-Z_min]).max() / 2.0

        mid_x = (X_max+X_min) * 0.5
        mid_y = (Y_max+Y_min) * 0.5
        mid_z = (Z_max+Z_min) * 0.5
        ax.set_xlim(mid_x - max_range, mid_x + max_range)
        ax.set_ylim(mid_y - max_range, mid_y + max_range)
        ax.set_zlim(mid_z - max_range, mid_z + max_range)

        if pattern_centric:
            ax.set_xlabel('x')
            ax.set_ylabel('-y')
            ax.set_zlabel('-z')
            ax.set_title('Camera Poses Visualization')
        else:
            ax.set_xlabel('x')
            ax.set_ylabel('z')
            ax.set_zlabel('-y')
            ax.set_title('Calibration Board Poses Visualization')
        plt.show()
if __name__ == '__main__':
main()
| gpl-2.0 |
subutai/nupic.research | nupic/research/frameworks/wandb/ray_wandb.py | 2 | 11410 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import ast
import json
import numbers
import os
import warnings
from copy import deepcopy
import wandb
from ray import tune
from ray.tune.utils import flatten_dict
__all__ = [
"log",
"WandbLogger",
"prep_plot_for_wandb",
]
# Find directory of where wandb save its results.
if "WANDB_DIR" in os.environ:
WANDB_DIR = os.path.join(os.environ["WANDB_DIR"], "wandb")
else:
WANDB_DIR = None
CONFIG_NAME = "ray_wandb_config.json"
def log(log_dict, commit=False, step=None, sync=True, *args, **kwargs):
    """
    Forward `log_dict` to wandb.log, but only when a wandb run is active.
    """
    if not wandb.run:
        return
    wandb.log(log_dict, commit=commit, step=step, sync=sync, *args, **kwargs)
class WandbLogger(tune.logger.Logger):
    """
    This forks the wandb 0.9.7 ray WandbLogger to
        1) make resuming experiments easier
        2) include more supported data types
        3) support logging time-series formatted results

    As a `ray.tune.logger.Logger` this class will process all results returned from
    training and automatically sync them to wandb.

    To use this class, include it in your `tune.run` config under `loggers` and add
    `env_config["wandb"]` to specify wandb params.

    The main options include
        wandb:
            - name: Chosen name of run/experiment
            - project: Name of wandb project to group all related runs
            - group: Extra layer of naming to group runs under a project
            - notes: A multi-line string associated with the run

    All are optional, but name, project, and notes are recommended. For all wandb init
    params, see https://docs.wandb.com/library/init.

    Example usage:
    ```
    # Be sure to set `WANDB_API_KEY` in environment variables.
    from ray.tune.logger import DEFAULT_LOGGERS
    tune.run(
        MyTrianable,
        loggers=list(DEFAULT_LOGGERS) + [WandbLogger],
        config={
            "env_config": {
                "wandb": {
                    "project": "my-project-name",
                    "name": "my-exp-name",
                    "group": "group-of-runs",
                    "notes": "This experiment aims to ..."
                },

                # Optional
                "result_to_time_series_fn":
                    MyExperiment.expand_result_to_time_series,
            }
        }
    )
    ```

    The "result_to_time_series_fn" is a function that takes a result and config
    and returns a dictionary of {timestep: result}. If you provide this
    function, you convert from an epoch-based time series to your own
    timestep-based time series, logging multiple timesteps for each epoch.
    """

    # Only the following types are able to be logged through this class.
    # See https://docs.wandb.com/library/log for all wandb data-types.
    accepted_types = (
        numbers.Number,
        wandb.data_types.WBValue,  # Base class for all wandb values
    )

    def _init(self):
        """
        This function runs `wandb.init` with two key extra steps:
            1) `group` is automatically assigned to the date-time if not already given
            2) The config passed to `wandb.init` is saved. This allows `log` (from this
               module) to make an identical call to `wandb.init`. While the former init
               gets called outside of the ray process, the latter typically does not.
               Thus, by saving the wandb-config, we can associate calls to `log` to the
               same `group` associated to this logger.
        """
        self._config = None

        # Auto format the group to be the name of the trial.
        env_config = self.config["env_config"]
        wandb_config = env_config["wandb"]

        # Find latest run config upon resume.
        resume = wandb_config.get("resume", False)
        if resume and "id" not in wandb_config:
            enable_run_resume(wandb_config)

        # This will create a new run-directory.
        wandb.init(**self.config.get("env_config", {}).get("wandb", {}))

        # Get result_to_time_series_fn: explicit config entry wins over the
        # experiment class's own `expand_result_to_time_series` hook.
        experiment_class = self.config.get("experiment_class", None)
        self.result_to_time_series_fn = None

        if "result_to_time_series_fn" in env_config:
            self.result_to_time_series_fn = env_config["result_to_time_series_fn"]
        elif hasattr(experiment_class, "expand_result_to_time_series"):
            self.result_to_time_series_fn = (
                experiment_class.expand_result_to_time_series
            )

    def on_result(self, result):
        """
        The following is copied from the parent class; however, non-serializable
        config values are saved as the repr's so that they are all yaml
        serializable. See for details:
            - https://github.com/wandb/client/issues/586
        """
        config = deepcopy(result.get("config"))
        if config and self._config is None:
            for k in config.keys():
                if wandb.config.get(k) is None:
                    s = repr(config[k])
                    try:
                        # Keep the raw value only when repr round-trips.
                        ast.literal_eval(s)
                        wandb.config[k] = config[k]
                    except (ValueError, SyntaxError):
                        # Non-serializable
                        wandb.config[k] = s
            self._config = config

        # Strip bookkeeping keys before logging metrics.
        tmp = result.copy()
        for k in ["done", "config", "pid", "timestamp"]:
            if k in tmp:
                del tmp[k]

        if self.result_to_time_series_fn is not None:
            assert self._config is not None
            time_series_dict = self.result_to_time_series_fn(tmp, self._config)
            # Log timesteps in increasing order; wandb requires monotonic steps.
            for t, d in sorted(time_series_dict.items(), key=lambda x: x[0]):
                metrics = {}
                for key, value in flatten_dict(d, delimiter="/").items():
                    if not isinstance(value, self.accepted_types):
                        continue
                    metrics[key] = value
                wandb.log(metrics, step=t)
        else:
            metrics = {}
            for key, value in flatten_dict(tmp, delimiter="/").items():
                if not isinstance(value, self.accepted_types):
                    continue
                metrics[key] = value
            wandb.log(metrics)

    def close(self):
        # Flush and finalize the wandb run for this trial.
        wandb.join()
class PrepPlotForWandb:
    """
    This mixin ensures all plots can be logged to wandb without error. Ray typically
    tries to deepcopy the results dict which throws an error since this is not
    implemented for plots by matplotlib. This is avoided by first wrapping the plots
    with wandb.Image before sending them to Ray which logs them through the WandbLogger.
    """

    def run_epoch(self):
        """Wrap any matplotlib plots in the epoch results with wandb.Image."""
        results = super().run_epoch()
        results.update({
            name: wandb.Image(value)
            for name, value in results.items()
            if is_matplotlib_plot(value)
        })
        return results

    @classmethod
    def get_execution_order(cls):
        eo = super().get_execution_order()
        eo["run_epoch"].append(
            "PrepPlotForWandb: Wrap plots with wandb.Image")
        return eo
# ---------
# Utils
# ---------
def is_matplotlib_plot(value):
    """Return True when `value` is a matplotlib object, per wandb's type check."""
    return wandb.util.is_matplotlib_typename(wandb.util.get_full_typename(value))
def prep_plot_for_wandb(plot_func):
    """
    This wraps a plotting function to alter its return value to be of type wandb.Image.
    This way, the plot can be logged through ray, specifically the ray WandbLogger,
    without error. Ray typically tries to deepcopy all logged objects; however, plots
    cannot be deepcopied.

    :param plot_func: callable with arbitrary arguments that returns a matplotlib
                      figure, axes object, or anything related.
    """
    from functools import wraps  # local import: module does not import functools

    # Fix: preserve the wrapped function's name/docstring so logs and debuggers
    # show the original plotting function, not "plot_and_make_wandb_image".
    @wraps(plot_func)
    def plot_and_make_wandb_image(*args, **kwargs):
        plot = plot_func(*args, **kwargs)
        if is_matplotlib_plot(plot):
            plot = wandb.Image(plot)
        else:
            warnings.warn(f"Unable to convert object of type {type(plot)}"
                          " to `wandb.Image`.")
        return plot

    return plot_and_make_wandb_image
def enable_run_resume(wandb_config):
    """Set `id` and `resume=True` on `wandb_config`, looking up the most recent
    run id by name when an id is not already present."""
    run_id = wandb_config.get("id", None)
    if not run_id and wandb_config.get("name", None):
        run_id = get_latest_run_id(name=wandb_config["name"]) or None
        if run_id is None:
            warnings.warn(
                "Couldn't find latest wandb run-id to resume."
                "Ensure `WANDB_DIR` environment variable is set.")

    wandb_config.update(id=run_id, resume=True)
def get_latest_run_id(name=None):
    """
    Return the run id of the most recent wandb run directory, or None.

    :param name: (optional) name of run; filters runs so they must match the name given
    """
    latest = get_latest_run_dir(name=name)
    if latest is None:
        return None
    # Run directories look like "run-<date>-<id>"; the id is the final field.
    return latest.split("-")[-1] or None
def get_latest_run_dir(name=None):
    """
    Gets the directory of where the latest wandb run is saved.

    :param name: (optional) name of run; filters runs so they must match the name given
    :return: absolute path of the newest matching run directory, or None when
             `WANDB_DIR` is unset or no run directory matches.
    """
    if WANDB_DIR is None:
        return None

    all_subdirs = []
    for d in os.listdir(WANDB_DIR):

        # Make sure run directory exists.
        d_full = os.path.join(WANDB_DIR, d)
        if not os.path.isdir(d_full):
            continue

        # Validate name of run when specified.
        run_metadata_path = os.path.join(d_full, "wandb-metadata.json")
        if name and os.path.isfile(run_metadata_path):
            with open(run_metadata_path, "r") as f:
                try:
                    run_metadata = json.load(f)
                except json.JSONDecodeError:
                    run_metadata = {}

            d_name = run_metadata.get("name", False)
            if d_name and d_name == name:
                all_subdirs.append(d_full)

        # If name is not given, add to list of run directories by default.
        elif name is None:
            all_subdirs.append(d_full)

    # Fix: `max` raises ValueError on an empty sequence; when no run
    # directory matches, return None (callers already handle None).
    if not all_subdirs:
        return None

    # Find latest run directory chronologically.
    latest_run_dir = max(all_subdirs, key=os.path.getmtime)

    return latest_run_dir
| agpl-3.0 |
bmazin/ARCONS-pipeline | flatcal/plotWeights.py | 1 | 4808 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from util.FileName import FileName
# npzFileName = os.path.splitext(fullFlatCalFileName)[0]+'.npz'
#
# #calculate total spectra and medians for programs that expect old format flat cal
# spectra = np.array(np.sum(self.spectralCubes,axis=0))
#
# wvlMedians = np.zeros(self.nWvlBins)
# spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins ])
# for iWvl in xrange(self.nWvlBins):
# spectrum = spectra2d[:,iWvl]
# goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians
# wvlMedians[iWvl] = np.median(goodSpectrum)
# np.savez(npzFileName,median=wvlMedians,medianSpectra=np.array(self.medianSpectra),binEdges=self.wvlBinEdges,spectra=spectra,weights=np.array(self.flatWeights.data))
def main():
    """Plot per-pixel flat-cal weight curves into a multi-page PDF, comparing
    old and new twilight flat solutions on the same axes.

    NOTE: this module is Python 2 (print statements, xrange); kept as-is.
    """
    run='PAL2012'
    #flatSunsetDates = ['20121211','20121207','20121210','20121211']
    #flatTimestamps = ['20121212-084725','','','']
    #flatLabels = ['geminga 1211','twi 1207','twi 1210','twi 1211']
    flatSunsetDates = ['20121207','20121210','20121211']
    flatTimestamps = ['','','']
    flatLabels = ['twi 1207','twi 1210','twi 1211']
    oldFlatSunsetDates = ['20121207','20121210','20121211']
    oldFlatTimestamps = ['','','']
    oldFlatLabels = ['old twi 1207','old twi 1210','old twi 1211']
    flatFileNames = [FileName(run=run,date=date,tstamp=tstamp).flatInfo() for date,tstamp in zip(flatSunsetDates,flatTimestamps)]
    oldFlatFileNames = [FileName(run=run,date=date,tstamp=tstamp).oldFlatInfo() for date,tstamp in zip(oldFlatSunsetDates,oldFlatTimestamps)]
    flatInfos = [np.load(filename) for filename in flatFileNames]
    oldFlatInfos = [np.load(filename) for filename in oldFlatFileNames]

    pdfFileName = '/Scratch/flatCalSolnFiles2/compareFlatWeights.pdf'
    pp = PdfPages(pdfFileName)
    nPlotsPerRow = 2
    nPlotsPerCol = 4
    nPlotsPerPage = nPlotsPerRow*nPlotsPerCol
    iPlot = 0
    matplotlib.rcParams['font.size'] = 4
    # Detector geometry: 46 rows x 44 columns of pixels.
    nRow=46
    nCol=44
    for iRow in xrange(nRow):
        print iRow
        for iCol in xrange(nCol):
            # Start a fresh page every nPlotsPerPage subplots.
            if iPlot % nPlotsPerPage == 0:
                fig = plt.figure(figsize=(10,10),dpi=100)
            ax = fig.add_subplot(nPlotsPerCol,nPlotsPerRow,iPlot%nPlotsPerPage+1)
            ax.set_ylim(.5,2.)
            ax.set_xlabel(r'$\lambda$ ($\AA$)')
            ax.set_ylabel(r'Weights')
            ax.set_title('Flat Weights')
            ax.set_title('p %d,%d'%(iRow,iCol))
            # Skip dead pixels: all-zero weights across every flat solution.
            allWeights = np.array([flatInfo['weights'][iRow,iCol] for flatInfo in flatInfos])
            if np.any(allWeights):
                # Old solutions plotted with the cooler half of the jet colormap.
                for iFlatInfo,flatInfo in enumerate(oldFlatInfos):
                    #weights = flatInfo['weights'][iRow,iCol]
                    weights = flatInfo['weights'][iRow,iCol]
                    flatSpectra = flatInfo['spectra'][iRow,iCol]
                    flatMedians = flatInfo['median']
                    # Poisson errors propagated through the weight ratio.
                    deltaFlatSpectra = np.sqrt(flatSpectra)
                    deltaWeights = weights*deltaFlatSpectra/flatSpectra
                    color=matplotlib.cm.jet((iFlatInfo+1.)/(len(flatInfos)*2))
                    #wvlBinCenters = self.wvlBinEdges[:-1]+np.diff(self.wvlBinEdges)/2.
                    #ax.step(self.wvlBinEdges[:-1],weights,linestyle='-',label=self.params['flatInfoFiles'][iFlat],color=color,)
                    ax.errorbar(flatInfo['binEdges'][0:-1],weights,linestyle='-',yerr=deltaWeights,color=color,label=oldFlatLabels[iFlatInfo],alpha=.7)
                # New solutions plotted with the warmer half of the jet colormap.
                for iFlatInfo,flatInfo in enumerate(flatInfos):
                    #weights = flatInfo['weights'][iRow,iCol]
                    weights = allWeights[iFlatInfo]
                    flatSpectra = flatInfo['spectra'][iRow,iCol]
                    flatMedians = flatInfo['median']
                    deltaFlatSpectra = np.sqrt(flatSpectra)
                    deltaWeights = weights*deltaFlatSpectra/flatSpectra
                    color=matplotlib.cm.jet((iFlatInfo+1.+len(flatInfos))/(len(flatInfos)*2))
                    #wvlBinCenters = self.wvlBinEdges[:-1]+np.diff(self.wvlBinEdges)/2.
                    #ax.step(self.wvlBinEdges[:-1],weights,linestyle='-',label=self.params['flatInfoFiles'][iFlat],color=color,)
                    ax.errorbar(flatInfo['binEdges'][0:-1],weights,linestyle='-',yerr=deltaWeights,color=color,label=flatLabels[iFlatInfo])
                ax.legend(loc='lower right')
            # Flush the page when full, or at the very last pixel.
            if iPlot%nPlotsPerPage == nPlotsPerPage-1 or (iRow == nRow-1 and iCol == nCol-1):
                pp.savefig(fig)
            iPlot += 1
    pp.close()
if __name__=='__main__':
main()
| gpl-2.0 |
HyukjinKwon/spark | python/pyspark/pandas/series.py | 9 | 197528 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark Column to behave similar to pandas Series.
"""
import datetime
import re
import inspect
import sys
from collections.abc import Mapping
from functools import partial, wraps, reduce
from typing import (
Any,
Callable,
Dict,
Generic,
IO,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
no_type_check,
overload,
TYPE_CHECKING,
)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.io.formats.printing import pprint_thing
from pandas.api.types import is_list_like, is_hashable
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame
from pyspark.sql.types import (
ArrayType,
BooleanType,
DataType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
LongType,
NumericType,
Row,
StructType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, Dtype, Label, Name, Scalar, T
from pyspark.pandas.accessors import PandasOnSparkSeriesMethods
from pyspark.pandas.categorical import CategoricalAccessor
from pyspark.pandas.config import get_option
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.exceptions import SparkPandasIndexingError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.missing.series import MissingPandasLikeSeries
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
from pyspark.pandas.ml import corr
from pyspark.pandas.utils import (
combine_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
verify_temp_column_name,
SPARK_CONF_ARROW_ENABLED,
)
from pyspark.pandas.datetimes import DatetimeMethods
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkSeriesMethods
from pyspark.pandas.strings import StringMethods
from pyspark.pandas.typedef import (
infer_return_type,
spark_type_to_pandas_dtype,
ScalarType,
SeriesType,
)
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import SeriesGroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
# This regular expression pattern is complied and defined here to avoid to compile the same
# pattern every time it is used in _repr_ in Series.
# This pattern basically seeks the footer string from pandas'
REPR_PATTERN = re.compile(r"Length: (?P<length>[0-9]+)")
_flex_doc_SERIES = """
Return {desc} of series and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``
Parameters
----------
other : Series or scalar value
Returns
-------
Series
The result of the operation.
See Also
--------
Series.{reverse}
{series_examples}
"""
_add_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.add(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
>>> df.a.radd(df.b)
a 4.0
b NaN
c 6.0
d NaN
dtype: float64
"""
_sub_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.subtract(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rsub(df.b)
a 0.0
b NaN
c -2.0
d NaN
dtype: float64
"""
_mul_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.multiply(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
>>> df.a.rmul(df.b)
a 4.0
b NaN
c 8.0
d NaN
dtype: float64
"""
_div_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.divide(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rdiv(df.b)
a 1.0
b NaN
c 0.5
d NaN
dtype: float64
"""
_pow_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.pow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
>>> df.a.rpow(df.b)
a 4.0
b NaN
c 16.0
d NaN
dtype: float64
"""
_mod_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.mod(df.b)
a 0.0
b NaN
c 0.0
d NaN
dtype: float64
>>> df.a.rmod(df.b)
a 0.0
b NaN
c 2.0
d NaN
dtype: float64
"""
_floordiv_example_SERIES = """
Examples
--------
>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],
... 'b': [2, np.nan, 2, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df
a b
a 2.0 2.0
b 2.0 NaN
c 4.0 2.0
d NaN NaN
>>> df.a.floordiv(df.b)
a 1.0
b NaN
c 2.0
d NaN
dtype: float64
>>> df.a.rfloordiv(df.b)
a 1.0
b NaN
c 0.0
d NaN
dtype: float64
"""
# Needed to disambiguate Series.str and str type
str_type = str


def _create_type_for_series_type(param: Any) -> Type[SeriesType]:
    """Build a SeriesType parameterized by the type carried in `param`.

    `param` may be an ExtensionDtype, a numpy dtype, or a plain type.
    """
    from pyspark.pandas.typedef import NameTypeHolder

    if isinstance(param, ExtensionDtype):
        # An ExtensionDtype instance cannot serve directly as a type
        # parameter, so wrap it in a fresh NameTypeHolder subclass.
        holder = type("NameType", (NameTypeHolder,), {})  # type: Type[NameTypeHolder]
        holder.tpe = param
    elif isinstance(param, np.dtype):
        holder = param.type
    else:
        holder = param

    return SeriesType[holder]  # type: ignore
# Compatibility shim: on Python 3.5/3.6 `Series[...]` subscripts go through
# typing.GenericMeta, so patch its __getitem__ to route Series subscriptions
# through _create_type_for_series_type while leaving all other generics alone.
if (3, 5) <= sys.version_info < (3, 7) and __name__ != "__main__":
    from typing import GenericMeta  # type: ignore

    old_getitem = GenericMeta.__getitem__  # type: ignore

    @no_type_check
    def new_getitem(self, params):
        # `is_series` marks the pandas-on-Spark Series generic; only then do
        # we translate the parameter into a SeriesType.
        if hasattr(self, "is_series"):
            return old_getitem(self, _create_type_for_series_type(params))
        else:
            return old_getitem(self, params)

    GenericMeta.__getitem__ = new_getitem  # type: ignore
class Series(Frame, IndexOpsMixin, Generic[T]):
"""
pandas-on-Spark Series that corresponds to pandas Series logically. This holds Spark Column
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
:ivar _psdf: Parent's pandas-on-Spark DataFrame
:type _psdf: ps.DataFrame
Parameters
----------
data : array-like, dict, or scalar value, pandas Series
Contains data stored in Series
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas Series, other arguments should not be used.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
"""
@no_type_check
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
assert data is not None
if isinstance(data, DataFrame):
assert dtype is None
assert name is None
assert not copy
assert not fastpath
self._anchor = data # type: DataFrame
self._col_label = index # type: Label
else:
if isinstance(data, pd.Series):
assert index is None
assert dtype is None
assert name is None
assert not copy
assert not fastpath
s = data
else:
s = pd.Series(
data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
)
internal = InternalFrame.from_pandas(pd.DataFrame(s))
if s.name is None:
internal = internal.copy(column_labels=[None])
anchor = DataFrame(internal)
self._anchor = anchor
self._col_label = anchor._internal.column_labels[0]
object.__setattr__(anchor, "_psseries", {self._column_label: self})
@property
def _psdf(self) -> DataFrame:
return self._anchor
@property
def _internal(self) -> InternalFrame:
return self._psdf._internal.select_column(self._column_label)
@property
def _column_label(self) -> Optional[Label]:
return self._col_label
    def _update_anchor(self, psdf: DataFrame) -> None:
        """Re-anchor this Series onto ``psdf`` (used by inplace operations).

        ``psdf`` must contain exactly this Series' column label; the new
        anchor's ``_psseries`` registry is replaced so it maps that label
        back to this object.
        """
        assert psdf._internal.column_labels == [self._column_label], (
            psdf._internal.column_labels,
            [self._column_label],
        )
        self._anchor = psdf
        object.__setattr__(psdf, "_psseries", {self._column_label: self})
    def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> "Series":
        """
        Copy pandas-on-Spark Series with the new Spark Column.

        :param scol: the new Spark Column
        :param field: metadata for the new column; if it carries a struct
            field, it is renamed to match this Series' label.
        :return: the copied Series
        """
        name = name_like_string(self._column_label)
        internal = self._internal.copy(
            data_spark_columns=[scol.alias(name)],
            data_fields=[
                # Keep the field only when it has no struct_field to rename;
                # otherwise re-key it to this Series' (aliased) column name.
                field if field is None or field.struct_field is None else field.copy(name=name)
            ],
        )
        return first_series(DataFrame(internal))
spark = CachedAccessor("spark", SparkSeriesMethods)
    @property
    def dtypes(self) -> Dtype:
        """Return the dtype object of the underlying data.

        Alias of :attr:`dtype` (a Series has a single dtype).

        >>> s = ps.Series(list('abc'))
        >>> s.dtype == s.dtypes
        True
        """
        return self.dtype
    @property
    def axes(self) -> List["Index"]:
        """
        Return a list of the row axis labels.

        A Series has a single axis, so the list always has one element.

        Examples
        --------
        >>> psser = ps.Series([1, 2, 3])
        >>> psser.axes
        [Int64Index([0, 1, 2], dtype='int64')]
        """
        return [self.index]
# Arithmetic Operators
    def add(self, other: Any) -> "Series":
        # Delegate to the ``+`` operator; the docstring is attached below.
        return self + other
    add.__doc__ = _flex_doc_SERIES.format(
        desc="Addition",
        op_name="+",
        equiv="series + other",
        reverse="radd",
        series_examples=_add_example_SERIES,
    )
    def radd(self, other: Any) -> "Series":
        # Reflected ``+``; the docstring is attached below.
        return other + self
    radd.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Addition",
        op_name="+",
        equiv="other + series",
        reverse="add",
        series_examples=_add_example_SERIES,
    )
    def div(self, other: Any) -> "Series":
        # Delegate to true (floating) division; docstring attached below.
        return self / other
    div.__doc__ = _flex_doc_SERIES.format(
        desc="Floating division",
        op_name="/",
        equiv="series / other",
        reverse="rdiv",
        series_examples=_div_example_SERIES,
    )
    # pandas-compatible alias.
    divide = div
    def rdiv(self, other: Any) -> "Series":
        # Reflected ``/``; docstring attached below.
        return other / self
    rdiv.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Floating division",
        op_name="/",
        equiv="other / series",
        reverse="div",
        series_examples=_div_example_SERIES,
    )
    def truediv(self, other: Any) -> "Series":
        # Same operation as :meth:`div`; kept as a separate pandas-compatible name.
        return self / other
    truediv.__doc__ = _flex_doc_SERIES.format(
        desc="Floating division",
        op_name="/",
        equiv="series / other",
        reverse="rtruediv",
        series_examples=_div_example_SERIES,
    )
    def rtruediv(self, other: Any) -> "Series":
        # Reflected true division; docstring attached below.
        return other / self
    rtruediv.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Floating division",
        op_name="/",
        equiv="other / series",
        reverse="truediv",
        series_examples=_div_example_SERIES,
    )
    def mul(self, other: Any) -> "Series":
        # Delegate to ``*``; docstring attached below.
        return self * other
    mul.__doc__ = _flex_doc_SERIES.format(
        desc="Multiplication",
        op_name="*",
        equiv="series * other",
        reverse="rmul",
        series_examples=_mul_example_SERIES,
    )
    # pandas-compatible alias.
    multiply = mul
    def rmul(self, other: Any) -> "Series":
        # Reflected ``*``; docstring attached below.
        return other * self
    rmul.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Multiplication",
        op_name="*",
        equiv="other * series",
        reverse="mul",
        series_examples=_mul_example_SERIES,
    )
    def sub(self, other: Any) -> "Series":
        # Delegate to ``-``; docstring attached below.
        return self - other
    sub.__doc__ = _flex_doc_SERIES.format(
        desc="Subtraction",
        op_name="-",
        equiv="series - other",
        reverse="rsub",
        series_examples=_sub_example_SERIES,
    )
    # pandas-compatible alias.
    subtract = sub
    def rsub(self, other: Any) -> "Series":
        # Reflected ``-``; docstring attached below.
        return other - self
    rsub.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Subtraction",
        op_name="-",
        equiv="other - series",
        reverse="sub",
        series_examples=_sub_example_SERIES,
    )
    def mod(self, other: Any) -> "Series":
        # Delegate to ``%``; docstring attached below.
        return self % other
    mod.__doc__ = _flex_doc_SERIES.format(
        desc="Modulo",
        op_name="%",
        equiv="series % other",
        reverse="rmod",
        series_examples=_mod_example_SERIES,
    )
    def rmod(self, other: Any) -> "Series":
        # Reflected ``%``; docstring attached below.
        return other % self
    rmod.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Modulo",
        op_name="%",
        equiv="other % series",
        reverse="mod",
        series_examples=_mod_example_SERIES,
    )
    def pow(self, other: Any) -> "Series":
        # Delegate to ``**``; docstring attached below.
        return self ** other
    pow.__doc__ = _flex_doc_SERIES.format(
        desc="Exponential power of series",
        op_name="**",
        equiv="series ** other",
        reverse="rpow",
        series_examples=_pow_example_SERIES,
    )
    def rpow(self, other: Any) -> "Series":
        # Reflected ``**``; docstring attached below.
        return other ** self
    rpow.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Exponential power",
        op_name="**",
        equiv="other ** series",
        reverse="pow",
        series_examples=_pow_example_SERIES,
    )
    def floordiv(self, other: Any) -> "Series":
        # Delegate to ``//``; docstring attached below.
        return self // other
    floordiv.__doc__ = _flex_doc_SERIES.format(
        desc="Integer division",
        op_name="//",
        equiv="series // other",
        reverse="rfloordiv",
        series_examples=_floordiv_example_SERIES,
    )
    def rfloordiv(self, other: Any) -> "Series":
        # Reflected ``//``; docstring attached below.
        return other // self
    rfloordiv.__doc__ = _flex_doc_SERIES.format(
        desc="Reverse Integer division",
        op_name="//",
        equiv="other // series",
        reverse="floordiv",
        series_examples=_floordiv_example_SERIES,
    )
# create accessor for pandas-on-Spark specific methods.
pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkSeriesMethods)
# keep the name "koalas" for backward compatibility.
koalas = CachedAccessor("koalas", PandasOnSparkSeriesMethods)
# Comparison Operators
    def eq(self, other: Any) -> "Series":
        """
        Compare if the current value is equal to the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a == 1
        a     True
        b    False
        c    False
        d    False
        Name: a, dtype: bool

        >>> df.b.eq(1)
        a     True
        b    False
        c     True
        d    False
        Name: b, dtype: bool
        """
        # NOTE(review): was annotated ``-> bool``; this returns a Series like
        # the sibling gt/ge/lt/le/ne operators, so the annotation is fixed.
        return self == other
    # pandas-compatible alias.
    equals = eq
    def gt(self, other: Any) -> "Series":
        """
        Compare if the current value is greater than the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a > 1
        a    False
        b     True
        c     True
        d     True
        Name: a, dtype: bool

        >>> df.b.gt(1)
        a    False
        b    False
        c    False
        d    False
        Name: b, dtype: bool
        """
        return self > other
    def ge(self, other: Any) -> "Series":
        """
        Compare if the current value is greater than or equal to the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a >= 2
        a    False
        b     True
        c     True
        d     True
        Name: a, dtype: bool

        >>> df.b.ge(2)
        a    False
        b    False
        c    False
        d    False
        Name: b, dtype: bool
        """
        return self >= other
    def lt(self, other: Any) -> "Series":
        """
        Compare if the current value is less than the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a < 1
        a    False
        b    False
        c    False
        d    False
        Name: a, dtype: bool

        >>> df.b.lt(2)
        a     True
        b    False
        c     True
        d    False
        Name: b, dtype: bool
        """
        return self < other
    def le(self, other: Any) -> "Series":
        """
        Compare if the current value is less than or equal to the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a <= 2
        a     True
        b     True
        c    False
        d    False
        Name: a, dtype: bool

        >>> df.b.le(2)
        a     True
        b    False
        c     True
        d    False
        Name: b, dtype: bool
        """
        return self <= other
    def ne(self, other: Any) -> "Series":
        """
        Compare if the current value is not equal to the other.

        Returns
        -------
        Series
            Boolean Series of the element-wise comparison.

        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                   index=['a', 'b', 'c', 'd'], columns=['a', 'b'])

        >>> df.a != 1
        a    False
        b     True
        c     True
        d     True
        Name: a, dtype: bool

        >>> df.b.ne(1)
        a    False
        b     True
        c    False
        d     True
        Name: b, dtype: bool
        """
        return self != other
def divmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `divmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.rdivmod
"""
return self.floordiv(other), self.mod(other)
def rdivmod(self, other: Any) -> Tuple["Series", "Series"]:
"""
Return Integer division and modulo of series and other, element-wise
(binary operator `rdivmod`).
Parameters
----------
other : Series or scalar value
Returns
-------
2-Tuple of Series
The result of the operation.
See Also
--------
Series.divmod
"""
return self.rfloordiv(other), self.rmod(other)
def between(self, left: Any, right: Any, inclusive: bool = True) -> "Series":
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = ps.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = ps.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
# TODO: arg should support Series
# TODO: NaN and None
    def map(self, arg: Union[Dict, Callable]) -> "Series":
        """
        Map values of Series according to input correspondence.

        Used for substituting each value in a Series with another value,
        that may be derived from a function, a ``dict``.

        .. note:: make sure the size of the dictionary is not huge because it could
            downgrade the performance or throw OutOfMemoryError due to a huge
            expression within Spark. Consider the input as a functions as an
            alternative instead in this case.

        Parameters
        ----------
        arg : function or dict
            Mapping correspondence.

        Returns
        -------
        Series
            Same index as caller.

        See Also
        --------
        Series.apply : For applying more complex functions on a Series.
        DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

        Notes
        -----
        When ``arg`` is a dictionary, values in Series that are not in the
        dictionary (as keys) are converted to ``None``. However, if the
        dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
        provides a method for default values), then this default is used
        rather than ``None``.

        Examples
        --------
        >>> s = ps.Series(['cat', 'dog', None, 'rabbit'])
        >>> s
        0       cat
        1       dog
        2      None
        3    rabbit
        dtype: object

        ``map`` accepts a ``dict``. Values that are not found
        in the ``dict`` are converted to ``None``, unless the dict has a default
        value (e.g. ``defaultdict``):

        >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
        0    kitten
        1     puppy
        2      None
        3      None
        dtype: object

        It also accepts a function:

        >>> def format(x) -> str:
        ...     return 'I am a {}'.format(x)

        >>> s.map(format)
        0       I am a cat
        1       I am a dog
        2      I am a None
        3    I am a rabbit
        dtype: object
        """
        if isinstance(arg, dict):
            # Build a single CASE WHEN ... expression, one branch per dict entry.
            is_start = True
            # In case dictionary is empty.
            current = F.when(SF.lit(False), SF.lit(None).cast(self.spark.data_type))
            for to_replace, value in arg.items():
                if is_start:
                    current = F.when(self.spark.column == SF.lit(to_replace), value)
                    is_start = False
                else:
                    current = current.when(self.spark.column == SF.lit(to_replace), value)
            if hasattr(arg, "__missing__"):
                # Probe the dict's __missing__ default via a sentinel key
                # (assumes np._NoValue is never a real user key).
                tmp_val = arg[np._NoValue]
                del arg[np._NoValue]  # Remove in case it's set in defaultdict.
                current = current.otherwise(SF.lit(tmp_val))
            else:
                # Unmatched values map to None, as documented above.
                current = current.otherwise(SF.lit(None).cast(self.spark.data_type))
            return self._with_new_scol(current)
        else:
            return self.apply(arg)
    @property
    def shape(self) -> Tuple[int]:
        """Return a tuple of the shape of the underlying data (always a 1-tuple)."""
        return (len(self),)
@property
def name(self) -> Name:
"""Return name of the Series."""
name = self._column_label
if name is not None and len(name) == 1:
return name[0]
else:
return name
    @name.setter
    def name(self, name: Name) -> None:
        # Setting the name is an in-place rename.
        self.rename(name, inplace=True)
# TODO: Functionality and documentation should be matched. Currently, changing index labels
# taking dictionary and function to change index are not supported.
    def rename(self, index: Optional[Name] = None, **kwargs: Any) -> "Series":
        """
        Alter Series name.

        Parameters
        ----------
        index : scalar
            Scalar will alter the ``Series.name`` attribute.

        inplace : bool, default False
            Whether to return a new Series. If True then value of copy is
            ignored.

        Returns
        -------
        Series
            Series with name altered.

        Examples
        --------
        >>> s = ps.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        """
        if index is None:
            pass
        elif not is_hashable(index):
            raise TypeError("Series.name must be a hashable type")
        elif not isinstance(index, tuple):
            # Labels are stored internally as tuples.
            index = (index,)
        name = name_like_string(index)
        scol = self.spark.column.alias(name)
        field = self._internal.data_fields[0].copy(name=name)
        internal = self._internal.copy(
            column_labels=[index],
            data_spark_columns=[scol],
            data_fields=[field],
            column_label_names=None,
        )
        psdf = DataFrame(internal)  # type: DataFrame
        if kwargs.get("inplace", False):
            # Re-anchor this very object onto the renamed frame.
            self._col_label = index
            self._update_anchor(psdf)
            return self
        else:
            return first_series(psdf)
    def rename_axis(
        self, mapper: Optional[Any] = None, index: Optional[Any] = None, inplace: bool = False
    ) -> Optional["Series"]:
        """
        Set the name of the axis for the index or columns.

        Parameters
        ----------
        mapper, index :  scalar, list-like, dict-like or function, optional
            A scalar, list-like, dict-like or functions transformations to
            apply to the index values.
        inplace : bool, default False
            Modifies the object directly, instead of creating a new Series.

        Returns
        -------
        Series, or None if `inplace` is True.

        See Also
        --------
        Series.rename : Alter Series index labels or name.
        DataFrame.rename : Alter DataFrame index labels or name.
        Index.rename : Set new names on index.

        Examples
        --------
        >>> s = ps.Series(["dog", "cat", "monkey"], name="animal")
        >>> s  # doctest: +NORMALIZE_WHITESPACE
        0       dog
        1       cat
        2    monkey
        Name: animal, dtype: object
        >>> s.rename_axis("index").sort_index()  # doctest: +NORMALIZE_WHITESPACE
        index
        0       dog
        1       cat
        2    monkey
        Name: animal, dtype: object

        **MultiIndex**

        >>> index = pd.MultiIndex.from_product([['mammal'],
        ...                                     ['dog', 'cat', 'monkey']],
        ...                                    names=['type', 'name'])
        >>> s = ps.Series([4, 4, 2], index=index, name='num_legs')
        >>> s  # doctest: +NORMALIZE_WHITESPACE
        type    name
        mammal  dog       4
                cat       4
                monkey    2
        Name: num_legs, dtype: int64
        >>> s.rename_axis(index={'type': 'class'}).sort_index()  # doctest: +NORMALIZE_WHITESPACE
        class   name
        mammal  cat       4
                dog       4
                monkey    2
        Name: num_legs, dtype: int64
        >>> s.rename_axis(index=str.upper).sort_index()  # doctest: +NORMALIZE_WHITESPACE
        TYPE    NAME
        mammal  cat       4
                dog       4
                monkey    2
        Name: num_legs, dtype: int64
        """
        # Delegate to the DataFrame implementation on a one-column frame.
        psdf = self.to_frame().rename_axis(mapper=mapper, index=index, inplace=False)
        if inplace:
            self._update_anchor(psdf)
            return None
        else:
            return first_series(psdf)
    @property
    def index(self) -> "ps.Index":
        """The index (axis labels) Column of the Series.

        Shared with the anchor DataFrame.

        See Also
        --------
        Index
        """
        return self._psdf.index
    @property
    def is_unique(self) -> bool:
        """
        Return boolean if values in the object are unique

        Evaluated on the Spark side in a single pass; triggers a job.

        Returns
        -------
        is_unique : boolean

        >>> ps.Series([1, 2, 3]).is_unique
        True
        >>> ps.Series([1, 2, 2]).is_unique
        False
        >>> ps.Series([1, 2, 3, None]).is_unique
        True
        """
        scol = self.spark.column

        # Here we check:
        #   1. the distinct count without nulls and count without nulls for non-null values
        #   2. count null values and see if null is a distinct value.
        #
        # This workaround is in order to calculate the distinct count including nulls in
        # single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.
        return self._internal.spark_frame.select(
            (F.count(scol) == F.countDistinct(scol))
            & (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1)
        ).collect()[0][0]
    def reset_index(
        self,
        level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,
        drop: bool = False,
        name: Optional[Name] = None,
        inplace: bool = False,
    ) -> Optional[Union["Series", DataFrame]]:
        """
        Generate a new DataFrame or Series with the index reset.

        This is useful when the index needs to be treated as a column,
        or when the index is meaningless and needs to be reset
        to the default before another operation.

        Parameters
        ----------
        level : int, str, tuple, or list, default optional
            For a Series with a MultiIndex, only remove the specified levels from the index.
            Removes all levels by default.
        drop : bool, default False
            Just reset the index, without inserting it as a column in the new DataFrame.
        name : object, optional
            The name to use for the column containing the original Series values.
            Uses self.name by default. This argument is ignored when drop is True.
        inplace : bool, default False
            Modify the Series in place (do not create a new object).

        Returns
        -------
        Series or DataFrame
            When `drop` is False (the default), a DataFrame is returned.
            The newly created columns will come first in the DataFrame,
            followed by the original Series values.
            When `drop` is True, a `Series` is returned.
            In either case, if ``inplace=True``, no value is returned.

        Examples
        --------
        >>> s = ps.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))

        Generate a DataFrame with default index.

        >>> s.reset_index()
          idx  0
        0   a  1
        1   b  2
        2   c  3
        3   d  4

        To specify the name of the new column use `name`.

        >>> s.reset_index(name='values')
          idx  values
        0   a       1
        1   b       2
        2   c       3
        3   d       4

        To generate a new Series with the default set `drop` to True.

        >>> s.reset_index(drop=True)
        0    1
        1    2
        2    3
        3    4
        dtype: int64

        To update the Series in place, without generating a new one
        set `inplace` to True. Note that it also requires ``drop=True``.

        >>> s.reset_index(inplace=True, drop=True)
        >>> s
        0    1
        1    2
        2    3
        3    4
        dtype: int64
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if inplace and not drop:
            # inplace can only yield a Series; keeping the index as a column
            # would produce a DataFrame.
            raise TypeError("Cannot reset_index inplace on a Series to create a DataFrame")
        if drop:
            psdf = self._psdf[[self.name]]
        else:
            psser = self
            if name is not None:
                psser = psser.rename(name)
            psdf = psser.to_frame()
        # Delegate the actual index reset to the DataFrame implementation.
        psdf = psdf.reset_index(level=level, drop=drop)
        if drop:
            if inplace:
                self._update_anchor(psdf)
                return None
            else:
                return first_series(psdf)
        else:
            return psdf
def to_frame(self, name: Optional[Name] = None) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = ps.Series(["a", "b", "c"])
>>> s.to_frame()
0
0 a
1 b
2 c
>>> s = ps.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is not None:
renamed = self.rename(name)
elif self._column_label is None:
renamed = self.rename(DEFAULT_SERIES_NAME)
else:
renamed = self
return DataFrame(renamed._internal)
to_dataframe = to_frame
    def to_string(
        self,
        buf: Optional[IO[str]] = None,
        na_rep: str = "NaN",
        float_format: Optional[Callable[[float], str]] = None,
        header: bool = True,
        index: bool = True,
        length: bool = False,
        dtype: bool = False,
        name: bool = False,
        max_rows: Optional[int] = None,
    ) -> Optional[str]:
        """
        Render a string representation of the Series.

        .. note:: This method should only be used if the resulting pandas object is expected
                  to be small, as all the data is loaded into the driver's memory. If the input
                  is large, set max_rows parameter.

        Parameters
        ----------
        buf : StringIO-like, optional
            buffer to write to
        na_rep : string, optional
            string representation of NAN to use, default 'NaN'
        float_format : one-parameter function, optional
            formatter function to apply to columns' elements if they are floats
            default None
        header : boolean, default True
            Add the Series header (index name)
        index : bool, optional
            Add index (row) labels, default True
        length : boolean, default False
            Add the Series length
        dtype : boolean, default False
            Add the Series dtype
        name : boolean, default False
            Add the Series name if not None
        max_rows : int, optional
            Maximum number of rows to show before truncating. If None, show
            all.

        Returns
        -------
        formatted : string (if not buffer passed)

        Examples
        --------
        >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
        >>> print(df['dogs'].to_string())
        0    0.2
        1    0.0
        2    0.6
        3    0.2

        >>> print(df['dogs'].to_string(max_rows=2))
        0    0.2
        1    0.0
        """
        # Make sure locals() call is at the top of the function so we don't capture local variables.
        args = locals()

        if max_rows is not None:
            # Truncate on the Spark side before collecting to the driver.
            psseries = self.head(max_rows)
        else:
            psseries = self

        # Forward the captured keyword arguments to pandas' to_string.
        return validate_arguments_and_invoke_function(
            psseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args
        )
    def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:
        # Docstring defined below by reusing DataFrame.to_clipboard's.
        # Capture locals() first so only the call arguments are forwarded.
        args = locals()
        psseries = self

        return validate_arguments_and_invoke_function(
            psseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args
        )

    to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__
    def to_dict(self, into: Type = dict) -> Mapping:
        """
        Convert Series to {label -> value} dict or dict-like object.

        .. note:: This method should only be used if the resulting pandas DataFrame is expected
            to be small, as all the data is loaded into the driver's memory.

        Parameters
        ----------
        into : class, default dict
            The collections.abc.Mapping subclass to use as the return
            object. Can be the actual class or an empty
            instance of the mapping type you want.  If you want a
            collections.defaultdict, you must pass it initialized.

        Returns
        -------
        collections.abc.Mapping
            Key-value representation of Series.

        Examples
        --------
        >>> s = ps.Series([1, 2, 3, 4])
        >>> s_dict = s.to_dict()
        >>> sorted(s_dict.items())
        [(0, 1), (1, 2), (2, 3), (3, 4)]

        >>> from collections import OrderedDict, defaultdict
        >>> s.to_dict(OrderedDict)
        OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])

        >>> dd = defaultdict(list)
        >>> s.to_dict(dd)  # doctest: +ELLIPSIS
        defaultdict(<class 'list'>, {...})
        """
        # Make sure locals() call is at the top of the function so we don't capture local variables.
        args = locals()
        psseries = self
        return validate_arguments_and_invoke_function(
            psseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args
        )
    def to_latex(
        self,
        buf: Optional[IO[str]] = None,
        columns: Optional[List[Name]] = None,
        col_space: Optional[int] = None,
        header: bool = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: Optional[
            Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
        ] = None,
        float_format: Optional[Callable[[float], str]] = None,
        sparsify: Optional[bool] = None,
        index_names: bool = True,
        bold_rows: bool = False,
        column_format: Optional[str] = None,
        longtable: Optional[bool] = None,
        escape: Optional[bool] = None,
        encoding: Optional[str] = None,
        decimal: str = ".",
        multicolumn: Optional[bool] = None,
        multicolumn_format: Optional[str] = None,
        multirow: Optional[bool] = None,
    ) -> Optional[str]:
        # Docstring reused from DataFrame.to_latex below.
        # Capture locals() first so only the call arguments are forwarded to pandas.
        args = locals()
        psseries = self
        return validate_arguments_and_invoke_function(
            psseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args
        )

    to_latex.__doc__ = DataFrame.to_latex.__doc__
    def to_pandas(self) -> pd.Series:
        """
        Return a pandas Series.

        .. note:: This method should only be used if the resulting pandas object is expected
                  to be small, as all the data is loaded into the driver's memory.

        Examples
        --------
        >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])
        >>> df['dogs'].to_pandas()
        0    0.2
        1    0.0
        2    0.6
        3    0.2
        Name: dogs, dtype: float64
        """
        # Copy so the caller cannot mutate the internally cached pandas object.
        return self._to_internal_pandas().copy()
    def to_list(self) -> List:
        """
        Return a list of the values.

        These are each a scalar type, which is a Python scalar
        (for str, int, float) or a pandas scalar
        (for Timestamp/Timedelta/Interval/Period)

        .. note:: This method should only be used if the resulting list is expected
            to be small, as all the data is loaded into the driver's memory.
        """
        return self._to_internal_pandas().tolist()

    # pandas-compatible alias.
    tolist = to_list
    def drop_duplicates(self, keep: str = "first", inplace: bool = False) -> Optional["Series"]:
        """
        Return Series with duplicate values removed.

        Parameters
        ----------
        keep : {'first', 'last', ``False``}, default 'first'
            Method to handle dropping duplicates:
            - 'first' : Drop duplicates except for the first occurrence.
            - 'last' : Drop duplicates except for the last occurrence.
            - ``False`` : Drop all duplicates.
        inplace : bool, default ``False``
            If ``True``, performs operation inplace and returns None.

        Returns
        -------
        Series
            Series with duplicates dropped.

        Examples
        --------
        Generate a Series with duplicated entries.

        >>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
        ...               name='animal')
        >>> s.sort_index()
        0      lama
        1       cow
        2      lama
        3    beetle
        4      lama
        5     hippo
        Name: animal, dtype: object

        With the 'keep' parameter, the selection behaviour of duplicated values
        can be changed. The value 'first' keeps the first occurrence for each
        set of duplicated entries. The default value of keep is 'first'.

        >>> s.drop_duplicates().sort_index()
        0      lama
        1       cow
        3    beetle
        5     hippo
        Name: animal, dtype: object

        The value 'last' for parameter 'keep' keeps the last occurrence for
        each set of duplicated entries.

        >>> s.drop_duplicates(keep='last').sort_index()
        1       cow
        3    beetle
        4      lama
        5     hippo
        Name: animal, dtype: object

        The value ``False`` for parameter 'keep' discards all sets of
        duplicated entries. Setting the value of 'inplace' to ``True`` performs
        the operation inplace and returns ``None``.

        >>> s.drop_duplicates(keep=False, inplace=True)
        >>> s.sort_index()
        1       cow
        3    beetle
        5     hippo
        Name: animal, dtype: object
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        # Delegate to DataFrame.drop_duplicates on a one-column frame.
        psdf = self._psdf[[self.name]].drop_duplicates(keep=keep)
        if inplace:
            self._update_anchor(psdf)
            return None
        else:
            return first_series(psdf)
    def reindex(self, index: Optional[Any] = None, fill_value: Optional[Any] = None) -> "Series":
        """
        Conform Series to new index with optional filling logic, placing
        NA/NaN in locations having no value in the previous index. A new object
        is produced.

        Parameters
        ----------
        index: array-like, optional
            New labels / index to conform to, should be specified using keywords.
            Preferably an Index object to avoid duplicating data
        fill_value : scalar, default np.NaN
            Value to use for missing values. Defaults to NaN, but can be any
            "compatible" value.

        Returns
        -------
        Series with changed index.

        See Also
        --------
        Series.reset_index : Remove row labels or move them to new columns.

        Examples
        --------
        Create a series with some fictional data.

        >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
        >>> ser = ps.Series([200, 200, 404, 404, 301],
        ...                 index=index, name='http_status')
        >>> ser
        Firefox      200
        Chrome       200
        Safari       404
        IE10         404
        Konqueror    301
        Name: http_status, dtype: int64

        Create a new index and reindex the Series. By default
        values in the new index that do not have corresponding
        records in the Series are assigned ``NaN``.

        >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
        ...             'Chrome']
        >>> ser.reindex(new_index).sort_index()
        Chrome           200.0
        Comodo Dragon      NaN
        IE10             404.0
        Iceweasel          NaN
        Safari           404.0
        Name: http_status, dtype: float64

        We can fill in the missing values by passing a value to
        the keyword ``fill_value``.

        >>> ser.reindex(new_index, fill_value=0).sort_index()
        Chrome           200
        Comodo Dragon      0
        IE10             404
        Iceweasel          0
        Safari           404
        Name: http_status, dtype: int64

        To further illustrate the filling functionality in
        ``reindex``, we will create a Series with a
        monotonically increasing index (for example, a sequence
        of dates).

        >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
        >>> ser2 = ps.Series([100, 101, np.nan, 100, 89, 88],
        ...                  name='prices', index=date_index)
        >>> ser2.sort_index()
        2010-01-01    100.0
        2010-01-02    101.0
        2010-01-03      NaN
        2010-01-04    100.0
        2010-01-05     89.0
        2010-01-06     88.0
        Name: prices, dtype: float64

        Suppose we decide to expand the series to cover a wider
        date range.

        >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
        >>> ser2.reindex(date_index2).sort_index()
        2009-12-29      NaN
        2009-12-30      NaN
        2009-12-31      NaN
        2010-01-01    100.0
        2010-01-02    101.0
        2010-01-03      NaN
        2010-01-04    100.0
        2010-01-05     89.0
        2010-01-06     88.0
        2010-01-07      NaN
        Name: prices, dtype: float64
        """
        # Delegate to DataFrame.reindex, then restore this Series' name,
        # which first_series would otherwise not carry over from the frame.
        return first_series(self.to_frame().reindex(index=index, fill_value=fill_value)).rename(
            self.name
        )
def reindex_like(self, other: Union["Series", "DataFrame"]) -> "Series":
"""
Return a Series with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index.
Parameters
----------
other : Series or DataFrame
Its row and column indices are used to define the new indices
of this object.
Returns
-------
Series
Series with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, ...)``.
Examples
--------
>>> s1 = ps.Series([24.3, 31.0, 22.0, 35.0],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'),
... name="temp_celsius")
>>> s1
2014-02-12 24.3
2014-02-13 31.0
2014-02-14 22.0
2014-02-15 35.0
Name: temp_celsius, dtype: float64
>>> s2 = ps.Series(["low", "low", "medium"],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']),
... name="winspeed")
>>> s2
2014-02-12 low
2014-02-13 low
2014-02-15 medium
Name: winspeed, dtype: object
>>> s2.reindex_like(s1).sort_index()
2014-02-12 low
2014-02-13 low
2014-02-14 None
2014-02-15 medium
Name: winspeed, dtype: object
"""
if isinstance(other, (Series, DataFrame)):
return self.reindex(index=other.index)
else:
raise TypeError("other must be a pandas-on-Spark Series or DataFrame")
    def fillna(
        self,
        value: Optional[Any] = None,
        method: Optional[str] = None,
        axis: Optional[Axis] = None,
        inplace: bool = False,
        limit: Optional[int] = None,
    ) -> Optional["Series"]:
        """Fill NA/NaN values.

        .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
            without specifying partition specification. This leads to move all data into
            single partition in single machine and could cause serious
            performance degradation. Avoid this method against very large dataset.

        Parameters
        ----------
        value : scalar, dict, Series
            Value to use to fill holes. alternately a dict/Series of values
            specifying which value to use for each column.
            DataFrame is not supported.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series pad / ffill: propagate last valid
            observation forward to next valid backfill / bfill:
            use NEXT valid observation to fill gap
        axis : {0 or `index`}
            1 and `columns` are not supported.
        inplace : boolean, default False
            Fill in place (do not create a new object)
        limit : int, default None
            If method is specified, this is the maximum number of consecutive NaN values to
            forward/backward fill. In other words, if there is a gap with more than this number of
            consecutive NaNs, it will only be partially filled. If method is not specified,
            this is the maximum number of entries along the entire axis where NaNs will be filled.
            Must be greater than 0 if not None

        Returns
        -------
        Series
            Series with NA entries filled.

        Examples
        --------
        >>> s = ps.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')
        >>> s
        0    NaN
        1    2.0
        2    3.0
        3    4.0
        4    NaN
        5    6.0
        Name: x, dtype: float64

        Replace all NaN elements with 0s.

        >>> s.fillna(0)
        0    0.0
        1    2.0
        2    3.0
        3    4.0
        4    0.0
        5    6.0
        Name: x, dtype: float64

        We can also propagate non-null values forward or backward.

        >>> s.fillna(method='ffill')
        0    NaN
        1    2.0
        2    3.0
        3    4.0
        4    4.0
        5    6.0
        Name: x, dtype: float64

        >>> s = ps.Series([np.nan, 'a', 'b', 'c', np.nan], name='x')
        >>> s.fillna(method='ffill')
        0    None
        1       a
        2       b
        3       c
        4       c
        Name: x, dtype: object
        """
        psser = self._fillna(value=value, method=method, axis=axis, limit=limit)

        if method is not None:
            # Window-based fills depend on row ordering; materialize via
            # resolved_copy so later operations don't re-evaluate the Window
            # against a changed plan.
            psser = DataFrame(psser._psdf._internal.resolved_copy)._psser_for(self._column_label)

        inplace = validate_bool_kwarg(inplace, "inplace")
        if inplace:
            self._psdf._update_internal_frame(psser._psdf._internal, requires_same_anchor=False)
            return None
        else:
            return psser._with_new_scol(psser.spark.column)  # TODO: dtype?
def _fillna(
    self,
    value: Optional[Any] = None,
    method: Optional[str] = None,
    axis: Optional[Axis] = None,
    limit: Optional[int] = None,
    part_cols: Sequence["ColumnOrName"] = (),
) -> "Series":
    """Internal helper backing ``fillna``.

    Build a new Series in which missing entries of this Series are replaced
    either by a literal ``value`` or by a neighboring value selected with
    ``method`` over a window ordered by the internal natural row order.

    Parameters mirror ``fillna``; ``part_cols`` additionally gives the Spark
    columns used to partition the fill window (presumably used by the
    groupby-fillna path — TODO confirm against callers).
    """
    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
    if (value is None) and (method is None):
        raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
    if (method is not None) and (method not in ["ffill", "pad", "backfill", "bfill"]):
        raise ValueError("Expecting 'pad', 'ffill', 'backfill' or 'bfill'.")
    scol = self.spark.column
    # Float/double columns can hold NaN, which must be treated as missing in
    # addition to SQL null.
    if isinstance(self.spark.data_type, (FloatType, DoubleType)):
        cond = scol.isNull() | F.isnan(scol)
    else:
        if not self.spark.nullable:
            # A non-nullable column cannot contain missing values: nothing to fill.
            return self.copy()
        cond = scol.isNull()
    if value is not None:
        if not isinstance(value, (float, int, str, bool)):
            raise TypeError("Unsupported type %s" % type(value).__name__)
        if limit is not None:
            raise ValueError("limit parameter for value is not support now")
        # Replace missing entries with the literal value, keep the rest.
        scol = F.when(cond, value).otherwise(scol)
    else:
        # Forward fill searches backwards for the last valid value; backward
        # fill searches ahead for the first valid one.  ``limit`` bounds how
        # far the window may reach.
        if method in ["ffill", "pad"]:
            func = F.last
            end = Window.currentRow - 1
            if limit is not None:
                begin = Window.currentRow - limit
            else:
                begin = Window.unboundedPreceding
        elif method in ["bfill", "backfill"]:
            func = F.first
            begin = Window.currentRow + 1
            if limit is not None:
                end = Window.currentRow + limit
            else:
                end = Window.unboundedFollowing
        # The window is ordered by the internal natural-order column, so
        # "previous"/"next" follow the original row order of the frame.
        window = (
            Window.partitionBy(*part_cols)
            .orderBy(NATURAL_ORDER_COLUMN_NAME)
            .rowsBetween(begin, end)
        )
        # ``True`` asks last/first to ignore nulls inside the window.
        scol = F.when(cond, func(scol, True).over(window)).otherwise(scol)
    return DataFrame(
        self._psdf._internal.with_new_spark_column(
            self._column_label, scol.alias(name_like_string(self.name))  # TODO: dtype?
        )
    )._psser_for(self._column_label)
def dropna(self, axis: Axis = 0, inplace: bool = False, **kwargs: Any) -> Optional["Series"]:
    """
    Return a new Series with missing values removed.
    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        There is only one axis to drop values from.
    inplace : bool, default False
        If True, do operation inplace and return None.
    **kwargs
        Not in use.
    Returns
    -------
    Series
        Series with NA entries dropped from it.
    Examples
    --------
    >>> ser = ps.Series([1., 2., np.nan])
    >>> ser
    0    1.0
    1    2.0
    2    NaN
    dtype: float64
    Drop NA values from a Series.
    >>> ser.dropna()
    0    1.0
    1    2.0
    dtype: float64
    Keep the Series with valid entries in the same variable.
    >>> ser.dropna(inplace=True)
    >>> ser
    0    1.0
    1    2.0
    dtype: float64
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # TODO: last two examples from pandas produce different results.
    # Delegate to DataFrame.dropna on a single-column frame.
    dropped = self._psdf[[self.name]].dropna(axis=axis, inplace=False)
    if not inplace:
        return first_series(dropped)
    self._update_anchor(dropped)
    return None
def clip(
    self,
    lower: Optional[Union[float, int]] = None,
    upper: Optional[Union[float, int]] = None,
) -> "Series":
    """
    Trim values at input threshold(s).
    Assigns values outside boundary to boundary values.
    Parameters
    ----------
    lower : float or int, default None
        Minimum threshold value. All values below this threshold will be set to it.
    upper : float or int, default None
        Maximum threshold value. All values above this threshold will be set to it.
    Returns
    -------
    Series
        Series with the values outside the clip boundaries replaced
    Examples
    --------
    >>> ps.Series([0, 2, 4]).clip(1, 3)
    0    1
    1    2
    2    3
    dtype: int64
    Notes
    -----
    One difference between this implementation and pandas is that running
    `pd.Series(['a', 'b']).clip(0, 1)` will crash with "TypeError: '<=' not supported between
    instances of 'str' and 'int'" while `ps.Series(['a', 'b']).clip(0, 1)` will output the
    original Series, simply ignoring the incompatible types.
    """
    # NOTE: annotations fixed to explicit Optional (PEP 484); defaults unchanged.
    if is_list_like(lower) or is_list_like(upper):
        raise TypeError(
            "List-like value are not supported for 'lower' and 'upper' at the moment"
        )
    if lower is None and upper is None:
        # Nothing to clip.
        return self
    if not isinstance(self.spark.data_type, NumericType):
        # Non-numeric columns are returned unchanged (see Notes above).
        return self
    scol = self.spark.column
    if lower is not None:
        scol = F.when(scol < lower, lower).otherwise(scol)
    if upper is not None:
        scol = F.when(scol > upper, upper).otherwise(scol)
    return self._with_new_scol(
        scol.alias(self._internal.data_spark_column_names[0]),
        field=self._internal.data_fields[0],
    )
def drop(
    self,
    labels: Optional[Union[Name, List[Name]]] = None,
    index: Optional[Union[Name, List[Name]]] = None,
    level: Optional[int] = None,
) -> "Series":
    """
    Return Series with specified index labels removed.
    Remove elements of a Series based on specifying the index labels.
    When using a multi-index, labels on different levels can be removed by specifying the level.
    Parameters
    ----------
    labels : single label or list-like
        Index labels to drop.
    index : None
        Redundant for application on Series, but index can be used instead of labels.
    level : int or level name, optional
        For MultiIndex, level for which the labels will be removed.
    Returns
    -------
    Series
        Series with specified index labels removed.
    See Also
    --------
    Series.dropna
    Examples
    --------
    >>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
    >>> s
    A    0
    B    1
    C    2
    dtype: int64
    Drop single label A
    >>> s.drop('A')
    B    1
    C    2
    dtype: int64
    Drop labels B and C
    >>> s.drop(labels=['B', 'C'])
    A    0
    dtype: int64
    With 'index' rather than 'labels' returns exactly same result.
    >>> s.drop(index='A')
    B    1
    C    2
    dtype: int64
    >>> s.drop(index=['B', 'C'])
    A    0
    dtype: int64
    Also support for MultiIndex
    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)
    >>> s
    lama    speed      45.0
            weight    200.0
            length      1.2
    cow     speed      30.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            weight      1.0
            length      0.3
    dtype: float64
    >>> s.drop(labels='weight', level=1)
    lama    speed      45.0
            length      1.2
    cow     speed      30.0
            length      1.5
    falcon  speed     320.0
            length      0.3
    dtype: float64
    >>> s.drop(('lama', 'weight'))
    lama    speed      45.0
            length      1.2
    cow     speed      30.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            weight      1.0
            length      0.3
    dtype: float64
    >>> s.drop([('lama', 'speed'), ('falcon', 'weight')])
    lama    weight    200.0
            length      1.2
    cow     speed      30.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            length      0.3
    dtype: float64
    """
    # All of the validation and filtering work lives in ``_drop``.
    dropped_frame = self._drop(labels=labels, index=index, level=level)
    return first_series(dropped_frame)
def _drop(
    self,
    labels: Optional[Union[Name, List[Name]]] = None,
    index: Optional[Union[Name, List[Name]]] = None,
    level: Optional[int] = None,
) -> DataFrame:
    """Internal helper backing ``drop``.

    Normalizes ``labels``/``index`` into a list of index-label tuples, builds
    a filter excluding the matching rows, and returns the filtered frame as a
    single-column DataFrame (the caller wraps it back into a Series).

    Raises
    ------
    ValueError
        If both or neither of ``labels`` and ``index`` are given, if ``level``
        is out of range, or if the given index list mixes tuples and
        non-tuples.
    KeyError
        If a label tuple is longer than the index depth.
    """
    if labels is not None:
        if index is not None:
            raise ValueError("Cannot specify both 'labels' and 'index'")
        # 'labels' is just an alias for 'index' on a Series.
        return self._drop(index=labels, level=level)
    if index is None:
        raise ValueError("Need to specify at least one of 'labels' or 'index'")
    internal = self._internal
    if level is None:
        level = 0
    if level >= internal.index_level:
        raise ValueError("'level' should be less than the number of indexes")
    # Normalize the input into a list of label tuples.
    if is_name_like_tuple(index):  # type: ignore
        index_list = [cast(Label, index)]
    elif is_name_like_value(index):
        index_list = [(index,)]
    elif all(is_name_like_value(idx, allow_tuple=False) for idx in index):
        index_list = [(idx,) for idx in index]
    elif not all(is_name_like_tuple(idx) for idx in index):
        raise ValueError(
            "If the given index is a list, it "
            "should only contains names as all tuples or all non tuples "
            "that contain index names"
        )
    else:
        index_list = cast(List[Label], index)
    drop_index_scols = []
    for idxes in index_list:
        try:
            index_scols = [
                internal.index_spark_columns[lvl] == idx
                for lvl, idx in enumerate(idxes, level)
            ]
        except IndexError:
            # BUGFIX: the two format arguments were previously swapped, so the
            # message reported the index depth as the key length and vice versa.
            raise KeyError(
                "Key length ({}) exceeds index depth ({})".format(
                    len(idxes), internal.index_level
                )
            )
        drop_index_scols.append(reduce(lambda x, y: x & y, index_scols))
    # Keep every row that does not match any of the labels to drop.
    cond = ~reduce(lambda x, y: x | y, drop_index_scols)
    return DataFrame(internal.with_filter(cond))
def head(self, n: int = 5) -> "Series":
    """
    Return the first n rows.
    This function returns the first n rows for the object based on position.
    It is useful for quickly testing if your object has the right type of data in it.
    Parameters
    ----------
    n : Integer, default =  5
    Returns
    -------
    The first n rows of the caller object.
    Examples
    --------
    >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})
    >>> df.animal.head(2)  # doctest: +NORMALIZE_WHITESPACE
    0    alligator
    1          bee
    Name: animal, dtype: object
    """
    # Reuse DataFrame.head on a single-column frame and restore the name.
    head_frame = self.to_frame().head(n)
    return first_series(head_frame).rename(self.name)
def last(self, offset: Union[str, DateOffset]) -> "Series":
    """
    Select final periods of time series data based on a date offset.
    When having a Series with dates as index, this function can
    select the last few elements based on a date offset.
    Parameters
    ----------
    offset : str or DateOffset
        The offset length of the data that will be selected. For instance,
        '3D' will display all the rows having their index within the last 3 days.
    Returns
    -------
    Series
        A subset of the caller.
    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`
    Examples
    --------
    >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> psser = ps.Series([1, 2, 3, 4], index=index)
    >>> psser
    2018-04-09    1
    2018-04-11    2
    2018-04-13    3
    2018-04-15    4
    dtype: int64
    Get the rows for the last 3 days:
    >>> psser.last('3D')
    2018-04-13    3
    2018-04-15    4
    dtype: int64
    Notice the data for 3 last calendar days were returned, not the last
    3 observed days in the dataset, and therefore data for 2018-04-11 was
    not returned.
    """
    # Delegate to DataFrame.last on a single-column frame, then restore the name.
    tail_frame = self.to_frame().last(offset)
    return first_series(tail_frame).rename(self.name)
def first(self, offset: Union[str, DateOffset]) -> "Series":
    """
    Select first periods of time series data based on a date offset.
    When having a Series with dates as index, this function can
    select the first few elements based on a date offset.
    Parameters
    ----------
    offset : str or DateOffset
        The offset length of the data that will be selected. For instance,
        '3D' will display all the rows having their index within the first 3 days.
    Returns
    -------
    Series
        A subset of the caller.
    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`
    Examples
    --------
    >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
    >>> psser = ps.Series([1, 2, 3, 4], index=index)
    >>> psser
    2018-04-09    1
    2018-04-11    2
    2018-04-13    3
    2018-04-15    4
    dtype: int64
    Get the rows for the first 3 days:
    >>> psser.first('3D')
    2018-04-09    1
    2018-04-11    2
    dtype: int64
    Notice the data for 3 first calendar days were returned, not the first
    3 observed days in the dataset, and therefore data for 2018-04-13 was
    not returned.
    """
    # Delegate to DataFrame.first on a single-column frame, then restore the name.
    head_frame = self.to_frame().first(offset)
    return first_series(head_frame).rename(self.name)
# TODO: Categorical type isn't supported (due to PySpark's limitation) and
# some doctests related with timestamps were not added.
def unique(self) -> "Series":
    """
    Return unique values of Series object.
    Uniques are returned in order of appearance. Hash table-based unique,
    therefore does NOT sort.
    .. note:: This method returns newly created Series whereas pandas returns
              the unique values as a NumPy array.
    Returns
    -------
    Returns the unique values as a Series.
    See Also
    --------
    Index.unique
    groupby.SeriesGroupBy.unique
    Examples
    --------
    >>> psser = ps.Series([2, 1, 3, 3], name='A')
    >>> psser.unique().sort_values()  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    <BLANKLINE>
    ...  1
    ...  2
    ...  3
    Name: A, dtype: int64
    >>> ps.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
    0   2016-01-01
    dtype: datetime64[ns]
    >>> psser.name = ('x', 'a')
    >>> psser.unique().sort_values()  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    <BLANKLINE>
    ...  (x, a)
    ...       1
    ...       2
    ...       3
    Name: (x, a), dtype: int64
    """
    # De-duplicate at the Spark level, then rebuild an internal frame with a
    # default index around the distinct column.
    distinct_sdf = self._internal.spark_frame.select(self.spark.column).distinct()
    data_column_name = self._internal.data_spark_column_names[0]
    internal = InternalFrame(
        spark_frame=distinct_sdf,
        index_spark_columns=None,
        column_labels=[self._column_label],
        data_spark_columns=[scol_for(distinct_sdf, data_column_name)],
        data_fields=[self._internal.data_fields[0]],
        column_label_names=self._internal.column_label_names,
    )
    return first_series(DataFrame(internal))
def sort_values(
    self, ascending: bool = True, inplace: bool = False, na_position: str = "last"
) -> Optional["Series"]:
    """
    Sort by the values.
    Sort a Series in ascending or descending order by some criterion.
    Parameters
    ----------
    ascending : bool or list of bool, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders. If this is a list of bools, must match the length of
        the by.
    inplace : bool, default False
        if True, perform operation in-place
    na_position : {'first', 'last'}, default 'last'
        `first` puts NaNs at the beginning, `last` puts NaNs at the end
    Returns
    -------
    sorted_obj : Series ordered by values.
    Examples
    --------
    >>> s = ps.Series([np.nan, 1, 3, 10, 5])
    >>> s
    0     NaN
    1     1.0
    2     3.0
    3    10.0
    4     5.0
    dtype: float64
    Sort values ascending order (default behaviour)
    >>> s.sort_values(ascending=True)
    1     1.0
    2     3.0
    4     5.0
    3    10.0
    0     NaN
    dtype: float64
    Sort values descending order
    >>> s.sort_values(ascending=False)
    3    10.0
    4     5.0
    2     3.0
    1     1.0
    0     NaN
    dtype: float64
    Sort values inplace
    >>> s.sort_values(ascending=False, inplace=True)
    >>> s
    3    10.0
    4     5.0
    2     3.0
    1     1.0
    0     NaN
    dtype: float64
    Sort values putting NAs first
    >>> s.sort_values(na_position='first')
    0     NaN
    1     1.0
    2     3.0
    4     5.0
    3    10.0
    dtype: float64
    Sort a series of strings
    >>> s = ps.Series(['z', 'b', 'd', 'a', 'c'])
    >>> s
    0    z
    1    b
    2    d
    3    a
    4    c
    dtype: object
    >>> s.sort_values()
    3    a
    1    b
    4    c
    2    d
    0    z
    dtype: object
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Sort a single-column frame by this Series' Spark column.
    sorted_frame = self._psdf[[self.name]]._sort(
        by=[self.spark.column], ascending=ascending, na_position=na_position
    )
    if not inplace:
        return first_series(sorted_frame)
    self._update_anchor(sorted_frame)
    return None
def sort_index(
    self,
    axis: Axis = 0,
    level: Optional[Union[int, List[int]]] = None,
    ascending: bool = True,
    inplace: bool = False,
    kind: Optional[str] = None,
    na_position: str = "last",
) -> Optional["Series"]:
    """
    Sort object by labels (along an axis)
    Parameters
    ----------
    axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
    level : int or level name or list of ints or list of level names
        if not None, sort on values in specified index level(s)
    ascending : boolean, default True
        Sort ascending vs. descending
    inplace : bool, default False
        if True, perform operation in-place
    kind : str, default None
        pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
        default None
    na_position : {‘first’, ‘last’}, default ‘last’
        first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
        MultiIndex.
    Returns
    -------
    sorted_obj : Series
    Examples
    --------
    >>> df = ps.Series([2, 1, np.nan], index=['b', 'a', np.nan])
    >>> df.sort_index()
    a      1.0
    b      2.0
    NaN    NaN
    dtype: float64
    >>> df.sort_index(ascending=False)
    b      2.0
    a      1.0
    NaN    NaN
    dtype: float64
    >>> df.sort_index(na_position='first')
    NaN    NaN
    a      1.0
    b      2.0
    dtype: float64
    >>> df.sort_index(inplace=True)
    >>> df
    a      1.0
    b      2.0
    NaN    NaN
    dtype: float64
    >>> df = ps.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')
    >>> df.sort_index()
    a  0    3
       1    2
    b  0    1
       1    0
    Name: 0, dtype: int64
    >>> df.sort_index(level=1)  # doctest: +SKIP
    a  0    3
    b  0    1
    a  1    2
    b  1    0
    Name: 0, dtype: int64
    >>> df.sort_index(level=[1, 0])
    a  0    3
    b  0    1
    a  1    2
    b  1    0
    Name: 0, dtype: int64
    """
    # NOTE: ``kind`` annotation fixed to explicit Optional (PEP 484); the
    # default and accepted values are unchanged.
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Delegate to DataFrame.sort_index on a single-column frame.
    psdf = self._psdf[[self.name]].sort_index(
        axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position
    )
    if inplace:
        self._update_anchor(psdf)
        return None
    else:
        return first_series(psdf)
def swaplevel(
    self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, copy: bool = True
) -> "Series":
    """
    Swap levels i and j in a MultiIndex.
    Default is to swap the two innermost levels of the index.
    Parameters
    ----------
    i, j : int, str
        Level of the indices to be swapped. Can pass level name as string.
    copy : bool, default True
        Whether to copy underlying data. Must be True.
    Returns
    -------
    Series
        Series with levels swapped in MultiIndex.
    Examples
    --------
    >>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
    >>> midx  # doctest: +SKIP
    MultiIndex([('a', 1),
                ('b', 2)],
               names=['word', 'number'])
    >>> psser = ps.Series(['x', 'y'], index=midx)
    >>> psser
    word  number
    a     1         x
    b     2         y
    dtype: object
    >>> psser.swaplevel()
    number  word
    1       a       x
    2       b       y
    dtype: object
    >>> psser.swaplevel(0, 1)
    number  word
    1       a       x
    2       b       y
    dtype: object
    >>> psser.swaplevel('number', 'word')
    number  word
    1       a       x
    2       b       y
    dtype: object
    """
    assert copy is True
    # Delegate to DataFrame.swaplevel along the index axis, then restore the name.
    swapped = self.to_frame().swaplevel(i, j, axis=0)
    return first_series(swapped).rename(self.name)
def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "Series":
    """
    Interchange axes and swap values axes appropriately.
    Parameters
    ----------
    i: {0 or 'index', 1 or 'columns'}. The axis to swap.
    j: {0 or 'index', 1 or 'columns'}. The axis to swap.
    copy : bool, default True.
    Returns
    -------
    Series
    Examples
    --------
    >>> psser = ps.Series([1, 2, 3], index=["x", "y", "z"])
    >>> psser
    x    1
    y    2
    z    3
    dtype: int64
    >>>
    >>> psser.swapaxes(0, 0)
    x    1
    y    2
    z    3
    dtype: int64
    """
    assert copy is True
    i = validate_axis(i)
    j = validate_axis(j)
    # A Series has a single axis, so only the identity swap (0, 0) is legal.
    if i != 0 or j != 0:
        raise ValueError("Axis must be 0 for Series")
    return self.copy()
def add_prefix(self, prefix: str) -> "Series":
    """
    Prefix labels with string `prefix`.
    For Series, the row labels are prefixed.
    For DataFrame, the column labels are prefixed.
    Parameters
    ----------
    prefix : str
        The string to add before each label.
    Returns
    -------
    Series
        New Series with updated labels.
    See Also
    --------
    Series.add_suffix: Suffix column labels with string `suffix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.
    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    >>> s.add_prefix('item_')
    item_0    1
    item_1    2
    item_2    3
    item_3    4
    dtype: int64
    """
    assert isinstance(prefix, str)
    internal = self._internal.resolved_copy
    # Rebuild every index column with the prefix prepended; data columns pass through.
    prefixed_index_cols = [
        F.concat(SF.lit(prefix), scol).alias(scol_name)
        for scol, scol_name in zip(
            internal.index_spark_columns, internal.index_spark_column_names
        )
    ]
    sdf = internal.spark_frame.select(prefixed_index_cols + internal.data_spark_columns)
    # Index fields are reset since the label values (and hence types) changed.
    return first_series(
        DataFrame(internal.with_new_sdf(sdf, index_fields=[None] * internal.index_level))
    )
def add_suffix(self, suffix: str) -> "Series":
    """
    Suffix labels with string suffix.
    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.
    Parameters
    ----------
    suffix : str
        The string to add after each label.
    Returns
    -------
    Series
        New Series with updated labels.
    See Also
    --------
    Series.add_prefix: Prefix row labels with string `prefix`.
    DataFrame.add_prefix: Prefix column labels with string `prefix`.
    DataFrame.add_suffix: Suffix column labels with string `suffix`.
    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    >>> s.add_suffix('_item')
    0_item    1
    1_item    2
    2_item    3
    3_item    4
    dtype: int64
    """
    assert isinstance(suffix, str)
    internal = self._internal.resolved_copy
    # Rebuild every index column with the suffix appended; data columns pass through.
    suffixed_index_cols = [
        F.concat(scol, SF.lit(suffix)).alias(scol_name)
        for scol, scol_name in zip(
            internal.index_spark_columns, internal.index_spark_column_names
        )
    ]
    sdf = internal.spark_frame.select(suffixed_index_cols + internal.data_spark_columns)
    # Index fields are reset since the label values (and hence types) changed.
    return first_series(
        DataFrame(internal.with_new_sdf(sdf, index_fields=[None] * internal.index_level))
    )
def corr(self, other: "Series", method: str = "pearson") -> float:
    """
    Compute correlation with `other` Series, excluding missing values.
    Parameters
    ----------
    other : Series
    method : {'pearson', 'spearman'}
        * pearson : standard correlation coefficient
        * spearman : Spearman rank correlation
    Returns
    -------
    correlation : float
    Examples
    --------
    >>> df = ps.DataFrame({'s1': [.2, .0, .6, .2],
    ...                    's2': [.3, .6, .0, .1]})
    >>> s1 = df.s1
    >>> s2 = df.s2
    >>> s1.corr(s2, method='pearson')  # doctest: +ELLIPSIS
    -0.851064...
    >>> s1.corr(s2, method='spearman')  # doctest: +ELLIPSIS
    -0.948683...
    Notes
    -----
    There are behavior differences between pandas-on-Spark and pandas.
    * the `method` argument only accepts 'pearson', 'spearman'
    * the data should not contain NaNs. pandas-on-Spark will return an error.
    * pandas-on-Spark doesn't support the following argument(s).
      * `min_periods` argument is not supported
    """
    # This implementation is suboptimal because it computes more than necessary,
    # but it should be a start
    arg_names = ["__corr_arg1__", "__corr_arg2__"]
    paired = self._psdf.assign(__corr_arg1__=self, __corr_arg2__=other)[arg_names]
    paired.columns = arg_names
    corr_matrix = corr(paired, method=method)
    # Pick this pair's entry out of the full correlation matrix.
    return corr_matrix.loc[tuple(arg_names)]
def nsmallest(self, n: int = 5) -> "Series":
    """
    Return the smallest `n` elements.
    Parameters
    ----------
    n : int, default 5
        Return this many ascending sorted values.
    Returns
    -------
    Series
        The `n` smallest values in the Series, sorted in increasing order.
    See Also
    --------
    Series.nlargest: Get the `n` largest elements.
    Series.sort_values: Sort Series by values.
    Series.head: Return the first `n` rows.
    Notes
    -----
    Faster than ``.sort_values().head(n)`` for small `n` relative to
    the size of the ``Series`` object.
    In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
    the two would have same performance.
    Examples
    --------
    >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
    >>> s = ps.Series(data)
    >>> s
    0    1.0
    1    2.0
    2    3.0
    3    4.0
    4    NaN
    5    6.0
    6    7.0
    7    8.0
    dtype: float64
    The `n` largest elements where ``n=5`` by default.
    >>> s.nsmallest()
    0    1.0
    1    2.0
    2    3.0
    3    4.0
    5    6.0
    dtype: float64
    >>> s.nsmallest(3)
    0    1.0
    1    2.0
    2    3.0
    dtype: float64
    """
    # Ascending sort followed by head gives the n smallest values.
    smallest_first = self.sort_values(ascending=True)
    return smallest_first.head(n)
def nlargest(self, n: int = 5) -> "Series":
    """
    Return the largest `n` elements.
    Parameters
    ----------
    n : int, default 5
    Returns
    -------
    Series
        The `n` largest values in the Series, sorted in decreasing order.
    See Also
    --------
    Series.nsmallest: Get the `n` smallest elements.
    Series.sort_values: Sort Series by values.
    Series.head: Return the first `n` rows.
    Notes
    -----
    Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
    relative to the size of the ``Series`` object.
    In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
    the two would have same performance.
    Examples
    --------
    >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]
    >>> s = ps.Series(data)
    >>> s
    0    1.0
    1    2.0
    2    3.0
    3    4.0
    4    NaN
    5    6.0
    6    7.0
    7    8.0
    dtype: float64
    The `n` largest elements where ``n=5`` by default.
    >>> s.nlargest()
    7    8.0
    6    7.0
    5    6.0
    3    4.0
    2    3.0
    dtype: float64
    >>> s.nlargest(n=3)
    7    8.0
    6    7.0
    5    6.0
    dtype: float64
    """
    # Descending sort followed by head gives the n largest values.
    largest_first = self.sort_values(ascending=False)
    return largest_first.head(n)
def append(
    self, to_append: "Series", ignore_index: bool = False, verify_integrity: bool = False
) -> "Series":
    """
    Concatenate two or more Series.
    Parameters
    ----------
    to_append : Series or list/tuple of Series
    ignore_index : boolean, default False
        If True, do not use the index labels.
    verify_integrity : boolean, default False
        If True, raise Exception on creating index with duplicates
    Returns
    -------
    appended : Series
    Examples
    --------
    >>> s1 = ps.Series([1, 2, 3])
    >>> s2 = ps.Series([4, 5, 6])
    >>> s3 = ps.Series([4, 5, 6], index=[3,4,5])
    >>> s1.append(s2)
    0    1
    1    2
    2    3
    0    4
    1    5
    2    6
    dtype: int64
    >>> s1.append(s3)
    0    1
    1    2
    2    3
    3    4
    4    5
    5    6
    dtype: int64
    With ignore_index set to True:
    >>> s1.append(s2, ignore_index=True)
    0    1
    1    2
    2    3
    3    4
    4    5
    5    6
    dtype: int64
    """
    # Delegate to DataFrame.append on single-column frames, then restore the name.
    combined = self.to_frame().append(to_append.to_frame(), ignore_index, verify_integrity)
    return first_series(combined).rename(self.name)
def sample(
    self,
    n: Optional[int] = None,
    frac: Optional[float] = None,
    replace: bool = False,
    random_state: Optional[int] = None,
) -> "Series":
    # Delegate to DataFrame.sample on a single-column frame, then restore the name.
    sampled = self.to_frame().sample(n=n, frac=frac, replace=replace, random_state=random_state)
    return first_series(sampled).rename(self.name)
sample.__doc__ = DataFrame.sample.__doc__
@no_type_check
def hist(self, bins=10, **kwds):
    # Delegate to the plot accessor's histogram implementation.
    plot_accessor = self.plot
    return plot_accessor.hist(bins, **kwds)
hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
def apply(self, func: Callable, args: Sequence[Any] = (), **kwds: Any) -> "Series":
    """
    Invoke function on values of Series.
    Can be a Python function that only works on the Series.
    .. note:: this API executes the function once to infer the type which is
        potentially expensive, for instance, when the dataset is created after
        aggregations or sorting.
        To avoid this, specify return type in ``func``, for instance, as below:
        >>> def square(x) -> np.int32:
        ...     return x ** 2
        pandas-on-Spark uses return type hint and does not try to infer the type.
    Parameters
    ----------
    func : function
        Python function to apply. Note that type hint for return type is required.
    args : tuple
        Positional arguments passed to func after the series value.
    **kwds
        Additional keyword arguments passed to func.
    Returns
    -------
    Series
    See Also
    --------
    Series.aggregate : Only perform aggregating type operations.
    Series.transform : Only perform transforming type operations.
    DataFrame.apply : The equivalent function for DataFrame.
    Examples
    --------
    Create a Series with typical summer temperatures for each city.
    >>> s = ps.Series([20, 21, 12],
    ...               index=['London', 'New York', 'Helsinki'])
    >>> s
    London      20
    New York    21
    Helsinki    12
    dtype: int64
    Square the values by defining a function and passing it as an
    argument to ``apply()``.
    >>> def square(x) -> np.int64:
    ...     return x ** 2
    >>> s.apply(square)
    London      400
    New York    441
    Helsinki    144
    dtype: int64
    Define a custom function that needs additional positional
    arguments and pass these additional arguments using the
    ``args`` keyword
    >>> def subtract_custom_value(x, custom_value) -> np.int64:
    ...     return x - custom_value
    >>> s.apply(subtract_custom_value, args=(5,))
    London      15
    New York    16
    Helsinki     7
    dtype: int64
    Define a custom function that takes keyword arguments
    and pass these arguments to ``apply``
    >>> def add_custom_values(x, **kwargs) -> np.int64:
    ...     for month in kwargs:
    ...         x += kwargs[month]
    ...     return x
    >>> s.apply(add_custom_values, june=30, july=20, august=25)
    London      95
    New York    96
    Helsinki    87
    dtype: int64
    Use a function from the Numpy library
    >>> def numpy_log(col) -> np.float64:
    ...     return np.log(col)
    >>> s.apply(numpy_log)
    London      2.995732
    New York    3.044522
    Helsinki    2.484907
    dtype: float64
    You can omit the type hint and let pandas-on-Spark infer its type.
    >>> s.apply(np.log)
    London      2.995732
    New York    3.044522
    Helsinki    2.484907
    dtype: float64
    """
    assert callable(func), "the first argument should be a callable function."
    # If the function carries a return-type hint, use it; otherwise (including
    # when the signature cannot be introspected) fall back to schema inference.
    try:
        hinted_return = inspect.getfullargspec(func).annotations.get("return", None)
    except TypeError:
        hinted_return = None
    batch_func = wraps(func)(lambda s: s.apply(func, args=args, **kwds))
    if hinted_return is None:
        return self.pandas_on_spark._transform_batch(batch_func, None)
    sig_return = infer_return_type(func)
    if not isinstance(sig_return, ScalarType):
        raise ValueError(
            "Expected the return type of this function to be of scalar type, "
            "but found type {}".format(sig_return)
        )
    return self.pandas_on_spark._transform_batch(batch_func, cast(ScalarType, sig_return))
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(self, func: Union[str, List[str]]) -> Union[Scalar, "Series"]:
    """Aggregate using one or more operations over the specified axis.
    Parameters
    ----------
    func : str or a list of str
        function name(s) as string apply to series.
    Returns
    -------
    scalar, Series
        The return can be:
        - scalar : when Series.agg is called with single function
        - Series : when Series.agg is called with several functions
    Notes
    -----
    `agg` is an alias for `aggregate`. Use the alias.
    See Also
    --------
    Series.apply : Invoke function on a Series.
    Series.transform : Only perform transforming type operations.
    Series.groupby : Perform operations over groups.
    DataFrame.aggregate : The equivalent function for DataFrame.
    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4])
    >>> s.agg('min')
    1
    >>> s.agg(['min', 'max']).sort_index()
    max    4
    min    1
    dtype: int64
    """
    # A single function name dispatches to the method of the same name; a
    # list is delegated to DataFrame.aggregate.
    if isinstance(func, str):
        return getattr(self, func)()
    if isinstance(func, list):
        return first_series(self.to_frame().aggregate(func)).rename(self.name)
    raise TypeError("func must be a string or list of strings")
agg = aggregate
def transpose(self, *args: Any, **kwargs: Any) -> "Series":
    """
    Return the transpose, which is by definition self.
    Examples
    --------
    It returns the same object as the transpose of the given series object, which is by
    definition self.
    >>> s = ps.Series([1, 2, 3])
    >>> s
    0    1
    1    2
    2    3
    dtype: int64
    >>> s.transpose()
    0    1
    1    2
    2    3
    dtype: int64
    """
    # The transpose of a 1-D object is itself; return a copy for safety.
    return self.copy()
T = property(transpose)
def transform(
    self, func: Union[Callable, List[Callable]], axis: Axis = 0, *args: Any, **kwargs: Any
) -> Union["Series", DataFrame]:
    """
    Call ``func`` producing the same type as `self` with transformed values
    and that has the same axis length as input.
    .. note:: this API executes the function once to infer the type which is
        potentially expensive, for instance, when the dataset is created after
        aggregations or sorting.
        To avoid this, specify return type in ``func``, for instance, as below:
        >>> def square(x) -> np.int32:
        ...     return x ** 2
        pandas-on-Spark uses return type hint and does not try to infer the type.
    Parameters
    ----------
    func : function or list
        A function or a list of functions to use for transforming the data.
    axis : int, default 0 or 'index'
        Can only be set to 0 at the moment.
    *args
        Positional arguments to pass to `func`.
    **kwargs
        Keyword arguments to pass to `func`.
    Returns
    -------
    An instance of the same type with `self` that must have the same length as input.
    See Also
    --------
    Series.aggregate : Only perform aggregating type operations.
    Series.apply : Invoke function on Series.
    DataFrame.transform : The equivalent function for DataFrame.
    Examples
    --------
    >>> s = ps.Series(range(3))
    >>> s
    0    0
    1    1
    2    2
    dtype: int64
    >>> def sqrt(x) -> float:
    ...     return np.sqrt(x)
    >>> s.transform(sqrt)
    0    0.000000
    1    1.000000
    2    1.414214
    dtype: float64
    Even though the resulting instance must have the same length as the
    input, it is possible to provide several input functions:
    >>> def exp(x) -> float:
    ...     return np.exp(x)
    >>> s.transform([sqrt, exp])
           sqrt       exp
    0  0.000000  1.000000
    1  1.000000  2.718282
    2  1.414214  7.389056
    You can omit the type hint and let pandas-on-Spark infer its type.
    >>> s.transform([np.sqrt, np.exp])
           sqrt       exp
    0  0.000000  1.000000
    1  1.000000  2.718282
    2  1.414214  7.389056
    """
    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError('axis should be either 0 or "index" currently.')
    if not isinstance(func, list):
        # Single function: behaves like ``apply``.
        return self.apply(func, args=args, **kwargs)
    # Several functions: apply each one and collect the results as columns,
    # named after the functions.
    applied = [self.apply(f, args=args, **kwargs).rename(f.__name__) for f in func]
    return DataFrame(self._internal.with_new_columns(applied))
def round(self, decimals: int = 0) -> "Series":
    """
    Round each value in a Series to the given number of decimals.
    Parameters
    ----------
    decimals : int
        Number of decimal places to round to (default: 0).
        If decimals is negative, it specifies the number of
        positions to the left of the decimal point.
    Returns
    -------
    Series object
    See Also
    --------
    DataFrame.round
    Examples
    --------
    >>> df = ps.Series([0.028208, 0.038683, 0.877076], name='x')
    >>> df
    0    0.028208
    1    0.038683
    2    0.877076
    Name: x, dtype: float64
    >>> df.round(2)
    0    0.03
    1    0.04
    2    0.88
    Name: x, dtype: float64
    """
    if not isinstance(decimals, int):
        raise TypeError("decimals must be an integer")
    # Round at the Spark level and wrap the new column.
    rounded = F.round(self.spark.column, decimals)
    return self._with_new_scol(rounded)  # TODO: dtype?
# TODO: add 'interpolation' parameter.
def quantile(
    self, q: Union[float, Iterable[float]] = 0.5, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
    """
    Return value at the given quantile.
    .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile
        based upon approximate percentile computation because computing quantile across
        a large dataset is extremely expensive.
    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        0 <= q <= 1, the quantile(s) to compute.
    accuracy : int, optional
        Default accuracy of approximation. Larger value means better accuracy.
        The relative error can be deduced by 1.0 / accuracy.
    Returns
    -------
    float or Series
        If the current object is a Series and ``q`` is an array, a Series will be
        returned where the index is ``q`` and the values are the quantiles, otherwise
        a float will be returned.
    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4, 5])
    >>> s.quantile(.5)
    3.0
    >>> (s + 1).quantile(.5)
    4.0
    >>> s.quantile([.25, .5, .75])
    0.25    2.0
    0.50    3.0
    0.75    4.0
    dtype: float64
    >>> (s + 1).quantile([.25, .5, .75])
    0.25    3.0
    0.50    4.0
    0.75    5.0
    dtype: float64
    """
    if isinstance(q, Iterable):
        # Multiple quantiles: delegate to the DataFrame implementation.
        return first_series(
            self.to_frame().quantile(q=q, axis=0, numeric_only=False, accuracy=accuracy)
        ).rename(self.name)
    if not isinstance(accuracy, int):
        raise TypeError(
            "accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
        )
    if not isinstance(q, float):
        raise TypeError(
            "q must be a float or an array of floats; however, [%s] found." % type(q)
        )
    q_float = cast(float, q)
    if q_float < 0.0 or q_float > 1.0:
        raise ValueError("percentiles should all be in the interval [0, 1].")

    # Renamed from ``quantile`` to avoid shadowing the enclosing method name.
    def approx_quantile(spark_column: Column, spark_type: DataType) -> Column:
        if not isinstance(spark_type, (BooleanType, NumericType)):
            raise TypeError(
                "Could not convert {} ({}) to numeric".format(
                    spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
                )
            )
        return F.percentile_approx(spark_column.cast(DoubleType()), q_float, accuracy)

    return self._reduce_for_stat_function(approx_quantile, name="quantile")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method: str = "average", ascending: bool = True) -> "Series":
    """
    Compute numerical data ranks (1 through n) along axis. Equal values are
    assigned a rank that is the average of the ranks of those values.

    .. note:: the current implementation of rank uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    method : {'average', 'min', 'max', 'first', 'dense'}
        * average: average rank of group
        * min: lowest rank in group
        * max: highest rank in group
        * first: ranks assigned in order they appear in the array
        * dense: like 'min', but rank always increases by 1 between groups
    ascending : boolean, default True
        False for ranks by high (1) to low (N)

    Returns
    -------
    ranks : same type as caller

    Examples
    --------
    >>> s = ps.Series([1, 2, 2, 3], name='A')
    >>> s.rank()
    0    1.0
    1    2.5
    2    2.5
    3    4.0
    Name: A, dtype: float64

    >>> s.rank(method='min')
    0    1.0
    1    2.0
    2    2.0
    3    4.0
    Name: A, dtype: float64

    >>> s.rank(method='max')
    0    1.0
    1    3.0
    2    3.0
    3    4.0
    Name: A, dtype: float64

    >>> s.rank(method='first')
    0    1.0
    1    2.0
    2    3.0
    3    4.0
    Name: A, dtype: float64

    >>> s.rank(method='dense')
    0    1.0
    1    2.0
    2    2.0
    3    3.0
    Name: A, dtype: float64
    """
    # The shared implementation also serves groupby-rank via `part_cols`.
    ranked = self._rank(method, ascending)
    return ranked.spark.analyzed
def _rank(
    self,
    method: str = "average",
    ascending: bool = True,
    *,
    part_cols: Sequence["ColumnOrName"] = ()
) -> "Series":
    """Core ranking implementation shared by `rank` and groupby-rank.

    `part_cols` supplies extra Window partitioning columns (used by the
    groupby code path); the public `rank` passes none, which moves all
    data into a single partition.
    """
    if method not in ["average", "min", "max", "first", "dense"]:
        msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
        raise ValueError(msg)
    # Only a single-level index is supported by this implementation.
    if self._internal.index_level > 1:
        raise ValueError("rank do not support index now")
    # Order direction for the ranking window.
    if ascending:
        asc_func = lambda scol: scol.asc()
    else:
        asc_func = lambda scol: scol.desc()
    if method == "first":
        # Ties broken by original row order via the natural-order column.
        window = (
            Window.orderBy(
                asc_func(self.spark.column),
                asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)),
            )
            .partitionBy(*part_cols)
            .rowsBetween(Window.unboundedPreceding, Window.currentRow)
        )
        scol = F.row_number().over(window)
    elif method == "dense":
        # dense_rank leaves no gaps between tied groups.
        window = (
            Window.orderBy(asc_func(self.spark.column))
            .partitionBy(*part_cols)
            .rowsBetween(Window.unboundedPreceding, Window.currentRow)
        )
        scol = F.dense_rank().over(window)
    else:
        if method == "average":
            stat_func = F.mean
        elif method == "min":
            stat_func = F.min
        elif method == "max":
            stat_func = F.max
        # window1 assigns a row_number per value ordering; window2 spans each
        # group of equal values so stat_func aggregates the row numbers of ties.
        window1 = (
            Window.orderBy(asc_func(self.spark.column))
            .partitionBy(*part_cols)
            .rowsBetween(Window.unboundedPreceding, Window.currentRow)
        )
        window2 = Window.partitionBy([self.spark.column] + list(part_cols)).rowsBetween(
            Window.unboundedPreceding, Window.unboundedFollowing
        )
        scol = stat_func(F.row_number().over(window1)).over(window2)
    psser = self._with_new_scol(scol)
    # pandas returns float64 ranks regardless of input dtype.
    return psser.astype(np.float64)
def filter(
    self,
    items: Optional[Sequence[Any]] = None,
    like: Optional[str] = None,
    regex: Optional[str] = None,
    axis: Optional[Axis] = None,
) -> "Series":
    # A Series has a single "column", so only filtering along the index
    # is meaningful.
    axis = validate_axis(axis)
    if axis == 1:
        raise ValueError("Series does not support columns axis.")
    filtered = self.to_frame().filter(items=items, like=like, regex=regex, axis=axis)
    return first_series(filtered).rename(self.name)

filter.__doc__ = DataFrame.filter.__doc__
def describe(self, percentiles: Optional[List[float]] = None) -> "Series":
    # Delegate to the DataFrame implementation and unwrap the single column.
    described = self.to_frame().describe(percentiles)
    return first_series(described).rename(self.name)

describe.__doc__ = DataFrame.describe.__doc__
def diff(self, periods: int = 1) -> "Series":
    """
    First discrete difference of element.

    Calculates the difference of a Series element compared with another element in the
    DataFrame (default is the element in the same column of the previous row).

    .. note:: the current implementation of diff uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for calculating difference, accepts negative values.

    Returns
    -------
    diffed : Series

    Examples
    --------
    >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
    ...                    'b': [1, 1, 2, 3, 5, 8],
    ...                    'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
    >>> df.b.diff()
    0    NaN
    1    0.0
    2    1.0
    3    1.0
    4    2.0
    5    3.0
    Name: b, dtype: float64

    Difference with previous value

    >>> df.c.diff(periods=3)
    0     NaN
    1     NaN
    2     NaN
    3    15.0
    4    21.0
    5    27.0
    Name: c, dtype: float64

    Difference with following value

    >>> df.c.diff(periods=-1)
    0    -3.0
    1    -5.0
    2    -7.0
    3    -9.0
    4   -11.0
    5     NaN
    Name: c, dtype: float64
    """
    # The shared implementation also serves groupby-diff via `part_cols`.
    diffed = self._diff(periods)
    return diffed.spark.analyzed
def _diff(self, periods: int, *, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
    """Compute element-wise difference against the row `periods` back.

    `part_cols` supplies extra Window partitioning columns for the
    groupby code path.
    """
    if not isinstance(periods, int):
        raise TypeError("periods should be an int; however, got [%s]" % type(periods).__name__)
    # Look exactly `periods` rows back (a negative value looks forward).
    offset_window = (
        Window.partitionBy(*part_cols)
        .orderBy(NATURAL_ORDER_COLUMN_NAME)
        .rowsBetween(-periods, -periods)
    )
    previous = F.lag(self.spark.column, periods).over(offset_window)
    delta = self.spark.column - previous
    # The first `periods` rows have no predecessor, so the result is nullable.
    return self._with_new_scol(delta, field=self._internal.data_fields[0].copy(nullable=True))
def idxmax(self, skipna: bool = True) -> Union[Tuple, Any]:
    """
    Return the row label of the maximum value.

    If multiple values equal the maximum, the first row label with that
    value is returned.

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA/null values. If the entire Series is NA, the result
        will be NA.

    Returns
    -------
    Index
        Label of the maximum value.

    Raises
    ------
    ValueError
        If the Series is empty.

    See Also
    --------
    Series.idxmin : Return index *label* of the first occurrence
        of minimum of values.

    Examples
    --------
    >>> s = ps.Series(data=[1, None, 4, 3, 5],
    ...               index=['A', 'B', 'C', 'D', 'E'])
    >>> s.idxmax()
    'E'

    If `skipna` is False and there is an NA value in the data,
    the function returns ``nan``.

    >>> s.idxmax(skipna=False)
    nan

    In case of multi-index, you get a tuple:

    >>> index = pd.MultiIndex.from_arrays([
    ...     ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
    >>> s = ps.Series(data=[1, None, 4, 5], index=index)
    >>> s.idxmax()
    ('b', 'f')

    If multiple values equal the maximum, the first row label with that
    value is returned.

    >>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
    >>> s.idxmax()
    3
    """
    sdf = self._internal.spark_frame
    scol = self.spark.column
    index_scols = self._internal.index_spark_columns
    # desc_nulls_(last|first) is used via Py4J directly because
    # it's not supported in Spark 2.3.
    if skipna:
        sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()), NATURAL_ORDER_COLUMN_NAME)
    else:
        sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()), NATURAL_ORDER_COLUMN_NAME)
    results = sdf.select([scol] + index_scols).take(1)
    if len(results) == 0:
        # Bug fix: the message previously said "idxmin" even though this
        # is idxmax.
        raise ValueError("attempt to get idxmax of an empty sequence")
    if results[0][0] is None:
        # This only happens when skipna is False, because nulls are
        # placed first in that case.
        return np.nan
    values = list(results[0][1:])
    if len(values) == 1:
        # Single-level index: return the bare label, not a 1-tuple.
        return values[0]
    else:
        return tuple(values)
def idxmin(self, skipna: bool = True) -> Union[Tuple, Any]:
    """
    Return the row label of the minimum value.

    If multiple values equal the minimum, the first row label with that
    value is returned.

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA/null values. If the entire Series is NA, the result
        will be NA.

    Returns
    -------
    Index
        Label of the minimum value.

    Raises
    ------
    ValueError
        If the Series is empty.

    See Also
    --------
    Series.idxmax : Return index *label* of the first occurrence
        of maximum of values.

    Notes
    -----
    This method is the Series version of ``ndarray.argmin``. This method
    returns the label of the minimum, while ``ndarray.argmin`` returns
    the position. To get the position, use ``series.values.argmin()``.

    Examples
    --------
    >>> s = ps.Series(data=[1, None, 4, 0],
    ...               index=['A', 'B', 'C', 'D'])
    >>> s.idxmin()
    'D'

    If `skipna` is False and there is an NA value in the data,
    the function returns ``nan``.

    >>> s.idxmin(skipna=False)
    nan

    In case of multi-index, you get a tuple:

    >>> index = pd.MultiIndex.from_arrays([
    ...     ['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))
    >>> s = ps.Series(data=[1, None, 4, 0], index=index)
    >>> s.idxmin()
    ('b', 'f')

    If multiple values equal the minimum, the first row label with that
    value is returned.

    >>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
    >>> s.idxmin()
    10
    """
    sdf = self._internal.spark_frame
    scol = self.spark.column
    index_scols = self._internal.index_spark_columns
    # asc_nulls_(last|first) is used via Py4J directly because
    # it's not supported in Spark 2.3.
    jcol = scol._jc.asc_nulls_last() if skipna else scol._jc.asc_nulls_first()
    sdf = sdf.orderBy(Column(jcol), NATURAL_ORDER_COLUMN_NAME)
    first_rows = sdf.select([scol] + index_scols).take(1)
    if not first_rows:
        raise ValueError("attempt to get idxmin of an empty sequence")
    row = first_rows[0]
    if row[0] is None:
        # Only reachable when skipna is False, since nulls sort first then.
        return np.nan
    labels = list(row[1:])
    # Single-level index yields the bare label; MultiIndex yields a tuple.
    return labels[0] if len(labels) == 1 else tuple(labels)
def pop(self, item: Name) -> Union["Series", Scalar]:
    """
    Return item and drop from series.

    Parameters
    ----------
    item : label
        Label of index to be popped.

    Returns
    -------
    Value that is popped from series.

    Examples
    --------
    >>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])
    >>> s.pop('A')
    0
    >>> s
    B    1
    C    2
    dtype: int64

    >>> s = ps.Series(data=np.arange(3), index=['A', 'A', 'C'])
    >>> s.pop('A')
    A    0
    A    1
    dtype: int64
    >>> s
    C    2
    dtype: int64

    Also support for MultiIndex

    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)
    >>> s.pop('lama')
    speed      45.0
    weight    200.0
    length      1.2
    dtype: float64

    Also support for MultiIndex with several indexes.

    >>> midx = pd.MultiIndex([['a', 'b', 'c'],
    ...                       ['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 0, 0, 0, 1, 1, 1],
    ...                       [0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 0, 2]]
    ...                      )
    >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)
    >>> s.pop(('a', 'lama'))
    speed      45.0
    weight    200.0
    length      1.2
    dtype: float64
    >>> s.pop(('b', 'falcon', 'speed'))
    (b, falcon, speed)    320.0
    (b, falcon, speed)      1.0
    dtype: float64
    """
    # Normalize `item` to a tuple of index-level values.
    if not is_name_like_value(item):
        raise TypeError("'key' should be string or tuple that contains strings")
    if not is_name_like_tuple(item):
        item = (item,)
    if self._internal.index_level < len(item):
        raise KeyError(
            "Key length ({}) exceeds index depth ({})".format(
                len(item), self._internal.index_level
            )
        )
    internal = self._internal
    # Keep only the index levels deeper than `item`, plus the data column.
    scols = internal.index_spark_columns[len(item) :] + [self.spark.column]
    # Build an equality predicate per matched index level and AND them together.
    rows = [internal.spark_columns[level] == index for level, index in enumerate(item)]
    sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)
    # Drop the popped rows from this Series in place (pandas pop semantics).
    psdf = self._drop(item)
    self._update_anchor(psdf)
    if self._internal.index_level == len(item):
        # `item` fully specifies the index.
        # if spark_frame has one column and one data, return data only without frame
        pdf = sdf.limit(2).toPandas()
        length = len(pdf)
        if length == 1:
            return pdf[internal.data_spark_column_names[0]].iloc[0]
        # Multiple matching rows: return them as a Series indexed by the
        # stringified key (pandas does the same for duplicate labels).
        item_string = name_like_string(item)
        sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, SF.lit(str(item_string)))
        internal = InternalFrame(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
            column_labels=[self._column_label],
            data_fields=[self._internal.data_fields[0]],
        )
        return first_series(DataFrame(internal))
    else:
        # Partial key: return a Series with the remaining index levels.
        internal = internal.copy(
            spark_frame=sdf,
            index_spark_columns=[
                scol_for(sdf, col) for col in internal.index_spark_column_names[len(item) :]
            ],
            index_fields=internal.index_fields[len(item) :],
            index_names=self._internal.index_names[len(item) :],
            data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
        )
        return first_series(DataFrame(internal))
def copy(self, deep: bool = True) -> "Series":
    """
    Make a copy of this object's indices and data.

    Parameters
    ----------
    deep : bool, default True
        this parameter is not supported but just dummy parameter to match pandas.

    Returns
    -------
    copy : Series

    Examples
    --------
    >>> s = ps.Series([1, 2], index=["a", "b"])
    >>> s_copy = s.copy()
    >>> s_copy
    a    1
    b    2
    dtype: int64
    """
    # Copy the anchoring frame, then re-select this column from the copy.
    copied_frame = self._psdf.copy(deep=deep)
    return copied_frame._psser_for(self._column_label)
def mode(self, dropna: bool = True) -> "Series":
    """
    Return the mode(s) of the dataset.

    Always returns Series even if only one value is returned.

    Parameters
    ----------
    dropna : bool, default True
        Don't consider counts of NaN/NaT.

    Returns
    -------
    Series
        Modes of the Series.

    Examples
    --------
    >>> s = ps.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])
    >>> s.mode()
    0    1.0
    dtype: float64

    If there are several same modes, all items are shown

    >>> s = ps.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
    ...                np.nan, np.nan, np.nan])
    >>> s.mode().sort_values()  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    <BLANKLINE>
    ...  1.0
    ...  2.0
    ...  3.0
    dtype: float64

    With 'dropna' set to 'False', we can also see NaN in the result

    >>> s.mode(False).sort_values()  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    <BLANKLINE>
    ...  1.0
    ...  2.0
    ...  3.0
    ...  NaN
    dtype: float64
    """
    # Count occurrences of every value, then keep the values whose count
    # equals the maximum count.
    counts = self.value_counts(dropna=dropna, sort=False)
    counts_sdf = counts._internal.spark_frame
    max_count = counts.max()
    modes_sdf = counts_sdf.filter("count == {}".format(max_count)).select(
        F.col(SPARK_DEFAULT_INDEX_NAME).alias(SPARK_DEFAULT_SERIES_NAME)
    )
    internal = InternalFrame(
        spark_frame=modes_sdf, index_spark_columns=None, column_labels=[None]
    )
    return first_series(DataFrame(internal))
def keys(self) -> "ps.Index":
    """
    Return alias for index.

    Returns
    -------
    Index
        Index of the Series.

    Examples
    --------
    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> psser = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    >>> psser.keys()  # doctest: +SKIP
    MultiIndex([(  'lama',  'speed'),
                (  'lama', 'weight'),
                (  'lama', 'length'),
                (   'cow',  'speed'),
                (   'cow', 'weight'),
                (   'cow', 'length'),
                ('falcon',  'speed'),
                ('falcon', 'weight'),
                ('falcon', 'length')],
               )
    """
    # pandas-compat alias: `keys()` is just the index.
    return self.index
# TODO: 'regex', 'method' parameter
def replace(
    self,
    to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,
    value: Optional[Union[List, Tuple]] = None,
    regex: bool = False,
) -> "Series":
    """
    Replace values given in to_replace with value.

    Values of the Series are replaced with other values dynamically.

    Parameters
    ----------
    to_replace : str, list, tuple, dict, Series, int, float, or None
        How to find the values that will be replaced.
        * numeric, str:
            - numeric: numeric values equal to to_replace will be replaced with value
            - str: string exactly matching to_replace will be replaced with value
        * list of str or numeric:
            - if to_replace and value are both lists or tuples, they must be the same length.
            - str and numeric rules apply as above.
        * dict:
            - Dicts can be used to specify different replacement values for different
              existing values.
              For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’
              with ‘z’. To use a dict in this way the value parameter should be None.
            - For a DataFrame a dict can specify that different values should be replaced
              in different columns. For example, {'a': 1, 'b': 'z'} looks for the value 1
              in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with
              whatever is specified in value.
              The value parameter should not be None in this case.
              You can treat this as a special case of passing two lists except that you are
              specifying the column to search in.
        See the examples section for examples of each of these.
    value : scalar, dict, list, tuple, str default None
        Value to replace any values matching to_replace with.
        For a DataFrame a dict of values can be used to specify which value to use
        for each column (columns not in the dict will not be filled).
        Regular expressions, strings and lists or dicts of such objects are also allowed.

    Returns
    -------
    Series
        Object after replacement.

    Examples
    --------
    Scalar `to_replace` and `value`

    >>> s = ps.Series([0, 1, 2, 3, 4])
    >>> s.replace(0, 5)
    0    5
    1    1
    2    2
    3    3
    4    4
    dtype: int64

    List-like `to_replace`

    >>> s.replace([0, 4], 5000)
    0    5000
    1       1
    2       2
    3       3
    4    5000
    dtype: int64

    >>> s.replace([1, 2, 3], [10, 20, 30])
    0     0
    1    10
    2    20
    3    30
    4     4
    dtype: int64

    Dict-like `to_replace`

    >>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000})
    0       0
    1    1000
    2    2000
    3    3000
    4    4000
    dtype: int64

    Also support for MultiIndex

    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)
    >>> s.replace(45, 450)
    lama    speed     450.0
            weight    200.0
            length      1.2
    cow     speed      30.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            weight      1.0
            length      0.3
    dtype: float64

    >>> s.replace([45, 30, 320], 500)
    lama    speed     500.0
            weight    200.0
            length      1.2
    cow     speed     500.0
            weight    250.0
            length      1.5
    falcon  speed     500.0
            weight      1.0
            length      0.3
    dtype: float64

    >>> s.replace({45: 450, 30: 300})
    lama    speed     450.0
            weight    200.0
            length      1.2
    cow     speed     300.0
            weight    250.0
            length      1.5
    falcon  speed     320.0
            weight      1.0
            length      0.3
    dtype: float64
    """
    # pandas treats to_replace=None as a forward-fill request.
    if to_replace is None:
        return self.fillna(method="ffill")
    if not isinstance(to_replace, (str, list, tuple, dict, int, float)):
        raise TypeError("'to_replace' should be one of str, list, tuple, dict, int, float")
    if regex:
        raise NotImplementedError("replace currently not support for regex")
    # Normalize tuples to lists so the list/list pairing below applies.
    to_replace = list(to_replace) if isinstance(to_replace, tuple) else to_replace
    value = list(value) if isinstance(value, tuple) else value
    if isinstance(to_replace, list) and isinstance(value, list):
        if not len(to_replace) == len(value):
            raise ValueError(
                "Replacement lists must match in length. Expecting {} got {}".format(
                    len(to_replace), len(value)
                )
            )
        # Pairwise lists collapse into the dict case.
        to_replace = {k: v for k, v in zip(to_replace, value)}
    if isinstance(to_replace, dict):
        # Build a chained CASE WHEN: one branch per (old, new) pair, falling
        # back to the original column value.
        is_start = True
        if len(to_replace) == 0:
            current = self.spark.column
        else:
            for to_replace_, value in to_replace.items():
                # NaN/None keys must match via isnan/isNull, not equality.
                cond = (
                    (F.isnan(self.spark.column) | self.spark.column.isNull())
                    if pd.isna(to_replace_)
                    else (self.spark.column == SF.lit(to_replace_))
                )
                if is_start:
                    current = F.when(cond, value)
                    is_start = False
                else:
                    current = current.when(cond, value)
            current = current.otherwise(self.spark.column)
    else:
        # Scalar or list to_replace with a single replacement value.
        cond = self.spark.column.isin(to_replace)
        # to_replace may be a scalar
        if np.array(pd.isna(to_replace)).any():
            cond = cond | F.isnan(self.spark.column) | self.spark.column.isNull()
        current = F.when(cond, value).otherwise(self.spark.column)
    return self._with_new_scol(current)  # TODO: dtype?
def update(self, other: "Series") -> None:
    """
    Modify Series in place using non-NA values from passed Series. Aligns on index.

    Parameters
    ----------
    other : Series

    Examples
    --------
    >>> from pyspark.pandas.config import set_option, reset_option
    >>> set_option("compute.ops_on_diff_frames", True)
    >>> s = ps.Series([1, 2, 3])
    >>> s.update(ps.Series([4, 5, 6]))
    >>> s.sort_index()
    0    4
    1    5
    2    6
    dtype: int64

    >>> s = ps.Series(['a', 'b', 'c'])
    >>> s.update(ps.Series(['d', 'e'], index=[0, 2]))
    >>> s.sort_index()
    0    d
    1    b
    2    e
    dtype: object

    >>> s = ps.Series([1, 2, 3], index=[10, 11, 12])
    >>> s.update(ps.Series([4, 5, 6], index=[11, 12, 13]))
    >>> s.sort_index()
    10    1
    11    4
    12    5
    dtype: int64

    If ``other`` contains NaNs the corresponding values are not updated
    in the original Series.

    >>> s = ps.Series([1, 2, 3])
    >>> s.update(ps.Series([4, np.nan, 6]))
    >>> s.sort_index()
    0    4.0
    1    2.0
    2    6.0
    dtype: float64

    >>> reset_option("compute.ops_on_diff_frames")
    """
    if not isinstance(other, Series):
        raise TypeError("'other' must be a Series")

    # Left-outer join keeps every row of `self`; index labels missing from
    # `other` contribute nulls, which fall through to the original value.
    combined = combine_frames(self._psdf, other._psdf, how="leftouter")
    this_col = combined["this"]._internal.spark_column_for(self._column_label)
    that_col = combined["that"]._internal.spark_column_for(other._column_label)
    updated = (
        F.when(that_col.isNotNull(), that_col)
        .otherwise(this_col)
        .alias(self._psdf._internal.spark_column_name_for(self._column_label))
    )
    internal = combined["this"]._internal.with_new_spark_column(
        self._column_label, updated  # TODO: dtype?
    )
    # In-place semantics: rebind this Series' anchor frame to the result.
    self._psdf._update_internal_frame(internal.resolved_copy, requires_same_anchor=False)
def where(self, cond: "Series", other: Any = np.nan) -> "Series":
    """
    Replace values where the condition is False.

    Parameters
    ----------
    cond : boolean Series
        Where cond is True, keep the original value. Where False,
        replace with corresponding value from other.
    other : scalar, Series
        Entries where cond is False are replaced with corresponding value from other.

    Returns
    -------
    Series

    Examples
    --------
    >>> from pyspark.pandas.config import set_option, reset_option
    >>> set_option("compute.ops_on_diff_frames", True)
    >>> s1 = ps.Series([0, 1, 2, 3, 4])
    >>> s2 = ps.Series([100, 200, 300, 400, 500])
    >>> s1.where(s1 > 0).sort_index()
    0    NaN
    1    1.0
    2    2.0
    3    3.0
    4    4.0
    dtype: float64

    >>> s1.where(s1 > 1, 10).sort_index()
    0    10
    1    10
    2     2
    3     3
    4     4
    dtype: int64

    >>> s1.where(s1 > 1, s1 + 100).sort_index()
    0    100
    1    101
    2      2
    3      3
    4      4
    dtype: int64

    >>> s1.where(s1 > 1, s2).sort_index()
    0    100
    1    200
    2      2
    3      3
    4      4
    dtype: int64

    >>> reset_option("compute.ops_on_diff_frames")
    """
    assert isinstance(cond, Series)

    # We should check the DataFrame from both `cond` and `other`.
    should_try_ops_on_diff_frame = not same_anchor(cond, self) or (
        isinstance(other, Series) and not same_anchor(other, self)
    )

    if should_try_ops_on_diff_frame:
        # Try to perform it with 'compute.ops_on_diff_frame' option.
        # Align everything onto one frame via temporary columns, then
        # pick per-row between the original column and `other`.
        psdf = self.to_frame()
        tmp_cond_col = verify_temp_column_name(psdf, "__tmp_cond_col__")
        tmp_other_col = verify_temp_column_name(psdf, "__tmp_other_col__")
        psdf[tmp_cond_col] = cond
        psdf[tmp_other_col] = other

        # above logic makes a Spark DataFrame looks like below:
        # +-----------------+---+----------------+-----------------+
        # |__index_level_0__|  0|__tmp_cond_col__|__tmp_other_col__|
        # +-----------------+---+----------------+-----------------+
        # |                0|  0|           false|              100|
        # |                1|  1|           false|              200|
        # |                3|  3|            true|              400|
        # |                2|  2|            true|              300|
        # |                4|  4|            true|              500|
        # +-----------------+---+----------------+-----------------+
        condition = (
            F.when(
                psdf[tmp_cond_col].spark.column,
                psdf._psser_for(psdf._internal.column_labels[0]).spark.column,
            )
            .otherwise(psdf[tmp_other_col].spark.column)
            .alias(psdf._internal.data_spark_column_names[0])
        )

        internal = psdf._internal.with_new_columns(
            [condition], column_labels=self._internal.column_labels
        )
        return first_series(DataFrame(internal))
    else:
        # Same anchor: a single column expression suffices; no join needed.
        if isinstance(other, Series):
            other = other.spark.column
        condition = (
            F.when(cond.spark.column, self.spark.column)
            .otherwise(other)
            .alias(self._internal.data_spark_column_names[0])
        )
        return self._with_new_scol(condition)
def mask(self, cond: "Series", other: Any = np.nan) -> "Series":
    """
    Replace values where the condition is True.

    Parameters
    ----------
    cond : boolean Series
        Where cond is False, keep the original value. Where True,
        replace with corresponding value from other.
    other : scalar, Series
        Entries where cond is True are replaced with corresponding value from other.

    Returns
    -------
    Series

    Examples
    --------
    >>> from pyspark.pandas.config import set_option, reset_option
    >>> set_option("compute.ops_on_diff_frames", True)
    >>> s1 = ps.Series([0, 1, 2, 3, 4])
    >>> s2 = ps.Series([100, 200, 300, 400, 500])
    >>> s1.mask(s1 > 0).sort_index()
    0    0.0
    1    NaN
    2    NaN
    3    NaN
    4    NaN
    dtype: float64

    >>> s1.mask(s1 > 1, 10).sort_index()
    0     0
    1     1
    2    10
    3    10
    4    10
    dtype: int64

    >>> s1.mask(s1 > 1, s2).sort_index()
    0      0
    1      1
    2    300
    3    400
    4    500
    dtype: int64

    >>> reset_option("compute.ops_on_diff_frames")
    """
    # mask(cond) is exactly where(~cond): replace where the condition holds.
    inverted = cast(Series, ~cond)
    return self.where(inverted, other)
def xs(self, key: Name, level: Optional[int] = None) -> "Series":
    """
    Return cross-section from the Series.

    This method takes a `key` argument to select data at a particular
    level of a MultiIndex.

    Parameters
    ----------
    key : label or tuple of label
        Label contained in the index, or partially in a MultiIndex.
    level : object, defaults to first n levels (n=1 or len(key))
        In case of a key partially contained in a MultiIndex, indicate
        which levels are used. Levels can be referred by label or position.

    Returns
    -------
    Series
        Cross-section from the original Series
        corresponding to the selected index levels.

    Examples
    --------
    >>> midx = pd.MultiIndex([['a', 'b', 'c'],
    ...                       ['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [0, 1, 2, 0, 1, 2, 0, 1, 2]])
    >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
    ...               index=midx)

    Get values at specified index

    >>> s.xs('a')
    lama  speed      45.0
          weight    200.0
          length      1.2
    dtype: float64

    Get values at several indexes

    >>> s.xs(('a', 'lama'))
    speed      45.0
    weight    200.0
    length      1.2
    dtype: float64

    Get values at specified index and level

    >>> s.xs('lama', level=1)
    a  speed      45.0
       weight    200.0
       length      1.2
    dtype: float64
    """
    # Normalize key to a tuple and default the starting level to 0.
    if not isinstance(key, tuple):
        key = (key,)
    if level is None:
        level = 0

    internal = self._internal
    # Keep the index levels outside the matched [level, level + len(key))
    # span, plus the data column.
    scols = (
        internal.index_spark_columns[:level]
        + internal.index_spark_columns[level + len(key) :]
        + [self.spark.column]
    )
    # One equality predicate per matched level, ANDed together.
    rows = [internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)]
    sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)

    if internal.index_level == len(key):
        # The key fully specifies the index.
        # if spark_frame has one column and one data, return data only without frame
        pdf = sdf.limit(2).toPandas()
        length = len(pdf)
        if length == 1:
            return pdf[self._internal.data_spark_column_names[0]].iloc[0]

    # Rebuild the Series with the remaining (unmatched) index levels.
    index_spark_column_names = (
        internal.index_spark_column_names[:level]
        + internal.index_spark_column_names[level + len(key) :]
    )
    index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
    index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]

    internal = internal.copy(
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
        index_names=index_names,
        index_fields=index_fields,
        data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],
    )
    return first_series(DataFrame(internal))
def pct_change(self, periods: int = 1) -> "Series":
    """
    Percentage change between the current and a prior element.

    .. note:: the current implementation of this API uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming percent change.

    Returns
    -------
    Series

    Examples
    --------
    >>> psser = ps.Series([90, 91, 85], index=[2, 4, 1])
    >>> psser.pct_change()
    2         NaN
    4    0.011111
    1   -0.065934
    dtype: float64

    >>> psser.sort_index().pct_change()
    1         NaN
    2    0.058824
    4    0.011111
    dtype: float64

    >>> psser.pct_change(periods=2)
    2         NaN
    4         NaN
    1   -0.055556
    dtype: float64
    """
    current = self.spark.column
    # Look exactly `periods` rows back in the natural row order.
    lag_window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
    previous = F.lag(current, periods).over(lag_window)
    # Rows without a predecessor yield null (NaN) automatically.
    change = (current - previous) / previous
    return self._with_new_scol(change).spark.analyzed
def combine_first(self, other: "Series") -> "Series":
    """
    Combine Series values, choosing the calling Series's values first.

    Parameters
    ----------
    other : Series
        The value(s) to be combined with the `Series`.

    Returns
    -------
    Series
        The result of combining the Series with the other object.

    See Also
    --------
    Series.combine : Perform elementwise operation on two Series
        using a given function.

    Notes
    -----
    Result index will be the union of the two indexes.

    Examples
    --------
    >>> s1 = ps.Series([1, np.nan])
    >>> s2 = ps.Series([3, 4])
    >>> with ps.option_context("compute.ops_on_diff_frames", True):
    ...     s1.combine_first(s2)
    0    1.0
    1    4.0
    dtype: float64
    """
    if not isinstance(other, ps.Series):
        raise TypeError("`combine_first` only allows `Series` for parameter `other`")
    # When both Series share one anchoring frame, no join is needed.
    if same_anchor(self, other):
        this = self.spark.column
        that = other.spark.column
        combined = self._psdf
    else:
        # Full outer combine on index (result index is the union).
        combined = combine_frames(self._psdf, other._psdf)
        this = combined["this"]._internal.spark_column_for(self._column_label)
        that = combined["that"]._internal.spark_column_for(other._column_label)
    # If `self` has missing value, use value of `other`
    cond = F.when(this.isNull(), that).otherwise(this)
    # If `self` and `other` come from same frame, the anchor should be kept
    if same_anchor(self, other):
        return self._with_new_scol(cond)  # TODO: dtype?
    index_scols = combined._internal.index_spark_columns
    sdf = combined._internal.spark_frame.select(
        *index_scols, cond.alias(self._internal.data_spark_column_names[0])
    ).distinct()
    internal = self._internal.with_new_sdf(
        sdf, index_fields=combined._internal.index_fields, data_fields=[None]  # TODO: dtype?
    )
    return first_series(DataFrame(internal))
def dot(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
    """
    Compute the dot product between the Series and the columns of other.

    This method computes the dot product between the Series and another
    one, or the Series and each columns of a DataFrame.

    It can also be called using `self @ other` in Python >= 3.5.

    .. note:: This API is slightly different from pandas when indexes from both Series
        are not aligned. To match with pandas', it requires to read the whole data for,
        for example, counting. pandas raises an exception; however, pandas-on-Spark
        just proceeds and performs by ignoring mismatches with NaN permissively.

        >>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2])
        >>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3])
        >>> pdf1.dot(pdf2)  # doctest: +SKIP
        ...
        ValueError: matrices are not aligned

        >>> psdf1 = ps.Series([1, 2, 3], index=[0, 1, 2])
        >>> psdf2 = ps.Series([1, 2, 3], index=[0, 1, 3])
        >>> psdf1.dot(psdf2)  # doctest: +SKIP
        5

    Parameters
    ----------
    other : Series, DataFrame.
        The other object to compute the dot product with its columns.

    Returns
    -------
    scalar, Series
        Return the dot product of the Series and other if other is a
        Series, the Series of the dot product of Series and each rows of
        other if other is a DataFrame.

    Notes
    -----
    The Series and other has to share the same index if other is a Series
    or a DataFrame.

    Examples
    --------
    >>> s = ps.Series([0, 1, 2, 3])

    >>> s.dot(s)
    14

    >>> s @ s
    14

    >>> psdf = ps.DataFrame({'x': [0, 1, 2, 3], 'y': [0, -1, -2, -3]})
    >>> psdf
       x  y
    0  0  0
    1  1 -1
    2  2 -2
    3  3 -3

    >>> with ps.option_context("compute.ops_on_diff_frames", True):
    ...     s.dot(psdf)
    ...
    x    14
    y   -14
    dtype: int64
    """
    if isinstance(other, DataFrame):
        if not same_anchor(self, other):
            # Sorting both indexes is an (expensive) full-data check that
            # the labels match, mirroring pandas' alignment requirement.
            if not self.index.sort_values().equals(other.index.sort_values()):
                raise ValueError("matrices are not aligned")
        other_copy = other.copy()  # type: DataFrame
        column_labels = other_copy._internal.column_labels
        # Attach `self` as a temporary column of `other` so each product
        # below is computed row-aligned within a single anchor.
        self_column_label = verify_temp_column_name(other_copy, "__self_column__")
        other_copy[self_column_label] = self
        self_psser = other_copy._psser_for(self_column_label)
        product_pssers = [
            cast(Series, other_copy._psser_for(label) * self_psser) for label in column_labels
        ]
        # Dot product per column = sum of the elementwise products.
        dot_product_psser = DataFrame(
            other_copy._internal.with_new_columns(product_pssers, column_labels=column_labels)
        ).sum()
        return cast(Series, dot_product_psser).rename(self.name)
    else:
        assert isinstance(other, Series)
        if not same_anchor(self, other):
            # Cheaper length check; mismatched labels are NaN-ignored
            # permissively (see the note in the docstring).
            if len(self.index) != len(other.index):
                raise ValueError("matrices are not aligned")
        return (self * other).sum()
def __matmul__(self, other: Union["Series", DataFrame]) -> Union[Scalar, "Series"]:
    """
    Matrix multiplication using binary `@` operator in Python>=3.5.
    """
    # The `@` operator is just sugar for `dot`.
    product = self.dot(other)
    return product
def repeat(self, repeats: Union[int, "Series"]) -> "Series":
    """
    Repeat elements of a Series.

    Returns a new Series where each element of the current Series
    is repeated consecutively a given number of times.

    Parameters
    ----------
    repeats : int or Series
        The number of repetitions for each element. This should be a
        non-negative integer. Repeating 0 times will return an empty
        Series.

    Returns
    -------
    Series
        Newly created Series with repeated elements.

    See Also
    --------
    Index.repeat : Equivalent function for Index.

    Examples
    --------
    >>> s = ps.Series(['a', 'b', 'c'])
    >>> s
    0    a
    1    b
    2    c
    dtype: object

    >>> s.repeat(2)
    0    a
    1    b
    2    c
    0    a
    1    b
    2    c
    dtype: object

    >>> ps.Series([1, 2, 3]).repeat(0)
    Series([], dtype: int64)
    """
    if not isinstance(repeats, (int, Series)):
        raise TypeError(
            "`repeats` argument must be integer or Series, but got {}".format(type(repeats))
        )
    if isinstance(repeats, Series):
        if not same_anchor(self, repeats):
            # Align per-row repeat counts by attaching them as a temporary
            # column, then recurse on the now-same-anchor pair.
            psdf = self.to_frame()
            temp_repeats = verify_temp_column_name(psdf, "__temp_repeats__")
            psdf[temp_repeats] = repeats
            return (
                psdf._psser_for(psdf._internal.column_labels[0])
                .repeat(psdf[temp_repeats])
                .rename(self.name)
            )
        else:
            # array_repeat builds an array of n copies per row; explode
            # flattens it back into one row per copy.
            scol = F.explode(
                F.array_repeat(self.spark.column, repeats.astype("int32").spark.column)
            ).alias(name_like_string(self.name))
            sdf = self._internal.spark_frame.select(self._internal.index_spark_columns + [scol])
            internal = self._internal.copy(
                spark_frame=sdf,
                index_spark_columns=[
                    scol_for(sdf, col) for col in self._internal.index_spark_column_names
                ],
                data_spark_columns=[scol_for(sdf, name_like_string(self.name))],
            )
            return first_series(DataFrame(internal))
    else:
        if repeats < 0:
            raise ValueError("negative dimensions are not allowed")
        psdf = self._psdf[[self.name]]
        if repeats == 0:
            # Keep the schema but filter out every row.
            return first_series(DataFrame(psdf._internal.with_filter(SF.lit(False))))
        else:
            # Scalar repeat: concatenate the frame with itself n times.
            return first_series(ps.concat([psdf] * repeats))
def asof(self, where: Union[Any, List]) -> Union[Scalar, "Series"]:
    """
    Return the last row(s) without any NaNs before `where`.

    The last row (for each element in `where`, if list) without any
    NaN is taken.

    If there is no good value, NaN is returned.

    .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
        which can be expensive.

    Parameters
    ----------
    where : index or array-like of indices

    Returns
    -------
    scalar or Series

        The return can be:

        * scalar : when `self` is a Series and `where` is a scalar
        * Series: when `self` is a Series and `where` is an array-like

        Return scalar or Series

    Notes
    -----
    Indices are assumed to be sorted. Raises if this is not the case.

    Examples
    --------
    >>> s = ps.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
    >>> s
    10    1.0
    20    2.0
    30    NaN
    40    4.0
    dtype: float64

    A scalar `where`.

    >>> s.asof(20)
    2.0

    For a sequence `where`, a Series is returned. The first value is
    NaN, because the first element of `where` is before the first
    index value.

    >>> s.asof([5, 20]).sort_index()
    5     NaN
    20    2.0
    dtype: float64

    Missing values are not considered. The following is ``2.0``, not
    NaN, even though NaN is at the index location for ``30``.

    >>> s.asof(30)
    2.0
    """
    should_return_series = True
    if isinstance(self.index, ps.MultiIndex):
        raise ValueError("asof is not supported for a MultiIndex")
    if isinstance(where, (ps.Index, ps.Series, DataFrame)):
        raise ValueError("where cannot be an Index, Series or a DataFrame")
    if not self.index.is_monotonic_increasing:
        raise ValueError("asof requires a sorted index")
    if not is_list_like(where):
        # Scalar `where`: wrap it so the aggregation below is uniform,
        # and remember to unwrap the result.
        should_return_series = False
        where = [where]
    index_scol = self._internal.index_spark_columns[0]
    index_type = self._internal.spark_type_for(index_scol)
    # One MAX aggregate per requested position: nulls (values past `index`
    # and NaNs, which F.max skips) are ignored, leaving the last valid value.
    cond = [
        F.max(F.when(index_scol <= SF.lit(index).cast(index_type), self.spark.column))
        for index in where
    ]
    sdf = self._internal.spark_frame.select(cond)
    if not should_return_series:
        with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
            # Disable Arrow to keep row ordering.
            result = cast(pd.DataFrame, sdf.limit(1).toPandas()).iloc[0, 0]
        return result if result is not None else np.nan
    # The data is expected to be small so it's fine to transpose/use default index.
    with ps.option_context("compute.default_index_type", "distributed", "compute.max_rows", 1):
        psdf = ps.DataFrame(sdf)  # type: DataFrame
        psdf.columns = pd.Index(where)
        return first_series(psdf.transpose()).rename(self.name)
def mad(self) -> float:
    """
    Return the mean absolute deviation of values.

    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.mad()
    1.0
    """
    frame = self._internal.spark_frame
    col = self.spark.column
    # First pass: mean of the column. Second pass: mean absolute
    # distance from that mean.
    mean_value = unpack_scalar(frame.select(F.avg(col)))
    return unpack_scalar(frame.select(F.avg(F.abs(col - mean_value))))
def unstack(self, level: int = -1) -> DataFrame:
    """
    Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
    The level involved will automatically get sorted.

    Notes
    -----
    Unlike pandas, pandas-on-Spark doesn't check whether an index is duplicated or not
    because the checking of duplicated index requires scanning whole data which
    can be quite expensive.

    Parameters
    ----------
    level : int, str, or list of these, default last level
        Level(s) to unstack, can pass level name.

    Returns
    -------
    DataFrame
        Unstacked Series.

    Examples
    --------
    >>> s = ps.Series([1, 2, 3, 4],
    ...               index=pd.MultiIndex.from_product([['one', 'two'],
    ...                                                 ['a', 'b']]))
    >>> s
    one  a    1
         b    2
    two  a    3
         b    4
    dtype: int64

    >>> s.unstack(level=-1).sort_index()
         a  b
    one  1  2
    two  3  4

    >>> s.unstack(level=0).sort_index()
       one  two
    a    1    3
    b    2    4
    """
    if not isinstance(self.index, ps.MultiIndex):
        raise ValueError("Series.unstack only support for a MultiIndex")
    index_nlevels = self.index.nlevels
    # Bounds-check `level` against the number of index levels, matching
    # pandas' error messages for positive and negative levels.
    if level > 0 and (level > index_nlevels - 1):
        raise IndexError(
            "Too many levels: Index has only {} levels, not {}".format(index_nlevels, level + 1)
        )
    elif level < 0 and (level < -index_nlevels):
        raise IndexError(
            "Too many levels: Index has only {} levels, {} is not a valid level number".format(
                index_nlevels, level
            )
        )
    internal = self._internal.resolved_copy
    index_map = list(zip(internal.index_spark_column_names, internal.index_names))
    # Pop the level being unstacked; it becomes the pivot (column) axis,
    # while the remaining levels stay as the row index.
    pivot_col, column_label_names = index_map.pop(level)
    index_scol_names, index_names = zip(*index_map)
    col = internal.data_spark_column_names[0]
    sdf = internal.spark_frame
    # F.first is safe here because each (index, pivot) pair is assumed
    # unique (duplicated indexes are not checked; see Notes).
    sdf = sdf.groupby(list(index_scol_names)).pivot(pivot_col).agg(F.first(scol_for(sdf, col)))
    internal = InternalFrame(  # TODO: dtypes?
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, col) for col in index_scol_names],
        index_names=list(index_names),
        column_label_names=[column_label_names],
    )
    return DataFrame(internal)
def item(self) -> Scalar:
    """
    Return the first element of the underlying data as a Python scalar.

    Returns
    -------
    scalar
        The first element of Series.

    Raises
    ------
    ValueError
        If the data is not length-1.

    Examples
    --------
    >>> psser = ps.Series([10])
    >>> psser.item()
    10
    """
    # Fetch two rows at most: pandas' `item` then raises ValueError
    # whenever the Series does not contain exactly one element.
    head_pser = self.head(2)._to_internal_pandas()
    return head_pser.item()
def iteritems(self) -> Iterable[Tuple[Name, Any]]:
    """
    Lazily iterate over (index, value) tuples.

    This method returns an iterable tuple (index, value). This is
    convenient if you want to create a lazy iterator.

    .. note:: Unlike pandas', the iteritems in pandas-on-Spark returns generator rather
        zip object

    Returns
    -------
    iterable
        Iterable of tuples containing the (index, value) pairs from a
        Series.

    See Also
    --------
    DataFrame.items : Iterate over (column name, Series) pairs.
    DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.

    Examples
    --------
    >>> s = ps.Series(['A', 'B', 'C'])
    >>> for index, value in s.items():
    ...     print("Index : {}, Value : {}".format(index, value))
    Index : 0, Value : A
    Index : 1, Value : B
    Index : 2, Value : C
    """
    index_columns = self._internal.index_spark_column_names
    data_column = self._internal.data_spark_column_names[0]

    def row_to_pair(row: Row) -> Tuple[Name, Any]:
        # Single-level index -> scalar key; multi-level -> tuple key.
        if len(index_columns) == 1:
            key = row[index_columns[0]]
        else:
            key = tuple(row[column] for column in index_columns)
        return key, row[data_column]

    # toLocalIterator streams partitions to the driver lazily.
    rows = self._internal.resolved_copy.spark_frame.toLocalIterator()
    for pair in map(row_to_pair, rows):
        yield pair
def items(self) -> Iterable[Tuple[Name, Any]]:
    """This is an alias of ``iteritems``."""
    # Delegates directly; see `iteritems` for the lazy-iteration contract.
    return self.iteritems()
def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> "Series":
    """
    Return Series with requested index level(s) removed.

    Parameters
    ----------
    level : int, str, or list-like
        If a string is given, must be the name of a level
        If list-like, elements must be names or positional indexes
        of levels.

    Returns
    -------
    Series
        Series with requested index level(s) removed.

    Examples
    --------
    >>> psser = ps.Series(
    ...     [1, 2, 3],
    ...     index=pd.MultiIndex.from_tuples(
    ...         [("x", "a"), ("x", "b"), ("y", "c")], names=["level_1", "level_2"]
    ...     ),
    ... )
    >>> psser
    level_1  level_2
    x        a          1
             b          2
    y        c          3
    dtype: int64

    Removing specific index level by level

    >>> psser.droplevel(0)
    level_2
    a    1
    b    2
    c    3
    dtype: int64

    Removing specific index level by name

    >>> psser.droplevel("level_2")
    level_1
    x    1
    x    2
    y    3
    dtype: int64
    """
    # Reuse the DataFrame implementation, then convert back to a Series
    # carrying the original name.
    dropped = self.to_frame().droplevel(level=level, axis=0)
    return first_series(dropped).rename(self.name)
def tail(self, n: int = 5) -> "Series":
    """
    Return the last `n` rows.

    This function returns last `n` rows from the object based on
    position. It is useful for quickly verifying data, for example,
    after sorting or appending rows.

    For negative values of `n`, this function returns all rows except
    the first `n` rows, equivalent to ``df[n:]``.

    Parameters
    ----------
    n : int, default 5
        Number of rows to select.

    Returns
    -------
    type of caller
        The last `n` rows of the caller object.

    See Also
    --------
    DataFrame.head : The first `n` rows of the caller object.

    Examples
    --------
    >>> psser = ps.Series([1, 2, 3, 4, 5])
    >>> psser
    0    1
    1    2
    2    3
    3    4
    4    5
    dtype: int64

    >>> psser.tail(3)  # doctest: +SKIP
    2    3
    3    4
    4    5
    dtype: int64
    """
    # Delegate to the DataFrame implementation, then restore Series form.
    tail_frame = self.to_frame().tail(n=n)
    return first_series(tail_frame).rename(self.name)
def explode(self) -> "Series":
    """
    Transform each element of a list-like to a row.

    Returns
    -------
    Series
        Exploded lists to rows; index will be duplicated for these rows.

    See Also
    --------
    Series.str.split : Split string values on specified separator.
    Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
        to produce DataFrame.
    DataFrame.melt : Unpivot a DataFrame from wide format to long format.
    DataFrame.explode : Explode a DataFrame from list-like
        columns to long format.

    Examples
    --------
    >>> psser = ps.Series([[1, 2, 3], [], [3, 4]])
    >>> psser
    0    [1, 2, 3]
    1           []
    2       [3, 4]
    dtype: object

    >>> psser.explode()  # doctest: +SKIP
    0    1.0
    0    2.0
    0    3.0
    1    NaN
    2    3.0
    2    4.0
    dtype: float64
    """
    # Only array columns can be exploded; anything else is returned
    # unchanged (as a copy), mirroring pandas' behavior for scalars.
    if not isinstance(self.spark.data_type, ArrayType):
        return self.copy()
    exploded = F.explode_outer(self.spark.column).alias(
        name_like_string(self._column_label)
    )
    # explode_outer keeps empty/null arrays as a single null row.
    new_internal = self._internal.with_new_columns([exploded], keep_order=False)
    return first_series(DataFrame(new_internal))
def argsort(self) -> "Series":
    """
    Return the integer indices that would sort the Series values.
    Unlike pandas, the index order is not preserved in the result.

    Returns
    -------
    Series
        Positions of values within the sort order with -1 indicating
        nan values.

    Examples
    --------
    >>> psser = ps.Series([3, 3, 4, 1, 6, 2, 3, 7, 8, 7, 10])
    >>> psser
    0      3
    1      3
    2      4
    3      1
    4      6
    5      2
    6      3
    7      7
    8      8
    9      7
    10    10
    dtype: int64

    >>> psser.argsort().sort_index()
    0      3
    1      5
    2      0
    3      1
    4      6
    5      2
    6      4
    7      7
    8      9
    9      8
    10    10
    dtype: int64
    """
    # Work on non-null values only; nulls are appended as -1 at the end.
    notnull = self.loc[self.notnull()]
    sdf_for_index = notnull._internal.spark_frame.select(notnull._internal.index_spark_columns)
    tmp_join_key = verify_temp_column_name(sdf_for_index, "__tmp_join_key__")
    sdf_for_index, _ = InternalFrame.attach_distributed_sequence_column(
        sdf_for_index, tmp_join_key
    )
    # sdf_for_index:
    # +----------------+-----------------+
    # |__tmp_join_key__|__index_level_0__|
    # +----------------+-----------------+
    # |               0|                0|
    # |               1|                1|
    # |               2|                2|
    # |               3|                3|
    # |               4|                4|
    # +----------------+-----------------+
    sdf_for_data = notnull._internal.spark_frame.select(
        notnull.spark.column.alias("values"), NATURAL_ORDER_COLUMN_NAME
    )
    # Attach the original row position BEFORE sorting; this is the value
    # that argsort ultimately returns.
    sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
        sdf_for_data, SPARK_DEFAULT_SERIES_NAME
    )
    # sdf_for_data:
    # +---+------+-----------------+
    # |  0|values|__natural_order__|
    # +---+------+-----------------+
    # |  0|     3|      25769803776|
    # |  1|     3|      51539607552|
    # |  2|     4|      77309411328|
    # |  3|     1|     103079215104|
    # |  4|     2|     128849018880|
    # +---+------+-----------------+
    # Sort by value (ties broken by natural order) and drop the helper
    # columns, leaving only the original positions in sorted order.
    sdf_for_data = sdf_for_data.sort(
        scol_for(sdf_for_data, "values"), NATURAL_ORDER_COLUMN_NAME
    ).drop("values", NATURAL_ORDER_COLUMN_NAME)
    tmp_join_key = verify_temp_column_name(sdf_for_data, "__tmp_join_key__")
    sdf_for_data, _ = InternalFrame.attach_distributed_sequence_column(
        sdf_for_data, tmp_join_key
    )
    # sdf_for_index:                         sdf_for_data:
    # +----------------+-----------------+   +----------------+---+
    # |__tmp_join_key__|__index_level_0__|   |__tmp_join_key__|  0|
    # +----------------+-----------------+   +----------------+---+
    # |               0|                0|   |               0|  3|
    # |               1|                1|   |               1|  4|
    # |               2|                2|   |               2|  0|
    # |               3|                3|   |               3|  1|
    # |               4|                4|   |               4|  2|
    # +----------------+-----------------+   +----------------+---+
    # Zip index and sorted positions together via the shared sequence key.
    sdf = sdf_for_index.join(sdf_for_data, on=tmp_join_key).drop(tmp_join_key)
    internal = self._internal.with_new_sdf(
        spark_frame=sdf,
        data_columns=[SPARK_DEFAULT_SERIES_NAME],
        index_fields=[
            InternalField(dtype=field.dtype) for field in self._internal.index_fields
        ],
        data_fields=[None],
    )
    psser = first_series(DataFrame(internal))
    # Null positions are represented as -1, matching pandas.
    return cast(
        Series,
        ps.concat([psser, self.loc[self.isnull()].spark.transform(lambda _: SF.lit(-1))]),
    )
def argmax(self) -> int:
    """
    Return int position of the largest value in the Series.

    If the maximum is achieved in multiple locations,
    the first row position is returned.

    Returns
    -------
    int
        Row position of the maximum value.

    Examples
    --------
    Consider dataset containing cereal calories

    >>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
    ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
    >>> s  # doctest: +SKIP
    Corn Flakes              100.0
    Almond Delight           110.0
    Cinnamon Toast Crunch    120.0
    Cocoa Puff               110.0
    dtype: float64

    >>> s.argmax()  # doctest: +SKIP
    2
    """
    sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
    # Single aggregation: max of the data column, plus any natural-order
    # value — the latter being null iff the frame has no rows at all.
    max_value = sdf.select(
        F.max(scol_for(sdf, self._internal.data_spark_column_names[0])),
        F.first(NATURAL_ORDER_COLUMN_NAME),
    ).head()
    if max_value[1] is None:
        # No rows at all -> empty Series.
        raise ValueError("attempt to get argmax of an empty sequence")
    elif max_value[0] is None:
        # Rows exist but all values are null.
        return -1
    # We should remember the natural sequence started from 0
    seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
    sdf, _ = InternalFrame.attach_distributed_sequence_column(
        sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
    )
    # If the maximum is achieved in multiple locations, the first row position is returned.
    return sdf.filter(
        scol_for(sdf, self._internal.data_spark_column_names[0]) == max_value[0]
    ).head()[0]
def argmin(self) -> int:
    """
    Return int position of the smallest value in the Series.

    If the minimum is achieved in multiple locations,
    the first row position is returned.

    Returns
    -------
    int
        Row position of the minimum value.

    Examples
    --------
    Consider dataset containing cereal calories

    >>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,
    ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})
    >>> s  # doctest: +SKIP
    Corn Flakes              100.0
    Almond Delight           110.0
    Cinnamon Toast Crunch    120.0
    Cocoa Puff               110.0
    dtype: float64

    >>> s.argmin()  # doctest: +SKIP
    0
    """
    sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)
    # Single aggregation: min of the data column, plus any natural-order
    # value — the latter being null iff the frame has no rows at all.
    min_value = sdf.select(
        F.min(scol_for(sdf, self._internal.data_spark_column_names[0])),
        F.first(NATURAL_ORDER_COLUMN_NAME),
    ).head()
    if min_value[1] is None:
        # No rows at all -> empty Series.
        raise ValueError("attempt to get argmin of an empty sequence")
    elif min_value[0] is None:
        # Rows exist but all values are null.
        return -1
    # We should remember the natural sequence started from 0
    seq_col_name = verify_temp_column_name(sdf, "__distributed_sequence_column__")
    sdf, _ = InternalFrame.attach_distributed_sequence_column(
        sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name
    )
    # If the minimum is achieved in multiple locations, the first row position is returned.
    return sdf.filter(
        scol_for(sdf, self._internal.data_spark_column_names[0]) == min_value[0]
    ).head()[0]
def compare(
    self, other: "Series", keep_shape: bool = False, keep_equal: bool = False
) -> DataFrame:
    """
    Compare to another Series and show the differences.

    Parameters
    ----------
    other : Series
        Object to compare with.
    keep_shape : bool, default False
        If true, all rows and columns are kept.
        Otherwise, only the ones with different values are kept.
    keep_equal : bool, default False
        If true, the result keeps values that are equal.
        Otherwise, equal values are shown as NaNs.

    Returns
    -------
    DataFrame

    Notes
    -----
    Matching NaNs will not appear as a difference.

    Examples
    --------

    >>> from pyspark.pandas.config import set_option, reset_option
    >>> set_option("compute.ops_on_diff_frames", True)
    >>> s1 = ps.Series(["a", "b", "c", "d", "e"])
    >>> s2 = ps.Series(["a", "a", "c", "b", "e"])

    Align the differences on columns

    >>> s1.compare(s2).sort_index()
      self other
    1    b     a
    3    d     b

    Keep all original rows

    >>> s1.compare(s2, keep_shape=True).sort_index()
       self other
    0  None  None
    1     b     a
    2  None  None
    3     d     b
    4  None  None

    Keep all original rows and also all original values

    >>> s1.compare(s2, keep_shape=True, keep_equal=True).sort_index()
      self other
    0     a     a
    1     b     a
    2     c     c
    3     d     b
    4     e     e

    >>> reset_option("compute.ops_on_diff_frames")
    """
    if same_anchor(self, other):
        # Same anchor: place both columns side by side under temporary
        # labels within the shared frame — no join needed.
        self_column_label = verify_temp_column_name(other.to_frame(), "__self_column__")
        other_column_label = verify_temp_column_name(self.to_frame(), "__other_column__")
        combined = DataFrame(
            self._internal.with_new_columns(
                [self.rename(self_column_label), other.rename(other_column_label)]
            )
        )  # type: DataFrame
    else:
        if not self.index.equals(other.index):
            raise ValueError("Can only compare identically-labeled Series objects")
        combined = combine_frames(self.to_frame(), other.to_frame())
    this_column_label = "self"
    that_column_label = "other"
    if keep_equal and keep_shape:
        # Nothing to mask or filter: return the combined pair as-is.
        combined.columns = pd.Index([this_column_label, that_column_label])
        return combined
    this_data_scol = combined._internal.data_spark_columns[0]
    that_data_scol = combined._internal.data_spark_columns[1]
    index_scols = combined._internal.index_spark_columns
    sdf = combined._internal.spark_frame
    if keep_shape:
        # Keep every row but null out positions where the values match.
        this_scol = (
            F.when(this_data_scol == that_data_scol, None)
            .otherwise(this_data_scol)
            .alias(this_column_label)
        )
        this_field = combined._internal.data_fields[0].copy(
            name=this_column_label, nullable=True
        )
        that_scol = (
            F.when(this_data_scol == that_data_scol, None)
            .otherwise(that_data_scol)
            .alias(that_column_label)
        )
        that_field = combined._internal.data_fields[1].copy(
            name=that_column_label, nullable=True
        )
    else:
        # Drop rows where both sides agree; eqNullSafe treats matching
        # nulls as equal so they do not appear as differences.
        sdf = sdf.filter(~this_data_scol.eqNullSafe(that_data_scol))
        this_scol = this_data_scol.alias(this_column_label)
        this_field = combined._internal.data_fields[0].copy(name=this_column_label)
        that_scol = that_data_scol.alias(that_column_label)
        that_field = combined._internal.data_fields[1].copy(name=that_column_label)
    sdf = sdf.select(*index_scols, this_scol, that_scol, NATURAL_ORDER_COLUMN_NAME)
    internal = InternalFrame(
        spark_frame=sdf,
        index_spark_columns=[
            scol_for(sdf, col) for col in self._internal.index_spark_column_names
        ],
        index_names=self._internal.index_names,
        index_fields=combined._internal.index_fields,
        column_labels=[(this_column_label,), (that_column_label,)],
        data_spark_columns=[scol_for(sdf, this_column_label), scol_for(sdf, that_column_label)],
        data_fields=[this_field, that_field],
        column_label_names=[None],
    )
    return DataFrame(internal)
def align(
    self,
    other: Union[DataFrame, "Series"],
    join: str = "outer",
    axis: Optional[Axis] = None,
    copy: bool = True,
) -> Tuple["Series", Union[DataFrame, "Series"]]:
    """
    Align two objects on their axes with the specified join method.

    Join method is specified for each axis Index.

    Parameters
    ----------
    other : DataFrame or Series
    join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
    axis : allowed axis of the other object, default None
        Align on index (0), columns (1), or both (None).
    copy : bool, default True
        Always returns new objects. If copy=False and no reindexing is
        required then original objects are returned.

    Returns
    -------
    (left, right) : (Series, type of other)
        Aligned objects.

    Examples
    --------
    >>> ps.set_option("compute.ops_on_diff_frames", True)
    >>> s1 = ps.Series([7, 8, 9], index=[10, 11, 12])
    >>> s2 = ps.Series(["g", "h", "i"], index=[10, 20, 30])

    >>> aligned_l, aligned_r = s1.align(s2)
    >>> aligned_l.sort_index()
    10    7.0
    11    8.0
    12    9.0
    20    NaN
    30    NaN
    dtype: float64
    >>> aligned_r.sort_index()
    10       g
    11    None
    12    None
    20       h
    30       i
    dtype: object

    Align with the join type "inner":

    >>> aligned_l, aligned_r = s1.align(s2, join="inner")
    >>> aligned_l.sort_index()
    10    7
    dtype: int64
    >>> aligned_r.sort_index()
    10    g
    dtype: object

    Align with a DataFrame:

    >>> df = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
    >>> aligned_l, aligned_r = s1.align(df)
    >>> aligned_l.sort_index()
    10    7.0
    11    8.0
    12    9.0
    20    NaN
    30    NaN
    dtype: float64
    >>> aligned_r.sort_index()
         a     b
    10  1.0     a
    11  NaN  None
    12  NaN  None
    20  2.0     b
    30  3.0     c

    >>> ps.reset_option("compute.ops_on_diff_frames")
    """
    axis = validate_axis(axis)
    if axis == 1:
        raise ValueError("Series does not support columns axis.")
    # Delegate to the DataFrame implementation with copy=False so we can
    # detect whether `self` actually needed re-indexing.
    frame = self.to_frame()
    aligned_left, aligned_right = frame.align(other, join=join, axis=axis, copy=False)
    if aligned_left is frame:
        # No reindexing happened on our side; keep the original Series.
        left_series = self
    else:
        left_series = first_series(aligned_left).rename(self.name)
    if copy:
        return left_series.copy(), aligned_right.copy()
    return left_series, aligned_right
def between_time(
    self,
    start_time: Union[datetime.time, str],
    end_time: Union[datetime.time, str],
    include_start: bool = True,
    include_end: bool = True,
    axis: Axis = 0,
) -> "Series":
    """
    Select values between particular times of the day (example: 9:00-9:30 AM).

    By setting ``start_time`` to be later than ``end_time``,
    you can get the times that are *not* between the two times.

    Parameters
    ----------
    start_time : datetime.time or str
        Initial time as a time filter limit.
    end_time : datetime.time or str
        End time as a time filter limit.
    include_start : bool, default True
        Whether the start time needs to be included in the result.
    include_end : bool, default True
        Whether the end time needs to be included in the result.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Determine range time on index or columns value.

    Returns
    -------
    Series
        Data from the original object filtered to the specified dates range.

    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`

    See Also
    --------
    at_time : Select values at a particular time of the day.
    last : Select final periods of time series based on a date offset.
    DatetimeIndex.indexer_between_time : Get just the index locations for
        values between particular times of the day.

    Examples
    --------
    >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
    >>> psser = ps.Series([1, 2, 3, 4], index=idx)
    >>> psser
    2018-04-09 00:00:00    1
    2018-04-10 00:20:00    2
    2018-04-11 00:40:00    3
    2018-04-12 01:00:00    4
    dtype: int64

    >>> psser.between_time('0:15', '0:45')
    2018-04-10 00:20:00    2
    2018-04-11 00:40:00    3
    dtype: int64
    """
    # Delegate to the DataFrame implementation and convert back.
    filtered = self.to_frame().between_time(
        start_time, end_time, include_start, include_end, axis
    )
    return first_series(filtered).rename(self.name)
def at_time(
    self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0
) -> "Series":
    """
    Select values at particular time of day (example: 9:30AM).

    Parameters
    ----------
    time : datetime.time or str
    axis : {0 or 'index', 1 or 'columns'}, default 0

    Returns
    -------
    Series

    Raises
    ------
    TypeError
        If the index is not a :class:`DatetimeIndex`

    See Also
    --------
    between_time : Select values between particular times of the day.
    DatetimeIndex.indexer_at_time : Get just the index locations for
        values at particular time of the day.

    Examples
    --------
    >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
    >>> psser = ps.Series([1, 2, 3, 4], index=idx)
    >>> psser
    2018-04-09 00:00:00    1
    2018-04-09 12:00:00    2
    2018-04-10 00:00:00    3
    2018-04-10 12:00:00    4
    dtype: int64

    >>> psser.at_time('12:00')
    2018-04-09 12:00:00    2
    2018-04-10 12:00:00    4
    dtype: int64
    """
    # Delegate to the DataFrame implementation and convert back.
    selected = self.to_frame().at_time(time, asof, axis)
    return first_series(selected).rename(self.name)
def _cum(
    self,
    func: Callable[[Column], Column],
    skipna: bool,
    part_cols: Sequence["ColumnOrName"] = (),
    ascending: bool = True,
) -> "Series":
    """
    Apply a cumulative aggregate `func` over an unbounded-preceding
    window ordered by the natural row order.

    This is used to cummin, cummax, cumsum, etc.

    Parameters
    ----------
    func : Spark aggregate function (e.g. F.sum, F.max) applied over the window.
    skipna : if True, null rows stay null but do not poison later rows;
        if False, everything after the first null becomes null (pandas semantics).
    part_cols : extra partitioning columns (used by groupby-cum* variants).
    ascending : if False, accumulate from the end of the Series instead.
    """
    if ascending:
        window = (
            Window.orderBy(F.asc(NATURAL_ORDER_COLUMN_NAME))
            .partitionBy(*part_cols)
            .rowsBetween(Window.unboundedPreceding, Window.currentRow)
        )
    else:
        window = (
            Window.orderBy(F.desc(NATURAL_ORDER_COLUMN_NAME))
            .partitionBy(*part_cols)
            .rowsBetween(Window.unboundedPreceding, Window.currentRow)
        )
    if skipna:
        # There is a behavior difference between pandas and PySpark. In case of cummax,
        #
        # Input:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  NaN
        # 2  1.0  0.0
        # 3  2.0  4.0
        # 4  4.0  9.0
        #
        # pandas:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  NaN
        # 2  5.0  1.0
        # 3  5.0  4.0
        # 4  5.0  9.0
        #
        # PySpark:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  1.0
        # 2  5.0  1.0
        # 3  5.0  4.0
        # 4  5.0  9.0
        scol = F.when(
            # Manually sets nulls given the column defined above.
            self.spark.column.isNull(),
            SF.lit(None),
        ).otherwise(func(self.spark.column).over(window))
    else:
        # Here, we use two Windows.
        # One for real data.
        # The other one for setting nulls after the first null it meets.
        #
        # There is a behavior difference between pandas and PySpark. In case of cummax,
        #
        # Input:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  NaN
        # 2  1.0  0.0
        # 3  2.0  4.0
        # 4  4.0  9.0
        #
        # pandas:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  NaN
        # 2  5.0  NaN
        # 3  5.0  NaN
        # 4  5.0  NaN
        #
        # PySpark:
        #      A    B
        # 0  2.0  1.0
        # 1  5.0  1.0
        # 2  5.0  1.0
        # 3  5.0  4.0
        # 4  5.0  9.0
        scol = F.when(
            # By going through with max, it sets True after the first time it meets null.
            F.max(self.spark.column.isNull()).over(window),
            # Manually sets nulls given the column defined above.
            SF.lit(None),
        ).otherwise(func(self.spark.column).over(window))
    return self._with_new_scol(scol)
def _cumsum(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
    """Cumulative sum helper: booleans are summed as integers, and any
    other non-numeric type raises, matching pandas."""
    psser = self
    data_type = psser.spark.data_type
    if isinstance(data_type, BooleanType):
        psser = psser.spark.transform(lambda scol: scol.cast(LongType()))
    elif not isinstance(data_type, NumericType):
        raise TypeError(
            "Could not convert {} ({}) to numeric".format(
                spark_type_to_pandas_dtype(data_type),
                data_type.simpleString(),
            )
        )
    return psser._cum(F.sum, skipna, part_cols)
def _cumprod(self, skipna: bool, part_cols: Sequence["ColumnOrName"] = ()) -> "Series":
    """
    Cumulative product helper.

    Spark has no cumulative-product window function, so the product is
    computed as exp(sum(log(abs(x)))), with the sign and zeros tracked
    separately via cumulative counts.
    """
    if isinstance(self.spark.data_type, BooleanType):
        # Boolean cumprod == cumulative AND; nulls count as True here so
        # they don't flip the running result, then cast to int like pandas.
        scol = self._cum(
            lambda scol: F.min(F.coalesce(scol, SF.lit(True))), skipna, part_cols
        ).spark.column.cast(LongType())
    elif isinstance(self.spark.data_type, NumericType):
        # Running count of zeros: any zero so far forces the product to 0.
        num_zeros = self._cum(
            lambda scol: F.sum(F.when(scol == 0, 1).otherwise(0)), skipna, part_cols
        ).spark.column
        # Running count of negatives determines the sign of the product.
        num_negatives = self._cum(
            lambda scol: F.sum(F.when(scol < 0, 1).otherwise(0)), skipna, part_cols
        ).spark.column
        sign = F.when(num_negatives % 2 == 0, 1).otherwise(-1)
        # Magnitude via the log-sum-exp identity; log(abs(0)) would be
        # -inf but those rows are overridden by the num_zeros branch below.
        abs_prod = F.exp(
            self._cum(lambda scol: F.sum(F.log(F.abs(scol))), skipna, part_cols).spark.column
        )
        scol = F.when(num_zeros > 0, 0).otherwise(sign * abs_prod)
        if isinstance(self.spark.data_type, IntegralType):
            # exp/log round-trips through floats; round back to integers.
            scol = F.round(scol).cast(LongType())
    else:
        raise TypeError(
            "Could not convert {} ({}) to numeric".format(
                spark_type_to_pandas_dtype(self.spark.data_type),
                self.spark.data_type.simpleString(),
            )
        )
    return self._with_new_scol(scol)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
# Namespaced accessors, created lazily on first attribute access and then
# cached on the instance (see CachedAccessor).
dt = CachedAccessor("dt", DatetimeMethods)  # datetime-like methods, e.g. s.dt.year
str = CachedAccessor("str", StringMethods)  # string methods, e.g. s.str.upper()
cat = CachedAccessor("cat", CategoricalAccessor)  # categorical methods, e.g. s.cat.codes
plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)  # plotting entry point
# ----------------------------------------------------------------------
def _apply_series_op(
    self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False
) -> "Series":
    """Apply `op` to this Series; `op` may return either a Series or a
    raw Spark Column (which is then wrapped into a Series)."""
    result = op(self)
    if not isinstance(result, Series):
        result = self._with_new_scol(cast(Column, result))
    if should_resolve:
        # Materialize the internal plan to cut the lineage.
        return first_series(DataFrame(result._internal.resolved_copy))
    return result
def _reduce_for_stat_function(
    self,
    sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
    name: str_type,
    axis: Optional[Axis] = None,
    numeric_only: bool = True,
    **kwargs: Any
) -> Scalar:
    """
    Applies sfun to the column and returns a scalar

    Parameters
    ----------
    sfun : the stats function to be used for aggregation
    name : original pandas API name.
    axis : used only for sanity check because series only support index axis.
    numeric_only : not used by this implementation, but passed down by stats functions

    Returns
    -------
    The aggregated scalar, or ``np.nan`` when the aggregate is null
    (e.g. empty input or ``min_count`` not reached).
    """
    from inspect import signature

    axis = validate_axis(axis)
    if axis == 1:
        raise ValueError("Series does not support columns axis.")
    # Dispatch on sfun's arity: some aggregates also need the Spark type.
    num_args = len(signature(sfun).parameters)
    spark_column = self.spark.column
    spark_type = self.spark.data_type
    if num_args == 1:
        # Only pass in the column if sfun accepts only one arg
        scol = cast(Callable[[Column], Column], sfun)(spark_column)
    else:  # must be 2
        assert num_args == 2
        # Pass in both the column and its data type if sfun accepts two args
        scol = cast(Callable[[Column, DataType], Column], sfun)(spark_column, spark_type)
    min_count = kwargs.get("min_count", 0)
    if min_count > 0:
        # Null out the result when fewer than min_count valid values exist
        # (pandas' min_count semantics); the null becomes NaN below.
        scol = F.when(Frame._count_expr(spark_column, spark_type) >= min_count, scol)
    result = unpack_scalar(self._internal.spark_frame.select(scol))
    return result if result is not None else np.nan
# Override the `groupby` to specify the actual return type annotation.
def groupby(
    self,
    by: Union[Name, "Series", List[Union[Name, "Series"]]],
    axis: Axis = 0,
    as_index: bool = True,
    dropna: bool = True,
) -> "SeriesGroupBy":
    # Delegate to the shared Frame implementation; only the return type
    # annotation is narrowed here.
    grouped = super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)
    return cast("SeriesGroupBy", grouped)

groupby.__doc__ = Frame.groupby.__doc__
    def _build_groupby(
        self, by: List[Union["Series", Label]], as_index: bool, dropna: bool
    ) -> "SeriesGroupBy":
        """Factory hook used by ``Frame.groupby`` to build the Series-flavored GroupBy."""
        # Imported locally to avoid a circular import with the groupby module.
        from pyspark.pandas.groupby import SeriesGroupBy
        return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)
    def __getitem__(self, key: Any) -> Any:
        """Label-based selection, falling back to positional (``iloc``) access
        for integer keys/slices when the index itself is not integer-typed."""
        try:
            if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or (
                type(key) == int
                and not isinstance(self.index.spark.data_type, (IntegerType, LongType))
            ):
                # Seems like pandas Series always uses int as positional search when slicing
                # with ints, searches based on index values when the value is int.
                return self.iloc[key]
            return self.loc[key]
        except SparkPandasIndexingError:
            # NOTE(review): assumes `key` is sized here (e.g. a tuple of index
            # labels); a non-sized key reaching this branch would raise
            # TypeError on len() — confirm upstream guarantees.
            raise KeyError(
                "Key length ({}) exceeds index depth ({})".format(
                    len(key), self._internal.index_level
                )
            )
def __getattr__(self, item: str_type) -> Any:
if item.startswith("__"):
raise AttributeError(item)
if hasattr(MissingPandasLikeSeries, item):
property_or_func = getattr(MissingPandasLikeSeries, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'Series' object has no attribute '{}'".format(item))
    def _to_internal_pandas(self) -> pd.Series:
        """
        Return a pandas Series directly from _internal to avoid overhead of copy.

        This method is for internal use only.
        """
        # Selects this Series' column from the parent frame's pandas view.
        return self._psdf._internal.to_pandas_frame[self.name]
    def __repr__(self) -> str_type:
        """Render at most ``display.max_rows`` rows, appending a truncation footer."""
        max_display_count = get_option("display.max_rows")
        if max_display_count is None:
            # No display limit configured: render the full Series.
            return self._to_internal_pandas().to_string(name=self.name, dtype=self.dtype)
        # Fetch up to max_display_count + 1 rows (cached on the parent frame)
        # so we can detect whether truncation occurred.
        pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]
        pser_length = len(pser)
        pser = pser.iloc[:max_display_count]
        if pser_length > max_display_count:
            # Rewrite pandas' "Length: N" footer into a "Showing only the
            # first N" note, preserving name/dtype information.
            repr_string = pser.to_string(length=True)
            rest, prev_footer = repr_string.rsplit("\n", 1)
            match = REPR_PATTERN.search(prev_footer)
            if match is not None:
                length = match.group("length")
                dtype_name = str(self.dtype.name)
                if self.name is None:
                    footer = "\ndtype: {dtype}\nShowing only the first {length}".format(
                        length=length, dtype=pprint_thing(dtype_name)
                    )
                else:
                    footer = (
                        "\nName: {name}, dtype: {dtype}"
                        "\nShowing only the first {length}".format(
                            length=length, name=self.name, dtype=pprint_thing(dtype_name)
                        )
                    )
                return rest + footer
        return pser.to_string(name=self.name, dtype=self.dtype)
def __dir__(self) -> Iterable[str_type]:
if not isinstance(self.spark.data_type, StructType):
fields = []
else:
fields = [f for f in self.spark.data_type.fieldNames() if " " not in f]
return list(super().__dir__()) + fields
    def __iter__(self) -> None:
        # Iteration is deliberately unsupported (it would require collecting
        # the distributed data); delegate to the Missing* stub, which is
        # expected to raise — confirm against MissingPandasLikeSeries.
        return MissingPandasLikeSeries.__iter__(self)
    # Version-dependent support for parameterized type hints (Series[int]).
    if sys.version_info >= (3, 7):
        # In order to support the type hints such as Series[...]. See DataFrame.__class_getitem__.
        def __class_getitem__(cls, params: Any) -> Type[SeriesType]:
            return _create_type_for_series_type(params)
    elif (3, 5) <= sys.version_info < (3, 7):
        # The implementation is in its metaclass so this flag is needed to distinguish
        # pandas-on-Spark Series.
        is_series = None
def unpack_scalar(sdf: SparkDataFrame) -> Any:
    """
    Takes a dataframe that is supposed to contain a single row with a single scalar value,
    and returns this value.
    """
    # limit(2) keeps the collect cheap while still letting the assertion
    # detect a frame that unexpectedly holds more than one row.
    pdf = cast(pd.DataFrame, sdf.limit(2).toPandas())
    assert len(pdf) == 1, (sdf, pdf)
    row = pdf.iloc[0]
    values = list(row)
    assert len(values) == 1, (row, values)
    return values[0]
# Typing-only overloads: map pandas-on-Spark DataFrame -> Series and
# pandas DataFrame -> pandas Series for static checkers.
@overload
def first_series(df: DataFrame) -> Series:
    ...
@overload
def first_series(df: pd.DataFrame) -> pd.Series:
    ...
def first_series(df: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:
    """
    Takes a DataFrame and returns the first column of the DataFrame as a Series
    """
    assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)
    if isinstance(df, pd.DataFrame):
        return df[df.columns[0]]
    # pandas-on-Spark frame: resolve via the internal column-label mapping.
    return df._psser_for(df._internal.column_labels[0])
def _test() -> None:
    """Run this module's doctests against a local SparkSession."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.series
    # Doctests assume SPARK_HOME as the working directory (relative data paths).
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.series.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]").appName("pyspark.pandas.series tests").getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.series,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    if failure_count:
        sys.exit(-1)
# Script entry point: run the doctest suite.
if __name__ == "__main__":
    _test()
| apache-2.0 |
jvivian/rnaseq-lib | src/rnaseq_lib/plot/__init__.py | 1 | 30364 | from __future__ import division
import holoviews as hv
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from rnaseq_lib.diff_exp import log2fc, de_pearson_dataframe
from rnaseq_lib.dim_red import run_tsne, run_trimap
from rnaseq_lib.math import l2norm, iqr_bounds
from rnaseq_lib.plot.opts import *
from rnaseq_lib.tissues import subset_by_dataset
class Holoview:
    """
    Object for Holoviews plots of gene expression data. Created for use with HoloMap and DynamicMap,
    which cannot accept dataframes as arguments. This class circumvents that limitation by
    referencing the dataframe internally (``self.df``), so plot methods only take plain
    hashable arguments (gene names, tissue lists, flags).
    """
    def __init__(self, df):
        """
        :param pd.DataFrame df: Dataframe containing metadata / expression values (Synapse.org: syn11515015)
        """
        self.df = df
        # Metadata columns kept alongside gene columns by _subset().
        self.df_cols = ['id', 'tumor', 'tissue', 'type', 'label']
        # Style attributes - used in conjunction with '.opts()'
        self._gene_curves_opts = gene_curves_opts
        self._gene_kde_opts = gene_kde_opts
        self._gene_distribution_opts = gene_distribution_opts
        self._gene_de_opts = gene_de_opts
        self._sample_count_opts = sample_count_opts
        self._l2fc_by_perc_samples_opts = l2fc_by_perc_samples_opts
        self._gene_de_heatmap_opts = gene_de_heatmap_opts
        self._de_concordance_opts = de_concordance_opts
        self._dist_with_iqr_bounds_opts = dist_with_iqr_bounds_opts
        self._dr_opts = dr_opts
        self._tissue_de_opts = tissue_de_opts
        # Hacky, but 5S_rRNA is the first gene in the annotation set
        # NOTE(review): self.samples is assigned here, again inside the try
        # block, and reset to None on failure — the first assignment is
        # redundant and gets clobbered when the lookup below fails.
        self.samples = self.df.index.tolist()
        try:
            # Everything at/after the 5S_rRNA column is a gene column;
            # everything before it is metadata.
            self._gene_start = self.df.columns.tolist().index('5S_rRNA')
            self.genes = self.df.columns[self._gene_start:].tolist()
            self.meta_cols = self.df.columns[:self._gene_start]
            self.samples = self.df.index.tolist()
            self.tissues = sorted(self.df.tissue.unique().tolist())
        except (ValueError, AttributeError) as e:
            # Dataframe lacks the expected layout; degrade gracefully.
            print 'Missing attributes: \n{}'.format(e.message)
            self._gene_start = None
            self.genes = None
            self.meta_cols = None
            self.samples = None
            self.tissues = None
# Internal methods
def _subset(self, genes=None, tissue_subset=None):
# Subset dataframe by gene
df = self.df[self.df_cols + genes] if genes else self.df
# Subset by tissues
if tissue_subset:
df = df[df.tissue.isin(tissue_subset)]
return df
    def _gene_cutoff(self, gene, tissue, percent):
        """Return the log2 expression value at the top-*percent* rank of *gene*
        within *tissue*, separately for each dataset (tumor / normal / gtex)."""
        # Subset dataframe by tissue and gene
        df = self._subset([gene], [tissue])
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Calculate gene expression cutoffs for each dataset
        cutoffs = [x[gene].apply(l2norm).sort_values(ascending=False).iloc[int(len(x) * percent) - 1]
                   for x in [tumor, normal, gtex]]
        # Return mapping of dataset to cutoff
        return {x: y for x, y in zip(['tumor', 'normal', 'gtex'], cutoffs)}
    def _sample_counts_df(self, groupby='tissue'):
        """
        Compute sample counts and returns dataframe

        :param str groupby: Metadata column to group counts by (e.g. 'tissue' or 'type')
        :return: Sample counts for tissues and datasets
        :rtype: pd.DataFrame
        """
        # Cast value_counts as dataframe
        vc = pd.DataFrame(self.df.groupby(groupby).label.value_counts())
        # Relabel column and reset_index to cast multi-index as columns
        vc.columns = ['counts']
        vc.reset_index(inplace=True)
        return vc.sort_values([groupby, 'label'])
def perc_tumor_overexpressed(self, gene, tissue_subset=None):
"""
Calculate the percent of tumor samples that overexpress a gene relative to the combined normal
distribution of all samples (in tissue_subset) as well as compared to normals in the same tissue
:param str gene: Gene (ex: ERBB2) to select
:param list tissue_subset: List of tissues to subset by
:return: Table of tissues and corresponding percentages pertaining to upper bound cutoff
:rtype: pd.DataFrame
"""
# Subset by gene and tissue
df = self._subset(gene, tissue_subset)
# Calculate upper and lower bounds across all tissues
upper, lower = iqr_bounds(df[df.tumor == 'no'][gene].apply(l2norm))
records = []
for tissue in sorted(df.tissue.unique()):
# Calclulate upper/lower bound for normal
normals = df[(df.tumor == 'no') & (df.tissue == tissue)][gene].apply(l2norm)
n_upper, n_lower = iqr_bounds(normals)
# Calculate expression for tumor
exp = df[(df.tissue == tissue) & (df.tumor == 'yes')][gene].apply(l2norm)
# Calculate percentage cut offs
perc = len([x for x in exp if x > upper]) / len(exp)
n_perc = len([x for x in exp if x > n_upper]) / len(exp)
records.append([tissue, perc, n_perc])
# Save, sort, and return output dataframe
df = pd.DataFrame.from_records(records, columns=['Tissue', 'Upper', 'T_Upper'])
return df.sort_values('T_Upper', ascending=False)
# Gene Plots
    def gene_kde(self, gene, tissue_subset=None, tumor=True, normal=True, gtex=True,
                 normalize=True, groupby='type', unit='Transcripts Per Million'):
        """
        Returns KDE of gene expression (log2) for given tissue

        :param str gene: Gene (ex: ERBB2) to select
        :param list tissue_subset: List of tissues to subset by
        :param bool tumor: If True, include tumor samples
        :param bool normal: If True, include normal samples
        :param bool gtex: If True, include gtex samples
        :param bool normalize: If True, log2-normalize expression values
        :param str groupby: Metadata column used to split distributions (e.g. 'type')
        :param str unit: Unit label for the x-axis dimension
        :return: Returns holoviews Overlay object of gene KDE
        :rtype: hv.Overlay
        """
        # Subset dataframe by tissue and gene
        df = self._subset([gene], tissue_subset)
        # Subset by dataset
        t, n, g = subset_by_dataset(df)
        # Gene dim
        kdim = hv.Dimension(gene, label='{} {}'.format(gene, unit), unit='log2(x + 0.001)')
        # Create KDE objects for each tissue/type and dataset
        dists = []
        for group in df[groupby].unique():
            for label, dataset, flag in zip(['Tumor', 'GTEx', 'Normal'], [t, g, n], [tumor, gtex, normal]):
                if flag:
                    gene_vals = dataset[dataset[groupby] == group][gene].apply(l2norm) \
                        if normalize else dataset[dataset[groupby] == group][gene]
                    # Add dists
                    dists.append(hv.Distribution(gene_vals, label='{}-{}'.format(label, group), kdims=kdim))
        # Combine into Overlay object
        return hv.Overlay(dists, label='{} Expression'.format(gene)).opts(self._gene_kde_opts)
    def gene_distribution(self, gene, tissue_subset=None, groupby='type', unit='log2(x+0.001)'):
        """
        Box and Whisker expression distribution across tissues

        :param str gene: Gene (ex: ERBB2) to select
        :param list tissue_subset: List of tissues to subset by
        :param str groupby: Metadata column used as the secondary key dimension (e.g. 'type')
        :param str unit: Unit label for the value dimension
        :return: Returns holoviews BoxWhisker object
        :rtype: hv.BoxWhisker
        """
        # Subset dataframe by gene
        df = self._subset([gene], tissue_subset)
        # Normalize gene expression
        df[gene] = df.loc[:, gene].apply(l2norm)
        df['type'] = df.loc[:, 'type'].apply(lambda x: x[:20])  # Add label limit
        # Define Dimensions
        kdims = [hv.Dimension(('label', 'Dataset')),
                 hv.Dimension((groupby, groupby.capitalize()))]
        vdims = hv.Dimension((gene, '{} Expression'.format(gene.capitalize())), unit=unit)
        # Return grouped box and whiskers:
        return hv.BoxWhisker(df, kdims=kdims, vdims=vdims,
                             label='{} Expression'.format(gene)).opts(self._gene_distribution_opts)
# Differential Expression
    def tissue_de(self, tissue, extents=None, tcga_normal=None, gene_labels=None):
        """
        Differential expression for a given tissue

        :param str tissue: Tissue to subset by
        :param tuple extents: xmin/ymin/xmax/ymax values
        :param bool tcga_normal: If True, uses TCGA normal for DE comparison (otherwise GTEx)
        :param dict gene_labels: Optional mapping of label -> list of genes; matching
            points get that label and a larger marker size
        :return: Scatterplot of DE
        :rtype: hv.Scatter
        """
        # Subset by tissue
        df = self._subset(genes=None, tissue_subset=[tissue])
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Subset by genes
        t_genes = tumor[tumor.columns[self._gene_start:]]
        if tcga_normal:
            n_genes = normal[normal.columns[self._gene_start:]]
            label = 'Normal'
        else:
            n_genes = gtex[gtex.columns[self._gene_start:]]
            label = 'GTEx'
        # Calculate total expression
        exp = pd.concat([t_genes, n_genes]).apply(l2norm).median()
        # Calculate L2FC
        l2fc = log2fc(t_genes.median(), n_genes.median())
        # Define dimensions
        kdims = [hv.Dimension('exp', label='Gene Expression', unit='log2(x+1)')]
        vdim = hv.Dimension('l2fc', label='Log2 Fold Change', unit='log2(Tumor/{})'.format(label))
        # One row per gene: expression, fold change, gene name
        plot = pd.DataFrame()
        plot['exp'] = exp
        plot['l2fc'] = l2fc
        plot['gene'] = exp.index
        plot.index = exp.index
        # Apply label column
        if gene_labels:
            # Default every gene to 'Other'/size 1, then bump genes found in
            # any gene_labels group to that label and a larger size.
            label_vector = ['Other' for _ in plot.index]
            size_vector = [1 for _ in plot.index]
            for k, v in gene_labels.iteritems():
                for i in xrange(len(plot.index)):
                    if plot.index[i] in v:
                        label_vector[i] = k
                        size_vector[i] = 5
            plot['label'] = label_vector
            plot['size'] = size_vector
            vdims = [vdim] + ['gene', 'label', 'size']
        else:
            vdims = [vdim, 'gene']
        if extents:
            return hv.Scatter(plot, kdims=kdims, vdims=vdims, extents=extents).opts(self._tissue_de_opts)
        else:
            return hv.Scatter(plot, kdims=kdims, vdims=vdims).opts(self._tissue_de_opts)
    def gene_de(self, gene, tissue_subset=None, extents=None, tcga_normal=False, groupby='type'):
        """
        Scatter plot of differential expression across all grouped types (tissue / type)

        :param str gene: Gene (ex: ERBB2) to select
        :param tuple extents: xmin/ymin/xmax/ymax values
        :param list tissue_subset: List of tissues to subset by
        :param bool tcga_normal: If True, will use TCGA normal for differential expression comparison
        :param str groupby: Metadata column to group points by (e.g. 'type')
        :return: Scatterplot of values
        :rtype: hv.Scatter
        """
        # Subset dataframe by gene and tissue subset
        df = self._subset([gene], tissue_subset)
        # For each tissue, calculate L2FC and mean expression
        records = []
        for group in sorted(df[groupby].unique()):
            # Subset by dataset
            tumor, normal, gtex = subset_by_dataset(df[df[groupby] == group])
            # Calculate tumor and normal expression and L2FC
            if tcga_normal:
                l2fc = log2fc(tumor[gene].median(), normal[gene].median())
                exp = pd.concat([tumor[gene], normal[gene]], axis=0).apply(l2norm).median()
                unit = 'log2(Tumor/Normal)'
            else:
                l2fc = log2fc(tumor[gene].median(), gtex[gene].median())
                exp = pd.concat([tumor[gene], gtex[gene]], axis=0).apply(l2norm).median()
                unit = 'log2(Tumor/GTEx)'
            # Store as record
            records.append((exp, l2fc, group))
        # Define dimensions of plot
        kdims = [hv.Dimension('Expression', label='Gene Expression', unit='log2(x+1)')]
        vdims = [hv.Dimension('L2FC', label='Fold Change', unit=unit), groupby.capitalize()]
        # Create dataframe
        plot = pd.DataFrame.from_records(records, columns=['Expression', 'L2FC', groupby.capitalize()])
        if extents:
            return hv.Scatter(plot, kdims=kdims, vdims=vdims, extents=extents).opts(self._gene_de_opts)
        else:
            return hv.Scatter(plot, kdims=kdims, vdims=vdims).opts(self._gene_de_opts)
    def gene_de_kde(self, gene, tissue_subset=None, tcga_normal=False):
        """
        KDE of L2FC values for the tumor as compared to the normal

        :param str gene: Gene (ex: ERBB2) to select
        :param list tissue_subset: List of tissues to subset by
        :param bool tcga_normal: If True, use TCGA normal to for DE calc, otherwise use GTEx
        :return: Collection of Distribution objects
        :rtype: hv.Overlay
        """
        # Subset dataframe by gene and tissue subset
        df = self._subset([gene], tissue_subset)
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Create X dimension
        xdim = hv.Dimension('Log2 Fold Change', unit='log2(a+1)/log2(b+1)')
        dists = []
        for tissue in df.tissue.unique():
            # Reference value: the normal cohort's median expression
            if tcga_normal:
                n = normal[normal.tissue == tissue][gene].median()
                label = 'Tumor-Normal-{}'.format(tissue)
            else:
                n = gtex[gtex.tissue == tissue][gene].median()
                label = 'Tumor-GTEx-{}'.format(tissue)
            # Calculate l2fc for each tumor sample and save
            l2fcs = []
            for i, row in tumor[tumor.tissue == tissue].iterrows():
                l2fcs.append(log2fc(row[gene], n))
            # Create distribution
            dists.append(hv.Distribution(l2fcs, kdims=[xdim], label=label))
        return hv.Overlay(dists, label='{} Expression'.format(gene)).opts(self._gene_kde_opts)
    def l2fc_by_perc_samples(self, gene, tissue_subset=None, tcga_normal=False, l2fc_cutoff=2):
        """
        Calculate the percentage of samples greater than a range of log2 fold change values

        :param str gene: Gene (ex: ERBB2) to select
        :param list tissue_subset: List of tissues to subset by
        :param bool tcga_normal: If True, use TCGA normal to for DE calc, otherwise use GTEx
        :param float l2fc_cutoff: Specifies the L2FC cutoff to draw a Spike object
        :return: Collection of Curve objects
        :rtype: hv.Overlay
        """
        # Subset dataframe by gene and tissue subset
        df = self._subset([gene], tissue_subset)
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Create X dimension
        xdim = hv.Dimension('Log2 Fold Change', unit='log2(a+1)/log2(b+1)')
        ydim = hv.Dimension('Tumor Samples With Greater L2FC', unit='%')
        # Calculate % samples over a given l2fc
        curves = []
        label = ''
        for tissue in sorted(df.tissue.unique()):
            # Reference value: the normal cohort's median expression
            if tcga_normal:
                n = normal[normal.tissue == tissue][gene].median()
                label = 'Tumor-Normal'
            else:
                n = gtex[gtex.tissue == tissue][gene].median()
                label = 'Tumor-GTEx'
            # Calculate l2fc for each tumor sample and save
            l2fcs = []
            for i, row in tumor[tumor.tissue == tissue].iterrows():
                l2fcs.append(log2fc(row[gene], n))
            # Sweep cutoffs from 0 to max(l2fcs) in 0.1 steps; record the
            # percentage of tumor samples at or above each cutoff
            percentages = {}
            l2fc_range = [x * 0.1 for x in xrange(0, int(np.ceil(max(l2fcs) * 10)))]
            for l2fc in l2fc_range:
                percentages[l2fc] = len([x for x in l2fcs if x >= l2fc]) / len(l2fcs) * 100
            # Create line object
            curves.append(hv.Area(percentages, kdims=[xdim], vdims=[ydim], label=tissue))
        # Return curves along with a Spikes object at the l2fc cutoff
        overlay = hv.Overlay(curves + [hv.Spikes([l2fc_cutoff])], label='{} {} Expression'.format(label, gene))
        return overlay.opts(self._l2fc_by_perc_samples_opts)
def gene_de_heatmap(self, genes, tissue_subset=None, tcga_normal=False):
"""
Heatmap of gene log2 fold change
:param list(str) genes: Gene (ex: ERBB2) to select
:param list tissue_subset: List of tissues to subset by
:param bool tcga_normal: If True, use TCGA normal to for DE calc, otherwise use GTEx
:return: DE Heatmap of genes for tissue subset
:rtype: hv.HeatMap
"""
# Subset dataframe by genes
df = self.df[self.df_cols + genes]
# Subset by tissues
if tissue_subset:
df = df[df.tissue.isin(tissue_subset)]
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df)
# For each tissue/gene, calculate L2FC
records = []
for tissue in sorted(df.tissue.unique()):
for gene in genes:
# Calculate mean expression for normal
if tcga_normal:
n = normal[normal.tissue == tissue][gene].median()
else:
n = gtex[gtex.tissue == tissue][gene].median()
# Calculate expression for tumor and compute l2fc
t = tumor[tumor.tissue == tissue][gene].median()
l2fc = log2fc(t, n)
records.append([tissue, gene, l2fc])
# Create dataframe and define dimensions
df = pd.DataFrame.from_records(records, columns=['Tissue', 'Gene', 'L2FC']).sort_values('Tissue')
return hv.HeatMap(df, kdims=['Gene', 'Tissue'], vdims=['L2FC']).opts(self._gene_de_heatmap_opts)
    def tissue_top_de_genes(self, tissue):
        """HoloMap comparing GTEx-based vs TCGA-normal-based L2FC for the top-N
        differentially expressed, above-median-expression genes in *tissue*.

        :param str tissue: Tissue to subset by
        :return: HoloMap keyed by number of top genes considered
        :rtype: hv.HoloMap
        """
        # Create DE objects to get data
        gtex = self.tissue_de(tissue).data
        tcga = self.tissue_de(tissue, tcga_normal=True).data
        intervals = [10, 100, 500, 1000, 5000, 10000, len(self.genes)]
        # Calculate maximum arange for plot
        reg_line_arange = gtex[gtex.exp > gtex.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()
        # Top DE genes with high expression
        hmaps = {}
        for i in intervals:
            # Top-i L2FC values among genes expressed above the median
            x = gtex[gtex.exp > gtex.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()[:i]
            y = tcga[tcga.exp > tcga.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()[:i]
            scatter = hv.Scatter((x, y), kdims=['GTEx L2FC'], vdims=['TCGA L2FC'])
            reg_line = hv.Curve(self.regression_line(x, y, arange=reg_line_arange))
            pearson_r = round(pearsonr(x, y)[0], 2)
            title = 'R: {}'.format(pearson_r)
            hmaps[i] = hv.Overlay([scatter, reg_line]).relabel(title)
        return hv.HoloMap(hmaps, kdims='Num_Genes').relabel('Top DE Gene L2FC in {}'.format(tissue))
# Misc plots
    def dist_with_iqr_bounds(self, ys, kdim):
        """
        Creates distribution object with IQR bounds

        :param list ys: List of values to calculate IQR and bounds
        :param str kdim: K-dimension label for distribution
        :return: Distribution with IQR bounds
        :rtype: hv.Overlay
        """
        # Calculate IQR and outlier bounds
        q25, q75 = np.percentile(ys, [25, 75])
        upper, lower = iqr_bounds(ys)
        # Overlay the KDE with spikes at the quartiles and at the outlier bounds
        return hv.Overlay([hv.Distribution(ys, kdims=[kdim]),
                           hv.Spikes([q25, q75]),
                           hv.Spikes([lower, upper])]).opts(self._dist_with_iqr_bounds_opts)
@staticmethod
def regression_line(x, y, arange=None):
"""
Returns x/y vectors of a regression line for 2D input
:param np.array x: Vector of x values
:param np.array y: Vector of y values
:param np.array arange: Provide a custom arange to generate regression line
:return: Regression line vectors
:rtype: tuple(np.array, np.array)
"""
m, b = np.polyfit(x, y, 1)
reg_x = np.arange(min(arange), max(arange)) if arange else np.arange(min(x), max(x))
return reg_x, m * reg_x + b
    @staticmethod
    def path_box(xmin, xmax, ymin, ymax, color=None):
        """
        Returns rectangular Path object for a given set of x/y coordinates

        :param float xmin: xmin of box
        :param float xmax: xmax of box
        :param float ymin: ymin of box
        :param float ymax: ymax of box
        :param str color: Set the color of the Path object
        :return: Rectangular path object
        :rtype: hv.Path
        """
        # Closed rectangle: corners in order, repeating the first to close the path
        path = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]
        if color:
            return hv.Path([path]).opts(dict(Path=dict(style=dict(color=color))))
        else:
            return hv.Path([path])
    def highlight_points(self, xs, ys, size=0.1, color=None, hidden_buffer_box=True):
        """
        Returns a rectangular Path object for a set of points

        :param list|float xs: List of x coordinates or a single x coord
        :param list|float ys: List of y coordinates or a single y coord
        :param float size: Margin around xmin,xmax,ymin,ymax of points
        :param str color: Set the color of the Path object
        :param bool hidden_buffer_box: Adds a transparent larger frame around the Path object to improve plot margins
        :return: Rectangular Path object
        :rtype: hv.Path
        """
        # If a set of single points
        if isinstance(xs, (int, float)) and isinstance(ys, (int, float)):
            xs, ys = [xs], [ys]
        # Collect mins and maxes from all points
        xmin, xmax, ymin, ymax = min(xs), max(xs), min(ys), max(ys)
        # Add margins
        xmin, xmax, ymin, ymax = xmin - size, xmax + size, ymin - size, ymax + size
        # Create Path object
        plot = self.path_box(xmin, xmax, ymin, ymax, color=color)
        # If hidden_buffer_box is enabled, add an invisible (alpha=0) box one
        # margin further out so the plot frame leaves space around the highlight
        if hidden_buffer_box:
            xmin, xmax, ymin, ymax = xmin - size, xmax + size, ymin - size, ymax + size
            hbb = self.path_box(xmin, xmax, ymin, ymax).opts(dict(Path=dict(style=dict(alpha=0))))
            return plot * hbb
        else:
            return plot
    def gene_curves(self, gene, tissue):
        """
        Returns set of 3 plots for tissue / gene given a dataframe of metadata and expression values

        :param str gene: Gene (ex: ERBB2) to select
        :param str tissue: Tissue (ex: Breast) to select
        :return: Returns holoviews Layout object containing 3 plots for selected Tisssue / Gene
        :rtype: hv.Layout
        """
        # Subset dataframe for gene and tissue
        df = self._subset([gene], [tissue])
        # Logscale gene for calculations
        df[gene] = df[gene].apply(l2norm)
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Sweep the top 10%..100% of tumor samples in 10% steps
        records = []
        for perc_tumor in [x * 0.1 for x in xrange(1, 11)]:
            # Get log2 expression value for top x% tumor samples
            exp = float(tumor.iloc[int(len(tumor) * perc_tumor) - 1][gene])
            # Get percentage of samples in GTEx
            perc_normal = (len(gtex[gtex[gene] > exp]) * 1.0) / len(gtex)
            # Compute L2FC for tumor sample subset vs GTEx
            # (2 ** x - 1 undoes the earlier log2 normalization before the median)
            tumor_mean = tumor.iloc[:int(len(tumor) * perc_tumor) - 1][gene].apply(lambda x: 2 ** x - 1).median()
            gtex_mean = gtex[gene].apply(lambda x: 2 ** x - 1).median()
            l2fc = log2fc(tumor_mean, gtex_mean)
            # Store
            records.append((tissue, exp, l2fc, perc_tumor, perc_normal, len(gtex), len(tumor), 'GTEx'))
        # Create dataframe from records
        info = pd.DataFrame.from_records(records, columns=['tissue', 'expression',
                                                           'l2fc',
                                                           'percent_tumor',
                                                           'percent_normal',
                                                           'num_normals', 'num_tumors',
                                                           'normal_dataset'])
        # Define dimensions
        tissue_dim = hv.Dimension('tissue', label='Tissue')
        ptumor_dim = hv.Dimension('percent_tumor', label='% Tumor')
        pnormal_dim = hv.Dimension('percent_normal', label='percent')
        l2fc_dim = hv.Dimension('l2fc', label='log2FC')
        exp_dim = hv.Dimension('expression', label='log2(x+1)')
        # First plot - Percentage of Normal Samples
        c1 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[pnormal_dim, tissue_dim], group='Percentage of Normal Samples',
                      extents=(None, 0, None, 1))
        s1 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[pnormal_dim, tissue_dim], group='Percentage of Normal Samples')
        # Second Plot - Expression
        c2 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[exp_dim, tissue_dim], group='Gene Expression',
                      extents=(None, 0, None, 16))
        s2 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[exp_dim, tissue_dim], group='Gene Expression')
        # Third Plot - Log2 Fold Change
        c3 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[l2fc_dim, tissue_dim], group='Log2 Fold Change',
                      extents=(None, -0.5, None, 8))
        s3 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[l2fc_dim, tissue_dim], group='Log2 Fold Change')
        # Stack the three curve/scatter overlays vertically
        return (c1 * s1 + c2 * s2 + c3 * s3).cols(1)
    def sample_counts(self, tissue_subset=None, groupby='tissue'):
        """
        Bargraph of tissues grouped by dataset

        :param list tissue_subset: NOTE(review): accepted but currently unused —
            counts always cover the full dataframe; confirm intent.
        :param str groupby: Metadata column to group counts by
        :return: Bargraph of sample counts
        :rtype: hv.Bars
        """
        df = self._sample_counts_df(groupby=groupby)
        # Define dimensions
        tissue_dim = hv.Dimension(groupby, label=groupby.capitalize())
        label_dim = hv.Dimension('label', label='Label')
        count_dim = hv.Dimension('counts', label='Count')
        # Return Bars object of sample counts
        return hv.Bars(df, kdims=[tissue_dim, label_dim], vdims=[count_dim],
                       label='Sample Counts for TCGA and GTEx').opts(self._sample_count_opts)
    def de_concordance(self, tissue_subset=None, pair_by='type', normalize=True, gtex=True, tcga=True):
        """
        Categorical scatterplot of concordance between tissues for gene differential expression

        :param list tissue_subset: List of tissues to subset by
        :param str pair_by: Metadata column used to pair tumor/normal groups
        :param bool normalize: If True, min-max normalize each column of the PearsonR matrix
        :param bool gtex: If True, includes GTEx in normal set
        :param bool tcga: if True, includes TCGA in normal set
        :return: Heatmap of differential expression comparison across tissue
        :rtype: hv.HeatMap
        """
        df = self._subset(genes=None, tissue_subset=tissue_subset)
        # Get differential expression pearsonR values
        de = de_pearson_dataframe(df, genes=self.genes, pair_by=pair_by, gtex=gtex, tcga=tcga)
        # If normalize, normalize columns 0 to 1
        if normalize:
            de = (de - de.min()) / (de.max() - de.min())
        # Convert matrix into 3-column long form (one row per tissue pair)
        de = de.stack().reset_index()
        de.columns = ['Normal', 'TCGA_Tumor/Normal', 'PearsonR']
        # Return HeatMap object
        label = 'Differential Expression Gene Concordance (PearsonR) by {pair_by}'.format(pair_by=pair_by)
        return hv.HeatMap(de, kdims=['TCGA_Tumor/Normal', 'Normal'], vdims=['PearsonR'],
                          label=label).opts(self._de_concordance_opts)
# Dimensionality Reduction
    def trimap(self, genes, tissue_subset=None, kin=50, kout=5, krand=5, eta=10000.0):
        """
        Dimensionality reduction via Trimap

        :param list(str) genes: List of genes to subset by
        :param list(str) tissue_subset: List of tissues to subset by
        :param int kin: Number of k-Nearest Neighbor points
        :param int kout: Number of outliers (num triplets per point = kin * kout)
        :param int krand: Number of random triplets per point
        :param float eta: Initial learning rate
        :return: Scatterplot of dimensionality reduction
        :rtype: hv.Scatter
        """
        # Subset dataframe by genes (keeping some metadata)
        df = self.df[self.df_cols + genes].sort_values('tissue')
        # Subset by tissues
        if tissue_subset:
            df = df[df.tissue.isin(tissue_subset)]
        # Check DTYPE - TriMap will only run with float64
        data = df[genes].astype(np.float64) if df[genes[0]].dtype != 'float64' else df[genes]
        # Run Trimap (used to be called t-ETE)
        z = run_trimap(data, num_dims=2, kin=kin, kout=kout, krand=krand, eta=eta)
        # Attach the 2-D embedding and sample ids back onto the metadata frame
        df['x'] = z[:, 0]
        df['y'] = z[:, 1]
        df['sample'] = df.index
        return hv.Scatter(df, kdims=['x'], vdims=['y', 'sample'] + self.df_cols).opts(self._dr_opts)
    def tsne(self, genes, tissue_subset=None, perplexity=50, learning_rate=1000):
        """
        Dimensionality reduction via t-SNE

        :param list(str) genes: List of genes to subset by
        :param list(str) tissue_subset: List of tissues to subset by
        :param int perplexity: Hyperparamter for t-SNE
        :param int learning_rate: Hyperparamter for t-SNE
        :return: Scatterplot of dimensionality reduction
        :rtype: hv.Scatter
        """
        # Subset dataframe by genes (keeping some metadata)
        df = self.df[self.df_cols + genes].sort_values('tissue')
        # Subset by tissues
        if tissue_subset:
            df = df[df.tissue.isin(tissue_subset)]
        # Run t-SNE
        z = run_tsne(df[genes], num_dims=2, perplexity=perplexity, learning_rate=learning_rate)
        # Attach the 2-D embedding and sample ids back onto the metadata frame
        df['x'] = z[:, 0]
        df['y'] = z[:, 1]
        df['sample'] = df.index
        return hv.Scatter(df, kdims=['x'], vdims=['y', 'sample'] + self.df_cols).opts(self._dr_opts)
def disable_logo(plot, element):
    """Bokeh finalize hook: remove the Bokeh logo from the plot toolbar."""
    plot.state.toolbar.logo = None
# hv.plotting.bokeh.ElementPlot.finalize_hooks.append(disable_logo)
| mit |
kleskjr/scipy | scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
    """
    NearestNDInterpolator(points, values)
    Nearest-neighbour interpolation in N dimensions.
    .. versionadded:: 0.9
    Methods
    -------
    __call__
    Parameters
    ----------
    x : (Npoints, Ndims) ndarray of floats
        Data point coordinates.
    y : (Npoints,) ndarray of float or complex
        Data values.
    rescale : boolean, optional
        Rescale points to unit cube before performing interpolation.
        This is useful if some of the input dimensions have
        incommensurable units and differ by many orders of magnitude.
        .. versionadded:: 0.14.0
    tree_options : dict, optional
        Options passed to the underlying ``cKDTree``.
        .. versionadded:: 0.17.0
    Notes
    -----
    Uses ``scipy.spatial.cKDTree``
    """
    def __init__(self, x, y, rescale=False, tree_options=None):
        # need_contiguous/need_values are False: the kd-tree copies what it
        # needs and values are stored separately below.
        NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
                                    need_contiguous=False,
                                    need_values=False)
        if tree_options is None:
            tree_options = dict()
        # Build the kd-tree once; queries in __call__ are then O(log n).
        self.tree = cKDTree(self.points, **tree_options)
        self.values = y
    def __call__(self, *args):
        """
        Evaluate interpolator at given points.
        Parameters
        ----------
        xi : ndarray of float, shape (..., ndim)
            Points where to interpolate data at.
        """
        xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
        xi = self._check_call_shape(xi)
        # Apply the same rescaling used at construction time (no-op if rescale=False).
        xi = self._scale_x(xi)
        # Nearest-neighbour lookup; return the stored value of each nearest point.
        dist, i = self.tree.query(xi)
        return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
             rescale=False):
    """
    Interpolate unstructured D-dimensional data.

    Parameters
    ----------
    points : ndarray of floats, shape (n, D)
        Data point coordinates. Can either be an array of
        shape (n, D), or a tuple of `ndim` arrays.
    values : ndarray of float or complex, shape (n,)
        Data values.
    xi : ndarray of float, shape (M, D)
        Points at which to interpolate data.
    method : {'linear', 'nearest', 'cubic'}, optional
        Method of interpolation.  ``nearest`` returns the value at the data
        point closest to the point of interpolation (see
        `NearestNDInterpolator`).  ``linear`` tessellates the input point
        set into n-dimensional simplices and interpolates linearly on each
        simplex (see `LinearNDInterpolator`).  ``cubic`` returns a value
        determined from a cubic spline in 1-D, or from a piecewise cubic,
        continuously differentiable (C1), approximately
        curvature-minimizing polynomial surface in 2-D (see
        `CloughTocher2DInterpolator`).
    fill_value : float, optional
        Value used to fill in for requested points outside of the
        convex hull of the input points.  If not provided, then the
        default is ``nan``. This option has no effect for the
        'nearest' method.
    rescale : bool, optional
        Rescale points to unit cube before performing interpolation.
        This is useful if some of the input dimensions have
        incommensurable units and differ by many orders of magnitude.

        .. versionadded:: 0.14.0

    Notes
    -----
    .. versionadded:: 0.9

    Examples
    --------
    Interpolate scattered samples of a function onto a regular grid:

    >>> from scipy.interpolate import griddata
    >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
    >>> points = np.random.rand(1000, 2)
    >>> values = np.sin(points[:, 0] * points[:, 1])
    >>> grid_z = griddata(points, values, (grid_x, grid_y), method='cubic')
    """
    points = _ndim_coords_from_arrays(points)
    # A flat coordinate array means 1-D data; otherwise the trailing axis
    # carries the space dimension.
    ndim = points.ndim if points.ndim < 2 else points.shape[-1]
    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
        # 1-D problems are delegated to interp1d, which requires sorted x.
        from .interpolate import interp1d
        points = points.ravel()
        if isinstance(xi, tuple):
            if len(xi) != 1:
                raise ValueError("invalid number of dimensions in xi")
            xi, = xi
        order = np.argsort(points)
        points = points[order]
        values = values[order]
        if method == 'nearest':
            # Nearest-neighbour has no convex hull outside of which to fill,
            # so extrapolate instead.
            fill_value = 'extrapolate'
        ip = interp1d(points, values, kind=method, axis=0,
                      bounds_error=False, fill_value=fill_value)
        return ip(xi)
    if method == 'nearest':
        return NearestNDInterpolator(points, values, rescale=rescale)(xi)
    if method == 'linear':
        interp = LinearNDInterpolator(points, values, fill_value=fill_value,
                                      rescale=rescale)
        return interp(xi)
    if method == 'cubic' and ndim == 2:
        interp = CloughTocher2DInterpolator(points, values,
                                            fill_value=fill_value,
                                            rescale=rescale)
        return interp(xi)
    raise ValueError("Unknown interpolation method %r for "
                     "%d dimensional data" % (method, ndim))
| bsd-3-clause |
f3r/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 21 | 25117 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
    # Guard helper: warning-capture tests need Python >= 2.6; skip otherwise.
    if version_info < (2, 6):
        raise SkipTest("Testing for warnings is not supported in versions \
        older than Python 2.6")
def test_lasso_zero():
    # Check that the lasso can handle zero data without crashing
    X = [[0], [0], [0]]
    y = [0, 0, 0]
    clf = Lasso(alpha=0.1).fit(X, y)
    pred = clf.predict([[1], [2], [3]])
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(pred, [0, 0, 0])
    assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
    # Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet notice that glmnet divides it
    # against nobs.
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1] # just a straight line
    T = [[2], [3], [4]] # test sample
    # Almost no regularization: the exact line should be recovered.
    clf = Lasso(alpha=1e-8)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    # Moderate penalty shrinks the slope towards zero.
    clf = Lasso(alpha=0.1)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [.85])
    assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = Lasso(alpha=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [.25])
    assert_array_almost_equal(pred, [0.5, 0.75, 1.])
    assert_almost_equal(clf.dual_gap_, 0)
    # Strong penalty drives the coefficient all the way to zero.
    clf = Lasso(alpha=1)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [.0])
    assert_array_almost_equal(pred, [0, 0, 0])
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
    # Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameters alpha = 0 should not be allowed. However,
    # we test it as a border case.
    # ElasticNet is tested with and without precomputed Gram matrix
    X = np.array([[-1.], [0.], [1.]])
    Y = [-1, 0, 1] # just a straight line
    T = [[2.], [3.], [4.]] # test sample
    # this should be the same as lasso
    clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
                     precompute=False)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    # Same fit, but letting the estimator precompute the Gram matrix itself.
    clf.set_params(max_iter=100, precompute=True)
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    # Same fit again, this time passing an explicitly precomputed Gram matrix.
    clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1):
    """
    build an ill-posed linear regression problem with many noisy features and
    comparatively few samples
    """
    # Fixed seed keeps all tests that share this fixture deterministic.
    random_state = np.random.RandomState(0)
    if n_targets > 1:
        w = random_state.randn(n_features, n_targets)
    else:
        w = random_state.randn(n_features)
    # Only the first n_informative_features coefficients are non-zero.
    w[n_informative_features:] = 0.0
    X = random_state.randn(n_samples, n_features)
    y = np.dot(X, w)
    X_test = random_state.randn(n_samples, n_features)
    y_test = np.dot(X_test, w)
    return X, y, X_test, y_test
def test_lasso_cv():
    # Cross-validated Lasso: alpha selection with and without precompute,
    # and agreement with the LARS-based implementation.
    X, y, X_test, y_test = build_dataset()
    max_iter = 150
    clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    # Check that the lars and the coordinate descent implementation
    # select a similar alpha
    lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that they don't fall in the grid of
    # clf.alphas further than 1
    assert_true(np.abs(
        np.searchsorted(clf.alphas_[::-1], lars.alpha_)
        - np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
    # check that they also give a similar MSE
    mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
    np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
                                   clf.mse_path_[5].mean(), significant=2)
    # test set
    assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
    # positive=True must force all cross-validated coefficients >= 0.
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Ensure the unconstrained fit has a negative coefficient
    clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
                                n_jobs=1)
    clf_unconstrained.fit(X, y)
    assert_true(min(clf_unconstrained.coef_) < 0)
    # On same data, constrained fit has non-negative coefficients
    clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                              positive=True, cv=2, n_jobs=1)
    clf_constrained.fit(X, y)
    assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
    # Test that lasso_path with lars_path style output gives the
    # same result
    # Some toy data
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    alphas = [5., 1., .5]
    # Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
    alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
    coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
                                               coef_path_lars[:, ::-1])
    alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
                                                    return_models=False)
    coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
                                                coef_path_lasso2[:, ::-1])
    assert_array_almost_equal(
        coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
        decimal=1)
def test_enet_path():
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
                                         n_informative_features=100)
    max_iter = 150
    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
    clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
                       l1_ratio=[0.5, 0.7], cv=3,
                       max_iter=max_iter, precompute=True)
    ignore_warnings(clf.fit)(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)
    # Multi-output/target case
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
                                cv=3, max_iter=max_iter)
    ignore_warnings(clf.fit)(X, y)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert_greater(clf.score(X_test, y_test), 0.99)
    assert_equal(clf.coef_.shape, (3, 10))
    # Mono-output should have same cross-validated alpha_ and l1_ratio_
    # in both cases.
    X, y, _, _ = build_dataset(n_features=10)
    clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf2.fit(X, y[:, np.newaxis])
    assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
    assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
    # Path parameters (l1_ratio, n_alphas) must be honoured and reflected
    # in the fitted attributes.
    X, y, _, _ = build_dataset()
    max_iter = 100
    clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, tol=1e-3)
    clf.fit(X, y)  # new params
    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(50, clf.n_alphas)
    assert_equal(50, len(clf.alphas_))
def test_warm_start():
    # Two warm-started fits of 5 iterations must match one cold fit of 10.
    X, y, _, _ = build_dataset()
    clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
    ignore_warnings(clf.fit)(X, y)
    ignore_warnings(clf.fit)(X, y)  # do a second round with 5 iterations
    clf2 = ElasticNet(alpha=0.1, max_iter=10)
    ignore_warnings(clf2.fit)(X, y)
    assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
    # alpha=0 is degenerate (plain least squares); a UserWarning is expected.
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1] # just a straight line
    clf = Lasso(alpha=0)
    assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
    # positive=True must clip the naturally-negative slope to >= 0,
    # with and without Gram precomputation.
    X = [[-1], [0], [1]]
    y = [1, 0, -1] # just a straight line with negative slope
    lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
    lasso.fit(X, y)
    assert_true(min(lasso.coef_) >= 0)
    lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
    lasso.fit(X, y)
    assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
    # Same non-negativity guarantee for ElasticNet.
    X = [[-1], [0], [1]]
    y = [1, 0, -1] # just a straight line with negative slope
    enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
    enet.fit(X, y)
    assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
    # And for the cross-validated ElasticNetCV.
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Ensure the unconstrained fit has a negative coefficient
    enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
                                        max_iter=max_iter,
                                        cv=2, n_jobs=1)
    enetcv_unconstrained.fit(X, y)
    assert_true(min(enetcv_unconstrained.coef_) < 0)
    # On same data, constrained fit has non-negative coefficients
    enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
                                      cv=2, positive=True, n_jobs=1)
    enetcv_constrained.fit(X, y)
    assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
    # Constant targets: the CV estimators must predict the constant exactly
    # and collapse the alpha grid to the smallest representable value.
    enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
    m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
    lasso = LassoCV(fit_intercept=True, n_alphas=3)
    m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
    models_single_task = (enet, lasso)
    models_multi_task = (m_enet, m_lasso)
    rng = np.random.RandomState(0)
    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))
    y1 = np.empty(10)
    y2 = np.empty((10, 2))
    for model in models_single_task:
        for y_values in (0, 5):
            y1.fill(y_values)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
    for model in models_multi_task:
        for y_values in (0, 5):
            y2[:, 0].fill(y_values)
            y2[:, 1].fill(2 * y_values)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
    # Duplicated targets must yield identical coefficient rows and a tiny
    # positive dual gap.
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    # Y_test = np.c_[y_test, y_test]
    clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
    assert_true(0 < clf.dual_gap_ < 1e-5)
    assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
    clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
    assert_true(0 < clf.dual_gap_ < 1e-5)
    assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
    # Fitting must work on memory-mapped (read-only) input arrays.
    X = np.array([[-1], [0], [1]])
    Y = np.array([-1, 0, 1]) # just a straight line
    T = np.array([[2], [3], [4]]) # test sample
    with TempMemmap((X, Y)) as (X, Y):
        clf = Lasso(alpha=0.5)
        clf.fit(X, Y)
        pred = clf.predict(T)
        assert_array_almost_equal(clf.coef_, [.25])
        assert_array_almost_equal(pred, [0.5, 0.75, 1.])
        assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
    # Same read-only guarantee for the multi-task variant.
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    with TempMemmap((X, Y)) as (X, Y):
        Y = np.c_[y, y]
        clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
        assert_true(0 < clf.dual_gap_ < 1e-5)
        assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
    # A single multi-target fit must match per-target fits column by column.
    # NOTE(review): n_informative_features=10 > n_features=8 makes the
    # zeroing slice in build_dataset a no-op here — presumably intentional
    # so that all features are informative; confirm.
    n_targets = 3
    X, y, _, _ = build_dataset(n_samples=10, n_features=8,
                               n_informative_features=10, n_targets=n_targets)
    estimator = ElasticNet(alpha=0.01, fit_intercept=True)
    estimator.fit(X, y)
    coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
                                 estimator.dual_gap_)
    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
    # Plain ElasticNetCV does not accept 2-D y; it must raise.
    X = np.random.randn(10, 2)
    y = np.random.randn(10, 2)
    clf = ElasticNetCV()
    assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
    # Shapes of CV attributes and selected hyper-parameters for the
    # multi-task CV estimators.
    X, y, _, _ = build_dataset(n_features=100, n_targets=3)
    clf = MultiTaskElasticNetCV().fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00556, 3)
    clf = MultiTaskLassoCV().fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00278, 3)
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
                                l1_ratio=[0.3, 0.5], tol=1e-3)
    clf.fit(X, y)
    assert_equal(0.5, clf.l1_ratio_)
    assert_equal((3, X.shape[1]), clf.coef_.shape)
    assert_equal((3, ), clf.intercept_.shape)
    assert_equal((2, 50, 3), clf.mse_path_.shape)
    assert_equal((2, 50), clf.alphas_.shape)
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
    clf.fit(X, y)
    assert_equal((3, X.shape[1]), clf.coef_.shape)
    assert_equal((3, ), clf.intercept_.shape)
    assert_equal((50, 3), clf.mse_path_.shape)
    assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
    # A (n, 1) multi-task fit must agree with the corresponding 1-D fit.
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
    # Same single-column equivalence for the lasso CV estimators.
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = LassoCV(n_alphas=5, eps=2e-3)
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
    clf1.fit(X, y)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
    # float32 and float64 sparse inputs must give near-identical results.
    X, y, _, _ = build_dataset(n_features=10)
    clf = ElasticNetCV(n_alphas=5)
    clf.fit(sparse.csr_matrix(X), y)
    clf1 = ElasticNetCV(n_alphas=5)
    clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
    clf = LassoCV(n_alphas=5)
    clf.fit(sparse.csr_matrix(X), y)
    clf1 = LassoCV(n_alphas=5)
    clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
    # Anything but True/False/array for `precompute` must raise ValueError.
    X, y, _, _ = build_dataset()
    for clf in [ElasticNetCV(precompute="invalid"),
                LassoCV(precompute="invalid")]:
        assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
    # Refitting an already-converged model with warm_start=True must
    # converge in a single pass.
    X, y, _, _ = build_dataset()
    model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
    n_iter_reference = model.n_iter_
    # This dataset is not trivial enough for the model to converge in one pass.
    assert_greater(n_iter_reference, 2)
    # Check that n_iter_ is invariant to multiple calls to fit
    # when warm_start=False, all else being equal.
    model.fit(X, y)
    n_iter_cold_start = model.n_iter_
    assert_equal(n_iter_cold_start, n_iter_reference)
    # Fit the same model again, using a warm start: the optimizer just performs
    # a single pass before checking that it has already converged
    model.set_params(warm_start=True)
    model.fit(X, y)
    n_iter_warm_start = model.n_iter_
    assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
    # Warm-starting from a more regularized solution must speed up
    # convergence on the less regularized problem.
    boston = load_boston()
    X, y = boston.data, boston.target
    # Train a model to converge on a lightly regularized problem
    final_alpha = 1e-5
    low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
    # Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier it should converge faster
    # in general.
    high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
    assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
    # Fit the solution to the original, less regularized version of the
    # problem but from the solution of the highly regularized variant of
    # the problem as a better starting point. This should also converge
    # faster than the original model that starts from zero.
    warm_low_reg_model = deepcopy(high_reg_model)
    warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
    warm_low_reg_model.fit(X, y)
    assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
    # Test that both random and cyclic selection give the same results.
    # Ensure that the test models fully converge and check a wide
    # range of conditions.
    # This uses the coordinate descent algo using the gram trick.
    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X, y)
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(X, y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # This uses the descent algo without the gram trick
    # (n_samples < n_features after the transpose).
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X.T, y[:20])
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(X.T, y[:20])
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Sparse Case
    clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(sparse.csr_matrix(X), y)
    clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
    clf_random.fit(sparse.csr_matrix(X), y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Multioutput case.
    new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
    clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
    clf_cyclic.fit(X, new_y)
    clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
                                     random_state=42)
    clf_random.fit(X, new_y)
    assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
    assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
    # Raise error when selection is not in cyclic or random.
    clf_random = ElasticNet(selection='invalid')
    assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
    # Test that the coefs returned by positive=True in enet_path are positive
    X, y, _, _ = build_dataset(n_samples=50, n_features=50)
    for path in [enet_path, lasso_path]:
        pos_path_coef = path(X, y, positive=True)[1]
        assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
    # Test that dense and sparse input give the same input for descent paths.
    X, y, _, _ = build_dataset(n_samples=50, n_features=20)
    csr = sparse.csr_matrix(X)
    for path in [enet_path, lasso_path]:
        _, coefs, _ = path(X, y, fit_intercept=False)
        _, sparse_coefs, _ = path(csr, y, fit_intercept=False)
        assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
    # fit(check_input=False) bypasses input validation, so the caller must
    # hand over Fortran-ordered float64 data; wrongly-typed data must raise.
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    X = check_array(X, order='F', dtype='float64')
    # Bug fix: validate y itself. The original line passed X a second time,
    # which silently rebound y to a 2-D copy of X and made the test fit a
    # multi-output model instead of the intended single-output one.
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
    clf = ElasticNet(selection='cyclic', tol=1e-8)
    # Check that no error is raised if data is provided in the right format
    clf.fit(X, y, check_input=False)
    X = check_array(X, order='F', dtype='float32')
    clf.fit(X, y, check_input=True)
    # Check that an error is raised if data is provided in the wrong dtype,
    # because of check bypassing
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in false
    # computation
    X = check_array(X, order='C', dtype='float64')
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
    # A user-supplied Gram matrix is invalid once X is centered for the
    # intercept; a warning about recomputation is expected.
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    Gram = X.T.dot(X)
    clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
                     fit_intercept=True)
    assert_warns_message(UserWarning,
                         "Gram matrix was provided but X was centered"
                         " to fit intercept, "
                         "or X was normalized : recomputing Gram matrix.",
                         clf.fit, X, y)
def test_lasso_non_float_y():
    # Integer targets must be cast internally and give the same coefficients
    # as the equivalent float targets.
    X = [[0, 0], [1, 1], [-1, -1]]
    y = [0, 1, 2]
    y_float = [0.0, 1.0, 2.0]
    for model in [ElasticNet, Lasso]:
        clf = model(fit_intercept=False)
        clf.fit(X, y)
        clf_float = model(fit_intercept=False)
        clf_float.fit(X, y_float)
        assert_array_equal(clf.coef_, clf_float.coef_)
| bsd-3-clause |
kevinthesun/mxnet | example/kaggle-ndsb1/gen_img_list.py | 42 | 7000 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
# Command-line interface: scans an image directory and emits MXNet .lst
# files (tab-separated rows of: running index, label index, image path).
parser = argparse.ArgumentParser(description='generate train/test image list files form input directory. If training it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
                    help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
                    help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
                    help='the output lst file')
parser.add_argument('--train', action='store_true',
                    help='if we are generating training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
                    help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
                    help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
# Fixed seed so the shuffle and the train/val split are reproducible.
random.seed(888)
# Fix: join the folder and filename as two arguments. The original called
# os.path.join(a + b) (a no-op single-argument join), which silently produced
# broken paths whenever --out-folder lacked a trailing slash.
fo_name = os.path.join(args.out_folder, args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
    # Training mode additionally produces tr.lst / va.lst split files.
    tr_fo_name = os.path.join(args.out_folder, "tr.lst")
    va_fo_name = os.path.join(args.out_folder, "va.lst")
    tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
    va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
# Fix: this class-name string literal had been hard-wrapped mid-word
# ("...,pr" / "otist_star,...") across two physical lines, which is a syntax
# error; the literal is rejoined onto a single line. The order of these 121
# names defines the label index written into the .lst files.
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list: rows of (running index, label index, image path).
img_lst = []
cnt = 0
if args.train:
    # Training layout: one subdirectory per class, named after `head`.
    # `range` instead of `xrange` keeps the script runnable on Python 3
    # (identical behavior on Python 2).
    for i in range(len(head)):
        path = args.image_folder + head[i]
        # Reuse `path` instead of recomputing the same concatenation.
        lst = os.listdir(path)
        for img in lst:
            img_lst.append((cnt, i, path + '/' + img))
            cnt += 1
else:
    # Test layout: one flat folder; all labels are 0 (unknown).
    lst = os.listdir(args.image_folder)
    for img in lst:
        img_lst.append((cnt, 0, args.image_folder + img))
        cnt += 1
# shuffle
random.shuffle(img_lst)
# write
for item in img_lst:
    fo.writerow(item)
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
# unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frecuency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
| apache-2.0 |
mfs6174/Deep6174 | scripts/run-and-draw-last.py | 1 | 2768 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: run-and-draw-last.py
# Date: Thu Sep 18 15:43:47 2014 -0700
import matplotlib.pyplot as plt
from scipy.misc import imread, imsave
import numpy as np
import os, sys
import glob
from copy import copy
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../')))
from lib.imageutil import stack_vectors
from network_runner import get_nn
if len(sys.argv) < 3:
print "Usage: {0} <model> <input images>".format(sys.argv[0])
sys.exit()
def draw(vec, ofname):
""" draw a vector in dots or lines, also save the vector
ofname: output filename
"""
fig = plt.figure(figsize = (38, 2))
plt.plot(range(len(vec)), vec,'bo')
fig.savefig(ofname)
# also save the vector
fname = ofname[:-3] + 'txt'
with open(fname, 'w') as f:
f.write(repr(vec))
fig = plt.figure()
def gen_file_list():
""" generate image filenames from arguments given in the command line"""
for k in range(2, len(sys.argv)):
pattern = sys.argv[k]
for f in glob.glob(pattern):
if os.path.isfile(f):
yield f
# We have already saved the learned parameters in sys.argv[1]
# build nn with params
model_file = sys.argv[1]
# get a network from the saved file
nn = get_nn(model_file)
print "Running network with model {0}".format(model_file)
# get the weight of the digit '3' at the second position
prms = nn.nn.layers[-1].get_params()['Ws'][2][:,3]
# save the weight in all_vecs, to draw together with another vector later
all_vecs = [prms]
draw(prms, './weight-secondposition-3.png')
for idx, f in enumerate(gen_file_list()):
print "Running {0}...".format(f)
# network accepts images ranging from [0, 1]
img = imread(f) / 255.0
# run the network against the image
results = nn.run(img)
pred = nn.predict(img)
print "Predict: ", pred
#print [results[-1][k].shape for k in range(len(results[-1]))]
outdir = os.path.dirname(f) + '/vec'
try:
os.mkdir(outdir)
except:
pass
# get the representation after the last hidden layer, which is [-2]
# layer[-1] is the output layer.
hidden_vec = results[-2].reshape((results[-2].shape[1],))
# build filename for output
pred = str(pred[0]) + '-' + ''.join(map(str, pred[1:]))
basename = os.path.basename(f)[:-4]
fname = os.path.join(outdir, basename + '-{0}-vec.jpg'.format(pred))
draw(hidden_vec, fname)
# plot color-graph of weight vector and representation
vecs = copy(all_vecs)
vecs.append(hidden_vec)
img = stack_vectors(vecs)
plt.imshow(img)
plt.savefig(os.path.join(outdir, basename + '-{0}-color.jpg'.format(pred)))
print "Results written to {0}.".format(outdir)
| apache-2.0 |
VCG/gp | gp/cremi.py | 1 | 1951 | import glob
import h5py
import mahotas as mh
import matplotlib.pyplot as plt
import numpy as np
import partition_comparison
import os
from scipy import ndimage as nd
import skimage.measure
import tifffile as tif
class Cremi(object):
@staticmethod
def read_section(path, z, verbose=True):
'''
'''
image = sorted(glob.glob(os.path.join(path, 'image', '*'+str(z).zfill(9)+'_image.png')))
gold = sorted(glob.glob(os.path.join(path, 'gold', '*'+str(z).zfill(8)+'.tif')))
rhoana = sorted(glob.glob(os.path.join(path, 'rhoana', '*'+str(z).zfill(9)+'_neuroproof.png')))
prob = sorted(glob.glob(os.path.join(path, 'prob', '*'+str(z).zfill(9)+'_membrane-membrane.png')))
if verbose:
print 'Loading', os.path.basename(image[0])
image = mh.imread(image[0])
# mask = mh.imread(mask[0]).astype(np.bool)
gold = tif.imread(gold[0])
rhoana = mh.imread(rhoana[0])
prob = mh.imread(prob[0])
#convert ids from rgb to single channel
rhoana_single = np.zeros((rhoana.shape[0], rhoana.shape[1]), dtype=np.uint64)
rhoana_single[:, :] = rhoana[:,:,0]*256*256 + rhoana[:,:,1]*256 + rhoana[:,:,2]
# gold_single = np.zeros((gold.shape[0], gold.shape[1]), dtype=np.uint64)
# gold_single[:, :] = gold[:,:,0]*256*256 + gold[:,:,1]*256 + gold[:,:,2]
# relabel the segmentations
# gold_single = Util.relabel(gold_single)
# rhoana_single = Util.relabel(rhoana_single)
#
# SNIPPET FOR CONVERTING HFD5 data to single images
#
# hdf5_file = h5py.File('/home/d/data/CREMI/sample_C_20160501.hdf')
# list_of_names = []
# hdf5_file.visit(list_of_names.append)
# data = hdf5_file['volumes/labels/neuron_ids'].value
# for z in range(data.shape[0]):
# slice = data[z]
# tif.imsave('/home/d/data/CREMI/C/gold/'+str(z).zfill(8)+'.tif', slice)
# # hdf5_file.close()
# return image, prob, mask, gold_single, rhoana_single
return image, prob, gold, rhoana_single
| mit |
equialgo/scikit-learn | sklearn/neighbors/approximate.py | 27 | 22368 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optinal (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, opitonal (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
wgmueller1/BDA_py_demos | demos_ch3/demo3_5.py | 19 | 2066 | """Bayesian Data Analysis, 3rd ed
Chapter 3, demo 5
Demonstrate a normal model for the Newcomb's data (BDA3 p. 66).
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# data
data_path = '../utilities_and_data/light.txt'
y = np.loadtxt(data_path)
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# filtered data
y_pos = y[y > 0]
# sufficient statistics
n_pos = len(y_pos)
s2_pos = np.var(y_pos, ddof=1)
my_pos = np.mean(y_pos)
# For mu compute the density in these points
tl1 = [18, 34]
t1 = np.linspace(tl1[0], tl1[1], 1000)
# compute the exact marginal density for mu
# multiplication by 1./sqrt(s2/n) is due to the transformation of variable
# z=(x-mean(y))/sqrt(s2/n), see BDA3 p. 21
pm_mu = stats.t.pdf((t1 - my) / np.sqrt(s2/n), n-1) / np.sqrt(s2/n)
# compute the exact marginal density for mu for the filtered data
pm_mu_pos = (
stats.t.pdf((t1 - my_pos) / np.sqrt(s2_pos/n_pos), n_pos-1)
/ np.sqrt(s2_pos/n_pos)
)
# ====== Plotting
# create figure
fig, axes = plt.subplots(2,1, sharex=True, figsize=(10,8))
# plot histogram
axes[0].hist(y, np.arange(-44,43,2))
# decorate
axes[0].set_title('Newcomb\'s measurements')
axes[0].set_ylabel('count')
plt.setp(axes[0].get_xticklabels(), visible=True)
# plot the posterior of mu
axes[1].plot(t1, pm_mu)
# plot the posterior of mu in the filtered case
axes[1].plot(t1, pm_mu_pos)
# Plot the currently accepted true value
axes[1].axvline(33, color='k', linestyle='--', linewidth=1.5)
axes[1].legend(
('posterior of $\mu$',
'posterior of $\mu$ given $y > 0$',
'\'true value\''),
'upper left'
)
axes[1].set_title('Normal model')
axes[1].set_xlabel('$\mu$')
axes[1].set_yticks(())
plt.show()
| gpl-3.0 |
starimpact/fast-rcnn | lib/fast_rcnn/test.py | 43 | 11975 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs['rois'].astype(np.float32, copy=False))
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.

    Args:
        all_boxes: nested list indexed as ``all_boxes[cls][image]``; each
            entry is an N x 5 array of detections (x1, y1, x2, y2, score)
            or an empty list when the image has no detections for the class.
        thresh: overlap threshold passed through to ``nms``.

    Returns:
        A nested list with the same [cls][image] layout containing only the
        detections kept by NMS (copied, so input arrays are not aliased).
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    # range (not xrange) works identically here under Python 2 and 3.
    nms_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            # The original tested ``dets == []``, which compares a numpy
            # array against a list and does not reliably produce a boolean;
            # test emptiness by length instead.
            if len(dets) == 0:
                continue
            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
def test_net(net, imdb):
    """Test a Fast R-CNN network on an image database.

    Runs detection on every image in ``imdb``, adaptively tightening each
    class's score threshold so roughly ``max_per_set`` detections per class
    survive overall, pickles the raw detections, then applies NMS and hands
    the result to ``imdb.evaluate_detections``.
    """
    num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per image prior
    # to NMS
    max_per_set = 40 * num_images
    # heuristic: keep at most 100 detections per class per image prior to NMS
    max_per_image = 100
    # detection threshold for each class (this is adaptively set based on the
    # max_per_set constraint)
    thresh = -np.inf * np.ones(imdb.num_classes)
    # top_scores will hold one minheap of scores per class (used to enforce
    # the max_per_set constraint)
    top_scores = [[] for _ in xrange(imdb.num_classes)]
    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in
    #    (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in xrange(num_images)]
                 for _ in xrange(imdb.num_classes)]
    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # timers
    _t = {'im_detect' : Timer(), 'misc' : Timer()}
    roidb = imdb.roidb
    for i in xrange(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        scores, boxes = im_detect(net, im, roidb[i]['boxes'])
        _t['im_detect'].toc()
        _t['misc'].tic()
        # class index 0 is skipped (background class, presumably)
        for j in xrange(1, imdb.num_classes):
            # keep proposals that clear the current class threshold and are
            # not ground-truth boxes (gt_classes == 0)
            inds = np.where((scores[:, j] > thresh[j]) &
                            (roidb[i]['gt_classes'] == 0))[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j*4:(j+1)*4]
            # keep only the max_per_image highest-scoring boxes
            top_inds = np.argsort(-cls_scores)[:max_per_image]
            cls_scores = cls_scores[top_inds]
            cls_boxes = cls_boxes[top_inds, :]
            # push new scores onto the minheap
            for val in cls_scores:
                heapq.heappush(top_scores[j], val)
            # if we've collected more than the max number of detections,
            # then pop items off the minheap and update the class threshold
            if len(top_scores[j]) > max_per_set:
                while len(top_scores[j]) > max_per_set:
                    heapq.heappop(top_scores[j])
                thresh[j] = top_scores[j][0]
            all_boxes[j][i] = \
                    np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
            if 0:
                # debugging branch (disabled): visualize post-NMS detections
                keep = nms(all_boxes[j][i], 0.3)
                vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
        _t['misc'].toc()
        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
              .format(i + 1, num_images, _t['im_detect'].average_time,
                      _t['misc'].average_time)
    # re-filter every image with the final (tightest) adaptive thresholds
    for j in xrange(1, imdb.num_classes):
        for i in xrange(num_images):
            inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
            all_boxes[j][i] = all_boxes[j][i][inds, :]
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
    print 'Applying NMS to all detections'
    nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
    print 'Evaluating detections'
    imdb.evaluate_detections(nms_dets, output_dir)
| mit |
adamgreenhall/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
    """Draw one covariance ellipse per mixture component onto *ax*.

    For each of the three components the 2x2 covariance over the first two
    feature dimensions is eigen-decomposed; the leading eigenvector sets the
    ellipse orientation and the eigenvalues (scaled by 9) set its axes.
    Components are colored red / green / blue in order.
    """
    for comp_idx, comp_color in enumerate('rgb'):
        cov2d = gmm._get_covars()[comp_idx][:2, :2]
        eigvals, eigvecs = np.linalg.eigh(cov2d)
        principal = eigvecs[0] / np.linalg.norm(eigvecs[0])
        tilt = np.arctan2(principal[1], principal[0])
        tilt = 180 * tilt / np.pi  # radians -> degrees
        eigvals *= 9
        ellipse = mpl.patches.Ellipse(gmm.means_[comp_idx, :2],
                                      eigvals[0], eigvals[1],
                                      180 + tilt, color=comp_color)
        ellipse.set_clip_box(ax.bbox)
        ellipse.set_alpha(0.5)
        ax.add_artist(ellipse)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
                    covariance_type=covar_type, init_params='wc', n_iter=20))
                   for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
# NOTE(review): the `/ 2` divisions below rely on Python 2 integer
# division for an integer subplot index -- confirm before porting to 3.
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
                    left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
                                  for i in xrange(n_classes)])
    # Train the other parameters using the EM algorithm.
    classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers / 2, index + 1)
    make_ellipses(classifier, h)
    # Plot the training data as dots, one color per class.
    for n, color in enumerate('rgb'):
        data = iris.data[iris.target == n]
        plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
                    label=iris.target_names[n])
    # Plot the test data with crosses
    for n, color in enumerate('rgb'):
        data = X_test[y_test == n]
        plt.plot(data[:, 0], data[:, 1], 'x', color=color)
    # Annotate each subplot with train/test accuracy percentages.
    y_train_pred = classifier.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
             transform=h.transAxes)
    y_test_pred = classifier.predict(X_test)
    test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
    plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
             transform=h.transAxes)
    plt.xticks(())
    plt.yticks(())
    plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
ycaihua/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Elementwise modified Huber loss on the margin z = y_pred * y_true.

    Piecewise: 0 for z >= 1, (1 - z)^2 for -1 <= z < 1, and the linear
    tail -4z for z < -1.
    """
    margin = y_pred * y_true
    return np.where(margin >= 1., 0,
                    np.where(margin >= -1, (1 - margin) ** 2, -4 * margin))
# Plot each convex surrogate loss as a function of the decision value f(x),
# assuming a positive true label (y = 1).
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# Zero-one loss drawn as a step function via its corner points.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
debsankha/bedtime-programming | ls222/osc_mod.py | 1 | 4146 | #!/usr/bin/env python
import random
from random import normalvariate
from random import uniform
from math import *
import commands
import matplotlib.pyplot as plt
commands.getoutput('rm cat_mouse.out')
def maxwell(x, xmax):
    """Dimensionless response curve, <= 1, with its maximum at x == xmax.

    Rises as sqrt(x / xmax) below the peak and decays as exp(xmax - x) at
    and beyond it; both branches equal 1 exactly at x == xmax.
    """
    return sqrt(x * 1.0 / xmax) if x < xmax else exp(xmax - x)
#def maxwell(t,tstable):
# if t<tstable:
# return tstable*1.0/(t*5)
# elif t>tstable:
# return 0.8
class mouse:
    """A prey animal in the cat/mouse simulation.

    A fresh mouse is alive, age zero, with a fixed body size and feeding
    rate, and may reproduce once per ``reproduction_gap`` of simulated time.
    """

    def __init__(self):
        self.size = 6                        # current body size
        self.eating_rate = 0.6               # size gained per unit time
        self.reproduction_gap = 1            # minimum time between litters
        self.time_from_last_childbirth = 0   # reproduction clock
        self.isdead = 0                      # 1 once the mouse dies
        self.age = 0                         # simulation age
class felix:
    """A cat (predator) in the cat/mouse simulation.

    A fresh cat starts below mature size, has never reproduced, and is
    alive at age zero. Its metabolic upkeep is proportional to its mature
    size, so it must keep catching mice or it starves.
    """

    def __init__(self):
        self.size = 15                       # current body size
        self.mature_size = 20                # size required to reproduce
        self.reproduction_gap = 1            # minimum time between litters
        self.catch_rate = 0.25               # per-step hunting propensity
        # Starvation cost scales with mature size.
        self.metabolism_rate = self.mature_size * 0.2
        self.is_virgin = 1                   # 1 until first reproduction
        self.time_from_last_childbirth = 0   # reproduction clock
        self.isdead = 0                      # 1 once the cat starves
        self.age = 0                         # simulation age
# ---- module-level simulation state (reset again inside dynamics()) ----
t=1                    # simulation clock
mice=[]                # living mouse population
cats=[]                # living cat population
# death / birth counters
starved_cat=0
random_dead_cat=0      # never incremented in this version of the model
random_dead_mouse=0    # never incremented in this version of the model
eaten_mouse=0
born_mouse=0
born_cat=0
# per-animal statistics accumulated during a run
mouse_life=[]          # reset in dynamics(); never appended to here
mouse_maturetime=[]
cat_life=[]            # age of each cat at death
cat_maturetime=[]      # age of each cat at first reproduction
mouse_size=[]
cat_size=[]
# time series recorded each step for plotting
mno=[]                 # mouse population
cno=[]                 # cat population
tli=[]                 # time axis
def dynamics():
    """Run the predator-prey simulation until a population dies out or t >= 100.

    Operates entirely on module-level state: rebuilds the ``mice`` and
    ``cats`` populations, resets the death/birth counters, and appends the
    per-step population sizes to ``mno``/``cno``/``tli`` for plotting.
    Ages of cats that starve are recorded in ``cat_life``.
    """
    global t,starved_cat,random_dead_cat,random_dead_mouse,eaten_mouse,born_mouse,born_cat
    mouse_no=1000
    cat_no=1000
    rice=500    # unused; kept for parity with the original model setup
    dt=0.2      # simulation time step
    t=1
    global mice,cats,mouse_life,cat_life
    mice=[mouse() for i in range(mouse_no)]
    cats=[felix() for i in range(cat_no)]
    starved_cat=0
    random_dead_cat=0
    random_dead_mouse=0
    eaten_mouse=0
    mouse_life=[]
    cat_life=[]
    while len(mice)>0 and len(cats)>0 and t<100:
        # record population sizes for plotting
        mno.append(len(mice))
        cno.append(len(cats))
        tli.append(t)
        # f=open('cat_mouse.out','a')
        # --- mouse phase: aging, feeding, reproduction -----------------
        # NOTE: appending to ``mice`` while iterating means newborns are
        # also visited within the same time step; behaviour preserved.
        for i in mice:
            i.age+=dt
            i.size+=i.eating_rate*dt    # it eats, of course
            if i.time_from_last_childbirth>i.reproduction_gap:    # maybe reproduces
                mice.append(mouse())
                i.size-=i.size*1.0/10
                i.is_virgin=0
                born_mouse+=1
            i.time_from_last_childbirth+=dt
        n=0
        # --- cat phase: aging, hunting, reproduction, starvation -------
        for cat in cats:
            cat.age+=dt
            # expected catches this step scale with prey-per-predator
            num=cat.catch_rate*len(mice)/len(cats)
            if num>uniform(0,1) and num <1:    # it eats, of course
                try:
                    caught=random.randint(0,len(mice)-1)
                    cat.size+=mice[caught].size*0.3
                    mice.pop(caught)
                    eaten_mouse+=1
                except:
                    pass    # no mice left to catch; best-effort by design
            elif num>1:
                for i in range(int(num)):
                    try:
                        caught=random.randint(0,len(mice)-1)
                        cat.size+=mice[caught].size*0.3
                        mice.pop(caught)
                        eaten_mouse+=1
                    except:
                        pass
            if cat.size>=cat.mature_size:
                if cat.is_virgin==0:
                    if cat.time_from_last_childbirth>cat.reproduction_gap:    # maybe reproduces
                        cats.append(felix())
                        cat.size-=cat.size*1.0/10
                        cat.is_virgin=0
                        born_cat+=1
                else :
                    # First-time reproduction: record maturation age.
                    # (A no-op ``cat.is_virgin==0`` comparison that stood
                    # here was removed; the real assignment follows below.)
                    cat_maturetime.append(cat.age)
                    cats.append(felix())
                    cat.size-=cat.size*1.0/10
                    cat.is_virgin=0
                    born_cat+=1
            cat.size-=cat.metabolism_rate*dt    # weight loss due to starvation
            if cat.size<=cat.mature_size/4:    # death due to starvation
                cat.isdead=1
                starved_cat+=1
            if not cat.is_virgin:
                cat.time_from_last_childbirth+=dt
        n=0
        while n<len(cats):    # getting rid of the dead ones
            if cats[n].isdead:
                cat_life.append(cats[n].age)
                # NOTE(review): appends .age to cat_size -- looks like it
                # was meant to be .size; behaviour preserved, confirm intent.
                cat_size.append(cats[n].age)
                cats.pop(n)
            else :
                n+=1
        # print >> f, t,'\t',len(mice),'\t',len(cats)
        # print t,'\t',len(mice),'\t',len(cats)
        # f.close()
        t+=dt
# Run the simulation once, report aggregate statistics, and plot both
# population time series against time.
dynamics()
print 'tot mice\trandomly dead mice\teaten mice\tborn mice\ttot cats\trandomly dead cats\tstarved cats\tborn cats'
print len(mice),'\t\t',random_dead_mouse,'\t\t\t',eaten_mouse,'\t\t',born_mouse,'\t\t',len(cats),'\t\t',random_dead_cat,'\t\t\t',starved_cat,'\t\t',born_cat
plt.plot(tli,mno,tli,cno)
plt.show()
#print 'Avg life of mouse is\t%f'%(sum(mouse_life)/len(mouse_life))
#print 'Avg life of cat is\t%f'%(sum(cat_life)/len(cat_life))
#print 'Avg mature age of mouse is\t%f'%(sum(mouse_maturetime)/len(mouse_maturetime))
#print 'Avg mature age of cat is\t%f'%(sum(cat_maturetime)/len(cat_maturetime))
#print 'Avg size of mouse is\t%f'%(sum(mouse_size)/len(mouse_size))
#print 'Avg size of cat is\t%f'%(sum(cat_size)/len(cat_size))
| gpl-3.0 |
terkkila/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
    """as_float_array converts int32->float32 and int64->float64, honors the
    copy flag, preserves Fortran ordering, and copies truly when asked."""
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok (int32 widens to float32)
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test (int64 widens to float64)
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))
    # Test the copy parameter with some matrices: mutating the copy must
    # never leak back into the source array.
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray()
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
def test_np_matrix():
    """Input validation must never hand back an ``np.matrix``, regardless
    of whether the input is a plain array, a matrix, or a sparse matrix."""
    X = np.arange(12).reshape(3, 4)
    for container in (X, np.matrix(X), sp.csc_matrix(X)):
        assert_false(isinstance(as_float_array(container), np.matrix))
def test_memmap():
    """Validation utilities must not copy memory-mapped arrays: writes
    through the validated view have to be visible in the memmap."""
    asflt = lambda x: as_float_array(x, copy=False)
    with NamedTemporaryFile(prefix='sklearn-test') as tmp:
        M = np.memmap(tmp, shape=100, dtype=np.float32)
        M[:] = 0
        for f in (check_array, np.asarray, asflt):
            X = f(M)
            # Writing via X must alias the memmap, proving no copy was made.
            X[:] = 1
            assert_array_equal(X.ravel(), M)
            X[:] = 0
def test_ordering():
    """check_array must honor order='C'/'F' for both copy values, and must
    not silently re-order sparse data."""
    # Check that ordering is enforced correctly by validation utilities.
    # We need to check each validation utility, because a 'copy' without
    # 'order=K' will kill the ordering.
    X = np.ones((10, 5))
    for A in X, X.T:
        for copy in (True, False):
            B = check_array(A, order='C', copy=copy)
            assert_true(B.flags['C_CONTIGUOUS'])
            B = check_array(A, order='F', copy=copy)
            assert_true(B.flags['F_CONTIGUOUS'])
            if copy:
                assert_false(A is B)
    # Reversed-stride sparse data stays as-is (not made contiguous).
    X = sp.csr_matrix(X)
    X.data = X.data[::-1]
    assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
    """Exercise check_array across sparse acceptance, dimensionality,
    finiteness checks, and dtype/order/copy enforcement for dense and
    sparse inputs."""
    # accept_sparse == None
    # raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    assert_raises(TypeError, check_array, X_csr)
    # ensure_2d
    X_array = check_array([0, 1, 2])
    assert_equal(X_array.ndim, 2)
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert_equal(X_array.ndim, 1)
    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    assert_raises(ValueError, check_array, X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise
    # force_all_finite
    X_inf = np.arange(4).reshape(2, 2).astype(np.float)
    X_inf[0, 0] = np.inf
    assert_raises(ValueError, check_array, X_inf)
    check_array(X_inf, force_all_finite=False)  # no raise
    # nan check
    X_nan = np.arange(4).reshape(2, 2).astype(np.float)
    X_nan[0, 0] = np.nan
    assert_raises(ValueError, check_array, X_nan)
    # was ``check_array(X_inf, ...)`` -- a copy-paste slip that re-tested
    # the inf array; the NaN array is what this branch is about
    check_array(X_nan, force_all_finite=False)  # no raise
    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(np.int)
    X_float = X_C.astype(np.float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]
    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if order == 'C':
            assert_true(X_checked.flags['C_CONTIGUOUS'])
            assert_false(X_checked.flags['F_CONTIGUOUS'])
        elif order == 'F':
            assert_true(X_checked.flags['F_CONTIGUOUS'])
            assert_false(X_checked.flags['C_CONTIGUOUS'])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert_true(X is X_checked)
    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(np.int)
    X_float = X_csc.astype(np.float)
    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        with warnings.catch_warnings(record=True) as w:
            X_checked = check_array(X, dtype=dtype,
                                    accept_sparse=accept_sparse, copy=copy)
        # object-dtype and dok inputs may legitimately warn; anything else
        # must be silent
        if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
            message = str(w[0].message)
            messages = ["object dtype is not supported by sparse matrices",
                        "Can't check dok sparse matrix for nan or inf."]
            assert_true(message in messages)
        else:
            assert_equal(len(w), 0)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if X.format in accept_sparse:
            # no change if allowed
            assert_equal(X.format, X_checked.format)
        else:
            # got converted to the first accepted format
            assert_equal(X_checked.format, accept_sparse[0])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and X.format == X_checked.format):
                assert_true(X is X_checked)
    # other input formats
    # convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert_true(isinstance(X_dense, np.ndarray))
    # raise on too deep lists
    assert_raises(ValueError, check_array, X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise
    # convert weird stuff to arrays
    X_no_array = NotAnArray(X_dense)
    result = check_array(X_no_array)
    assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
    """DataFrame-like inputs with object dtype are converted to float by
    check_array, even when the frame has a column attribute named 'dtype'."""
    # test that data-frame like objects with dtype object
    # get converted
    X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
    X_df = MockDataFrame(X)
    assert_equal(check_array(X_df).dtype.kind, "f")
    assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
    # smoke-test against dataframes with column named "dtype"
    X_df.dtype = "Hans"
    assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
    """Nested lists of ints must keep an integer dtype through check_array,
    whether or not 2-D shape is enforced."""
    X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    for ensure_2d in (True, False):
        assert_equal(check_array(X, ensure_2d=ensure_2d).dtype.kind, "i")
def test_check_array_dtype_warning():
    """DataConversionWarning is raised (with the estimator name) only when
    warn_on_dtype=True and an actual dtype conversion happens; float inputs
    that already match the requested dtype convert silently and, when
    possible, without copying."""
    X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    X_float64 = np.asarray(X_int_list, dtype=np.float64)
    X_float32 = np.asarray(X_int_list, dtype=np.float32)
    X_int64 = np.asarray(X_int_list, dtype=np.int64)
    X_csr_float64 = sp.csr_matrix(X_float64)
    X_csr_float32 = sp.csr_matrix(X_float32)
    X_csc_float32 = sp.csc_matrix(X_float32)
    X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
    y = [0, 0, 1]
    integer_data = [X_int64, X_csc_int32]
    float64_data = [X_float64, X_csr_float64]
    float32_data = [X_float32, X_csr_float32, X_csc_float32]
    # Integer inputs require a conversion: silent by default, warning when
    # warn_on_dtype=True.
    for X in integer_data:
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True)
        assert_equal(X_checked.dtype, np.float64)
        X_checked = assert_warns(DataConversionWarning, check_array, X,
                                 dtype=np.float64,
                                 accept_sparse=True, warn_on_dtype=True)
        assert_equal(X_checked.dtype, np.float64)
        # Check that the warning message includes the name of the Estimator
        X_checked = assert_warns_message(DataConversionWarning,
                                         'SomeEstimator',
                                         check_array, X,
                                         dtype=[np.float64, np.float32],
                                         accept_sparse=True,
                                         warn_on_dtype=True,
                                         estimator='SomeEstimator')
        assert_equal(X_checked.dtype, np.float64)
        X_checked, y_checked = assert_warns_message(
            DataConversionWarning, 'KNeighborsClassifier',
            check_X_y, X, y, dtype=np.float64, accept_sparse=True,
            warn_on_dtype=True, estimator=KNeighborsClassifier())
        assert_equal(X_checked.dtype, np.float64)
    # float64 inputs already match: never warn, regardless of the flag.
    for X in float64_data:
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True, warn_on_dtype=True)
        assert_equal(X_checked.dtype, np.float64)
        X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
                                       accept_sparse=True, warn_on_dtype=False)
        assert_equal(X_checked.dtype, np.float64)
    # float32 is acceptable when listed; identity is preserved unless a
    # copy or a sparse-format conversion is requested.
    for X in float32_data:
        X_checked = assert_no_warnings(check_array, X,
                                       dtype=[np.float64, np.float32],
                                       accept_sparse=True)
        assert_equal(X_checked.dtype, np.float32)
        assert_true(X_checked is X)
        X_checked = assert_no_warnings(check_array, X,
                                       dtype=[np.float64, np.float32],
                                       accept_sparse=['csr', 'dok'],
                                       copy=True)
        assert_equal(X_checked.dtype, np.float32)
        assert_false(X_checked is X)
        # csc is not in accept_sparse, so it is converted to csr even
        # with copy=False.
        X_checked = assert_no_warnings(check_array, X_csc_float32,
                                       dtype=[np.float64, np.float32],
                                       accept_sparse=['csr', 'dok'],
                                       copy=False)
        assert_equal(X_checked.dtype, np.float32)
        assert_false(X_checked is X_csc_float32)
        assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
    """Error messages for ensure_min_samples / ensure_min_features must
    report the offending shape and the required minimum."""
    # empty list is considered 2D by default:
    msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_array, [])
    # If considered a 1D collection when ensure_2d=False, then the minimum
    # number of samples will break:
    msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
    # Invalid edge case when checking the default minimum sample of a scalar
    msg = "Singleton array array(42) cannot be considered a valid collection."
    assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2 array with
    # one sample and one feature:
    X_checked = check_array(42, ensure_2d=True)
    assert_array_equal(np.array([[42]]), X_checked)
    # Simulate a model that would need at least 2 samples to be well defined
    X = np.ones((1, 10))
    y = np.ones(1)
    msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_samples=2)
    # The same message is raised if the data has 2 dimensions even if this is
    # not mandatory
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_samples=2, ensure_2d=False)
    # Simulate a model that would require at least 3 features (e.g. SelectKBest
    # with k=3)
    X = np.ones((10, 2))
    y = np.ones(2)
    msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_features=3)
    # Only the feature check is enabled whenever the number of dimensions is 2
    # even if allow_nd is enabled:
    assert_raise_message(ValueError, msg, check_X_y, X, y,
                         ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage as trimmed all the features of a
    # 2D dataset.
    X = np.empty(0).reshape(10, 0)
    y = np.ones(10)
    msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, check_X_y, X, y)
    # nd-data is not checked for any minimum number of features by default:
    X = np.ones((10, 0, 28, 28))
    y = np.ones(10)
    X_checked, y_checked = check_X_y(X, y, allow_nd=True)
    assert_array_equal(X, X_checked)
    assert_array_equal(y, y_checked)
def test_has_fit_parameter():
    """has_fit_parameter detects a ``sample_weight`` fit argument on both
    estimator classes and instances."""
    assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
    for estimator in (RandomForestRegressor, SVR, SVR()):
        assert_true(has_fit_parameter(estimator, "sample_weight"))
def test_check_symmetric():
    """check_symmetric rejects non-square input, warns or raises on
    asymmetric matrices as configured, and symmetrizes while preserving
    the sparse format of the input."""
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)          # 1-D, not a square matrix
    arr_asym = np.array([[0, 2], [0, 2]])
    test_arrays = {'dense': arr_asym,
                   'dok': sp.dok_matrix(arr_asym),
                   'csr': sp.csr_matrix(arr_asym),
                   'csc': sp.csc_matrix(arr_asym),
                   'coo': sp.coo_matrix(arr_asym),
                   'lil': sp.lil_matrix(arr_asym),
                   'bsr': sp.bsr_matrix(arr_asym)}
    # check error for bad inputs
    assert_raises(ValueError, check_symmetric, arr_bad)
    # check that asymmetric arrays are properly symmetrized
    for arr_format, arr in test_arrays.items():
        # Check for warnings and errors
        assert_warns(UserWarning, check_symmetric, arr)
        assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            # sparse format must survive the symmetrization
            assert_equal(output.format, arr_format)
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    """check_is_fitted rejects classes/strings, raises NotFittedError on
    unfitted estimators (catchable as ValueError or AttributeError), formats
    custom messages with the estimator name, and returns None once fitted."""
    # Check is ValueError raised when non estimator instance passed
    assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
    assert_raises(TypeError, check_is_fitted, "SVR", "support_")
    ard = ARDRegression()
    svr = SVR()
    try:
        assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
        assert_raises(NotFittedError, check_is_fitted, svr, "support_")
    except ValueError:
        assert False, "check_is_fitted failed with ValueError"
    # NotFittedError is a subclass of both ValueError and AttributeError
    try:
        check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
    except ValueError as e:
        assert_equal(str(e), "Random message ARDRegression, ARDRegression")
    try:
        check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
    except AttributeError as e:
        assert_equal(str(e), "Another message SVR, SVR")
    # After fitting, check_is_fitted passes silently and returns None.
    ard.fit(*make_blobs())
    svr.fit(*make_blobs())
    assert_equal(None, check_is_fitted(ard, "coef_"))
    assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
    """check_consistent_length accepts mixed sized containers of equal
    length, raises ValueError on length mismatch, and TypeError for
    non-collections (ints, bare objects, 0-d arrays, estimators)."""
    check_consistent_length([1], [2], [3], [4], [5])
    check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
    check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
    assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
                         check_consistent_length, [1, 2], [1])
    assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
                         check_consistent_length, [1, 2], 1)
    assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
                         check_consistent_length, [1, 2], object())
    assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
    # Despite ensembles having __len__ they must raise TypeError
    assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
                         [1, 2], RandomForestRegressor())
    # XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
SimonHL/TSA | simpleFF_s.py | 1 | 3844 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 16:22:26 2015
使用前向网络进行频率识别:直接延时法
@author: simon
"""
import sys,time
import numpy
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
def step(*args):
    """Hidden-layer update applied by theano.scan at each time step.

    ``args`` is laid out positionally by scan: the first ``n_input``
    entries are the delayed input taps, the next ``n_input`` entries are
    the corresponding input weight matrices W_in, and the final entry is
    the shared bias b_in. Returns a tanh hidden activation.

    NOTE(review): b_in is added once per tap (n_input times in total),
    not once overall -- confirm a single bias term was not intended.
    """
    #global n_input, n_hidden
    x = [args[u] for u in xrange(n_input)]
    W_in = [args[u] for u in xrange(n_input, n_input * 2)]
    b_in = args[n_input * 2]
    # accumulate the contribution of every delayed tap
    h = T.dot(x[0], W_in[0]) + b_in
    for j in xrange(1, n_input):
        h = h + T.dot(x[j], W_in[j]) + b_in
    return T.tanh(h)
def purelin(*args):
    """Output layer applied by theano.scan to each hidden state.

    ``args`` is laid out positionally by scan as:
      args[0]               -- hidden state h for one time step
      args[1 : n_input + 2] -- input-layer params (W_in list, b_in), passed
                               along by scan but not used here
      args[n_input + 2]     -- W_out
      args[n_input + 3]     -- b_out
    Returns tanh(h . W_out + b_out).

    NOTE(review): despite the name 'purelin' (a *linear* output in MATLAB
    terminology) the result is squashed through tanh -- confirm intended.
    """
    h = args[0]
    # Slots 1 .. n_input+1 hold the input-layer parameters; they are not
    # needed to compute the output. (Two no-op bare expression statements
    # that merely evaluated them, plus their unused local bindings, were
    # removed.)
    W_out = args[n_input + 2]
    b_out = args[n_input + 3]
    y = T.dot(h,W_out) + b_out
    return T.tanh(y)
# network hyper-parameters
learning_rate = 0.001
n_input = 4
n_hidden = 10
n_output = 1
N = 400
n_epochs = 2000
dtype=theano.config.floatX
# load the data to process
data = numpy.genfromtxt("mytestdata.txt")
sampleNum = 400-n_input
index = range(sampleNum)  # NOTE(review): appears unused below -- confirm
data_x = data[:,0]
data_y = data[:,1]
print data_x.shape, data_y.shape
print 'network: n_in:{},n_hidden:{},n_out:{}'.format(n_input, n_hidden, n_output)
# build the network
x_in = T.vector() # input vector; the first dimension is time
y_out = T.vector() # target output vector
lr = T.scalar() # learning rate (scalar)
h_init = numpy.zeros((n_hidden,n_hidden )).astype(dtype) # hidden state init; NOTE(review): appears unused -- confirm
W_in = [theano.shared(numpy.random.uniform(size=(1, n_hidden), low= -.01, high=.01).astype(dtype),
                      name='W_in' + str(u)) for u in range(n_input)]
b_in = theano.shared(numpy.zeros((n_hidden,), dtype=dtype), name="b_in")
W_out = theano.shared(numpy.random.uniform(size=(n_hidden,n_output),low=-0.01,high=0.01).astype(dtype),name="W_out")
b_out = theano.shared(numpy.zeros((n_output,), dtype=dtype),name="b_out")
params = []
params.extend(W_in)
params.extend([b_in])
h_tmp, updates = theano.scan(step, # function unrolled through time (BPTT)
                             sequences=dict(input=x_in, taps = range(1-n_input, 1)), # delayed taps drawn from the input
                             non_sequences=params)
params.extend([W_out])
params.extend([b_out])
y,updates = theano.scan(purelin,
                        sequences=h_tmp,
                        non_sequences=params)
y = T.flatten(y)
cost = ((y_out[n_input-1:,]-y)**2).sum()
# compile the expressions: gradient of the cost w.r.t. every parameter
gparams = []
for param in params:
    gparam = T.grad(cost, param)
    gparams.append(gparam)
# specify how to update the parameters of the model as a dictionary
updates = []
for param, gparam in zip(params, gparams):
    updates.append((param, param - lr * gparam))
# define the train function
train_fn = theano.function([x_in, y_out],
                           outputs=cost,
                           updates=updates,
                           givens=[(lr,T.cast(learning_rate, 'floatX'))])
sim_fn = theano.function([x_in],
                         outputs=y,
                         givens=[(lr,T.cast(learning_rate, 'floatX'))])
print 'Running ({} epochs)'.format(n_epochs)
start_time = time.clock()
for epochs_index in xrange(n_epochs) :
    print train_fn(data_x, data_y)
    print 'Training {}'.format(epochs_index)
y_sim = sim_fn(data_x)
print y_sim.shape
print b_in.get_value()
# plot simulated output (red) against input (blue) and target (black)
plt.plot(range(y_sim.shape[0]), y_sim, 'r')
plt.plot(range(data_x.shape[0]), data_x,'b')
plt.plot(range(data_y.shape[0]), data_y,'k')
print >> sys.stderr, ('overall time (%.5fs)' % ((time.clock() - start_time) / 1.))
# 绘图
print "finished!" | bsd-2-clause |
jorge2703/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data: Boston housing, shuffled with a fixed seed, 90/10 train/test split
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
# NOTE(review): min_samples_split=1 was accepted by the sklearn of this era;
# newer releases require >= 2 -- adjust if upgrading.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance at every boosting stage
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/viz/tests/test_topomap.py | 3 | 10198 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_array_equal
from nose.tools import assert_true, assert_equal
from mne import io, read_evokeds, read_proj
from mne.io.constants import FIFF
from mne.channels import read_layout, make_eeg_layout
from mne.datasets import testing
from mne.time_frequency.tfr import AverageTFR
from mne.utils import slow_test
from mne.viz import plot_evoked_topomap, plot_projs_topomap
from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap,
_find_peaks)
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
layout = read_layout('Vectorview-all')
def _get_raw():
    """Return the small test raw file, lazily (data is not preloaded)."""
    return io.Raw(raw_fname, preload=False)
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
    """Test topomap plotting."""
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    # evoked
    warnings.simplefilter('always')
    res = 16
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))

    # A two-channel EEG-only subset: plotting should work, bad args should
    # raise the documented exception types.
    ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
    ev_bad.pick_channels(ev_bad.ch_names[:2])
    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, should plot EEG
    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time

    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
    plt.close('all')
    axes = [plt.subplot(221), plt.subplot(222)]
    evoked.plot_topomap(axes=axes, colorbar=False)
    plt.close('all')
    evoked.plot_topomap(times=[-0.1, 0.2])
    plt.close('all')

    # Smoke test the various channel types and masking options
    mask = np.zeros_like(evoked.data, dtype=bool)
    mask[[1, 5], :] = True
    evoked.plot_topomap(ch_type='mag', outlines=None)
    times = [0.1]
    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
    evoked.plot_topomap(times, ch_type='planar1', res=res)
    evoked.plot_topomap(times, ch_type='planar2', res=res)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
                        show_names=True, mask_params={'marker': 'x'})
    plt.close('all')
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average=-1000)
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average='hahahahah')

    # A callable show_names should be applied to every channel label
    p = evoked.plot_topomap(times, ch_type='grad', res=res,
                            show_names=lambda x: x.replace('MEG', ''),
                            image_interp='bilinear')
    subplot = [x for x in p.get_children() if
               isinstance(x, matplotlib.axes.Subplot)][0]
    assert_true(all('MEG' not in x.get_text()
                    for x in subplot.get_children()
                    if isinstance(x, matplotlib.text.Text)))

    # Test title
    def get_texts(p):
        # All text artists attached to figure p
        return [x.get_text() for x in p.get_children() if
                isinstance(x, matplotlib.text.Text)]

    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
    assert_equal(len(get_texts(p)), 0)
    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
    texts = get_texts(p)
    assert_equal(len(texts), 1)
    assert_equal(texts[0], 'Custom')
    plt.close('all')

    # delaunay triangulation warning
    with warnings.catch_warnings(record=True):  # can't show
        warnings.simplefilter('always')
        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
                  proj='interactive')  # projs have already been applied

    # change to no-proj mode
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0), proj=False)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  np.repeat(.1, 50))
    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])

    with warnings.catch_warnings(record=True):  # file conventions
        warnings.simplefilter('always')
        projs = read_proj(ecg_fname)
    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
    plot_projs_topomap(projs, res=res)
    plt.close('all')
    ax = plt.subplot(111)
    plot_projs_topomap([projs[0]], res=res, axes=ax)  # test axes param
    plt.close('all')

    # Zero out EEG channel locations so positions must come from digitization
    for ch in evoked.info['chs']:
        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
            ch['loc'].fill(0)

    # Remove extra digitization point, so EEG digitization points
    # correspond with the EEG electrodes
    del evoked.info['dig'][85]
    pos = make_eeg_layout(evoked.info).pos[:, :2]
    pos, outlines = _check_outlines(pos, 'head')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.5)

    pos, outlines = _check_outlines(pos, 'skirt')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(not outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.625)

    pos, outlines = _check_outlines(pos, 'skirt',
                                    head_pos={'scale': [1.2, 1.2]})
    assert_array_equal(outlines['clip_radius'], 0.75)

    # Plot skirt
    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')

    # Pass custom outlines without patch
    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
    plt.close('all')

    # Pass custom outlines with patch callable
    def patch():
        return Circle((0.5, 0.4687), radius=.46,
                      clip_on=True, transform=plt.gca().transAxes)
    outlines['patch'] = patch
    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)

    # Remove digitization points. Now topomap should fail
    evoked.info['dig'] = None
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  times, ch_type='eeg')
    plt.close('all')

    # Test error messages for invalid pos parameter
    n_channels = len(pos)
    data = np.ones(n_channels)
    pos_1d = np.zeros(n_channels)
    pos_3d = np.zeros((n_channels, 2, 2))
    assert_raises(ValueError, plot_topomap, data, pos_1d)
    assert_raises(ValueError, plot_topomap, data, pos_3d)
    assert_raises(ValueError, plot_topomap, data, pos[:3, :])

    pos_x = pos[:, :1]
    pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
    assert_raises(ValueError, plot_topomap, data, pos_x)
    assert_raises(ValueError, plot_topomap, data, pos_xyz)

    # An #channels x 4 matrix should work though. In this case (x, y, width,
    # height) is assumed.
    pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
    plot_topomap(data, pos_xywh)
    plt.close('all')

    # Test peak finder
    axes = [plt.subplot(131), plt.subplot(132)]
    evoked.plot_topomap(times='peaks', axes=axes)
    plt.close('all')
    evoked.data = np.zeros(evoked.data.shape)
    evoked.data[50][1] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
    evoked.data[80][100] = 1
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
    evoked.data[2][95] = 2
    assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
    assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
def test_plot_tfr_topomap():
    """Test plotting of TFR data."""
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    raw = _get_raw()
    times = np.linspace(-0.1, 0.1, 200)
    n_freqs = 3
    nave = 1
    rng = np.random.RandomState(42)
    # Random data shaped (n_channels, n_freqs, n_times)
    data = rng.randn(len(raw.ch_names), n_freqs, len(times))
    tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
    tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
                     res=16)

    # Fabricate a press/release event pair to exercise the rectangle
    # selection callbacks without a real GUI backend.
    eclick = mpl.backend_bases.MouseEvent('button_press_event',
                                          plt.gcf().canvas, 0, 0, 1)
    eclick.xdata = 0.1
    eclick.ydata = 0.1
    eclick.inaxes = plt.gca()
    erelease = mpl.backend_bases.MouseEvent('button_release_event',
                                            plt.gcf().canvas, 0.9, 0.9, 1)
    erelease.xdata = 0.3
    erelease.ydata = 0.2
    pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
    _onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
    tfr._onselect(eclick, erelease, None, 'mean', None)
    plt.close('all')
| bsd-3-clause |
phobson/statsmodels | statsmodels/datasets/longley/data.py | 3 | 1959 | """Longley dataset"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
The classic 1967 Longley Data
http://www.itl.nist.gov/div898/strd/lls/data/Longley.shtml
::
Longley, J.W. (1967) "An Appraisal of Least Squares Programs for the
Electronic Comptuer from the Point of View of the User." Journal of
the American Statistical Association. 62.319, 819-41.
"""
DESCRSHORT = """"""
DESCRLONG = """The Longley dataset contains various US macroeconomic
variables that are known to be highly collinear. It has been used to appraise
the accuracy of least squares routines."""
NOTE = """::
Number of Observations - 16
Number of Variables - 6
Variable name definitions::
TOTEMP - Total Employment
GNPDEFL - GNP deflator
GNP - GNP
UNEMP - Number of unemployed
ARMED - Size of armed forces
POP - Population
YEAR - Year (1947 - 1962)
"""
from numpy import recfromtxt, array, column_stack
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the Longley data and return a Dataset class.

    Returns
    -------
    Dataset instance
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    # Column 0 (TOTEMP) is the endogenous variable
    return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
    """
    Load the Longley data and return a Dataset class.

    Returns
    -------
    Dataset instance
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    # Same as load(), but the endog/exog members are pandas objects
    return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
    # Read longley.csv (shipped alongside this module) into a float record
    # array; usecols skips the first (observation label) column.
    filepath = dirname(abspath(__file__))
    with open(filepath+'/longley.csv',"rb") as f:
        data = recfromtxt(f, delimiter=",",
                          names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
    return data
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 24 | 5256 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
    """Check graph_lasso convergence and cd/lars agreement on synthetic data."""
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)

    for alpha in (0., .1, .25):
        covs = dict()
        icovs = dict()
        for method in ('cd', 'lars'):
            cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                             return_costs=True)
            covs[method] = cov_
            icovs[method] = icov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
        assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)

    # Smoke test the estimator
    model = GraphLasso(alpha=.25).fit(X)
    model.score(X)
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)

    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
    """Compare graph_lasso against the R glasso reference solution on iris."""
    # Hard-coded solution from R glasso package for alpha=1.0
    # The iris datasets in R and scikit-learn do not match in a few places,
    # these values are for the scikit-learn version.
    cov_R = np.array([
        [0.68112222, 0.0, 0.2651911, 0.02467558],
        [0.00, 0.1867507, 0.0, 0.00],
        [0.26519111, 0.0, 3.0924249, 0.28774489],
        [0.02467558, 0.0, 0.2877449, 0.57853156]
        ])
    icov_R = np.array([
        [1.5188780, 0.0, -0.1302515, 0.0],
        [0.0, 5.354733, 0.0, 0.0],
        [-0.1302515, 0.0, 0.3502322, -0.1686399],
        [0.0, 0.0, -0.1686399, 1.8123908]
        ])
    X = datasets.load_iris().data
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
                                mode=method)
        assert_array_almost_equal(cov, cov_R)
        assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
    """Check graph_lasso on a rank-deficient subset against the R solution."""
    # Small subset of rows to test the rank-deficient case
    # Need to choose samples such that none of the variances are zero
    indices = np.arange(10, 13)

    # Hard-coded solution from R glasso package for alpha=0.01
    cov_R = np.array([
        [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
        [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
        [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
        [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
        ])
    icov_R = np.array([
        [24.42244057, -16.831679593, 0.0, 0.0],
        [-16.83168201, 24.351841681, -6.206896552, -12.5],
        [0.0, -6.206896171, 153.103448276, 0.0],
        [0.0, -12.499999143, 0.0, 462.5]
        ])
    X = datasets.load_iris().data[indices, :]
    emp_cov = empirical_covariance(X)
    for method in ('cd', 'lars'):
        cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
                                mode=method)
        assert_array_almost_equal(cov, cov_R, decimal=5)
        assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
    """Smoke test GraphLassoCV, including its verbose output path."""
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
tbarbette/clickwatcher | npf/npf.py | 1 | 17287 | import argparse
import os
from argparse import ArgumentParser
from typing import Dict, List
import regex
import re
from decimal import Decimal
from npf.node import Node
from .variable import VariableFactory
def get_valid_filename(s):
    """Sanitize *s* for use as a filename.

    Surrounding whitespace is stripped, inner spaces become underscores, and
    every character that is not alphanumeric, '-', '_' or '.' is removed.
    """
    normalized = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', normalized)
class ExtendAction(argparse.Action):
    """argparse action that concatenates every occurrence of an option into a
    single list (equivalent to action='extend' on Python >= 3.8)."""

    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        super().__init__(option_strings, dest, nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Append the new values to whatever is already accumulated
        accumulated = getattr(namespace, self.dest)
        setattr(namespace, self.dest, accumulated + values)
def add_verbosity_options(parser: ArgumentParser):
    """Register the 'Verbosity options' argument group on *parser* and return it."""
    v = parser.add_argument_group('Verbosity options')
    v.add_argument('--show-full', help='Show full execution results',
                   dest='show_full', action='store_true',
                   default=False)
    v.add_argument('--show-files', help='Show content of created files',
                   dest='show_files', action='store_true',
                   default=False)
    v.add_argument('--show-cmd', help='Show the executed script',
                   dest='show_cmd', action='store_true',
                   default=False)
    v.add_argument('--show-time-results', help='Show time results',
                   dest='print_time_results', action='store_true',
                   default=False)
    v.add_argument('--quiet', help='Quiet mode', dest='quiet', action='store_true', default=False)
    v.add_argument('--quiet-regression', help='Do not tell about the regression process', dest='quiet_regression',
                   action='store_true', default=False)
    # --quiet-build and --show-build-cmd contradict each other, so they are
    # declared mutually exclusive.
    vf = v.add_mutually_exclusive_group()
    vf.add_argument('--quiet-build', help='Do not tell about the build process', dest='quiet_build',
                    action='store_true', default=False)
    vf.add_argument('--show-build-cmd', help='Show build commands', dest='show_build_cmd', action='store_true',
                    default=False)
    return v
def add_graph_options(parser: ArgumentParser):
    """Register output, graph and statistics argument groups on *parser*.

    Returns the 'Graph options' group (the other two groups are attached to
    the parser but not returned).
    """
    o = parser.add_argument_group('Output data')
    o.add_argument('--output',
                   help='Output data to CSV', dest='output', type=str, nargs='?', const='graph', default=None)
    o.add_argument('--pandas', metavar='pandas_filename', type=str, default=None, dest='pandas_filename',
                   help='Output a Pandas dataframe to CSV')
    o.add_argument('--output-columns', dest='output_columns', type=str, nargs='+', default=['x', 'mean'])

    g = parser.add_argument_group('Graph options')
    g.add_argument('--graph-size', metavar='INCH', type=float, nargs=2, default=None,
                   help='Size of graph', dest="graph_size")
    g.add_argument('--graph-filename', metavar='graph_filename', type=str, default=None, dest='graph_filename',
                   help='path to the file to output the graph')
    g.add_argument('--show-serie', dest='show_serie', action='store_true', default=False, help='always add the serie name in the file path')
    g.add_argument('--graph-reject-outliers', dest='graph_reject_outliers', action='store_true', default=False)
    g.add_argument('--graph-no-series', dest='graph_no_series', action='store_true', default=False)
    g.add_argument('--graph-group-repo', dest='group_repo', action='store_true', default=False)
    g.add_argument('--no-transform', dest='do_transform', action='store_false', default=True, help="Forbid automatic transformation of data such as extracting a variable as a serie")
    g.add_argument('--graph-select-max', dest='graph_select_max', type=int, default=None)
    g.add_argument('--graph-dpi', dest='graph_dpi', type=int, default=300)
    g.add_argument('--no-graph-time', dest='do_time', action='store_false', default=True)
    g.add_argument('--no-graph', dest='no_graph', action='store_true', default=False)
    g.add_argument('--iterative', dest='iterative', action='store_true', default=False,
                   help='Graph after each results, allowing to get a faster glimpse at the results')
    g.add_argument('--onefirst', dest='onefirst', action='store_true', default=False,
                   help='Do a first pass with one run per variables, then do the last runs')

    s = parser.add_argument_group('Statistics options')
    s.add_argument('--statistics',
                   help='Give some statistics output', dest='statistics', action='store_true',
                   default=False)
    s.add_argument('--statistics-maxdepth',
                   help='Max depth of learning tree', dest='statistics_maxdepth', type=int, default=None)
    s.add_argument('--statistics-filename',
                   help='Output of learning tree', dest='statistics_filename', type=str, default=None)
    return g
def add_testing_options(parser: ArgumentParser, regression: bool = False):
    """Register the 'Testing options' argument group on *parser* and return it.

    NOTE(review): the *regression* parameter is accepted but never used in
    this function's body — confirm whether it is still needed by callers.
    """
    t = parser.add_argument_group('Testing options')
    tf = t.add_mutually_exclusive_group()
    tf.add_argument('--no-test',
                    help='Do not run any tests, use previous results', dest='do_test', action='store_false',
                    default=True)
    t.add_argument('--no-supplementary-test',
                   help='Do not run supplementary tests for regression, use previous results', dest='allow_supplementary', action='store_false',
                   default=True)
    tf.add_argument('--force-test',
                    help='Force re-doing all tests even if data for the given version and '
                         'variables is already known, but append the new data to exesting one', dest='force_test', action='store_true',
                    default=False)
    tf.add_argument('--force-retest',
                    help='Force re-doing all tests even if data for the given version and '
                         'variables is already known, and replace it', dest='force_retest', action='store_true',
                    default=False)
    t.add_argument('--no-init',
                   help='Do not run any init scripts', dest='do_init', action='store_false',
                   default=True)
    t.add_argument('--no-conntest',
                   help='Do not run connection tests', dest='do_conntest', action='store_false',
                   default=True)
    t.add_argument('--max-results',
                   help='Count the number of valid previous tests as the maxium number of points in all tests, instead of the minimum', dest='min_test', action='store_false',
                   default=True)
    t.add_argument('--preserve-temporaries',
                   help='Do not delete tesite folder with temporary files', dest='preserve_temp',
                   action='store_true',
                   default=False)
    t.add_argument('--use-last',
                   help='Use data from previous version instead of running test if possible', dest='use_last',
                   nargs='?',
                   default=0)
    t.add_argument('--result-path', metavar='path', type=str, nargs=1, help='Path to NPF\'s own database of results. By default it is a "result" folder.', default=["results"])
    t.add_argument('--tags', metavar='tag', type=str, nargs='+', help='list of tags', default=[], action=ExtendAction)
    t.add_argument('--variables', metavar='variable=value', type=str, nargs='+', action=ExtendAction,
                   help='list of variables values to override', default=[])
    t.add_argument('--config', metavar='config=value', type=str, nargs='+', action=ExtendAction,
                   help='list of config values to override', default=[])
    t.add_argument('--testie', '--test', '--npf', metavar='path or testie', type=str, nargs='?', default='tests',
                   help='script or script folder. Default is tests')
    t.add_argument('--cluster', metavar='user@address:path', type=str, nargs='*', default=[],
                   help='role to node mapping for remote execution of tests')
    t.add_argument('--build-folder', metavar='path', type=str, default=None, dest='build_folder')
    t.add_argument('--no-mp', dest='allow_mp', action='store_false',
                   default=True, help='Run tests in the same thread. If there is multiple script, they will run '
                                      'one after the other, hence breaking most of the tests.')
    t.add_argument('--expand', type=str, default=None, dest="expand")
    t.add_argument('--rand-env', type=int, default=65536, dest="rand_env")
    return t
def add_building_options(parser):
    """Register the 'Building options' argument group on *parser* and return it."""
    b = parser.add_argument_group('Building options')
    # --use-local / --no-build / --force-build describe incompatible build
    # strategies, hence the mutually exclusive group.
    bf = b.add_mutually_exclusive_group()
    bf.add_argument('--use-local',
                    help='Use a local version of the program instead of the automatically builded one',
                    dest='use_local',
                    default=None)
    bf.add_argument('--no-build',
                    help='Do not build the last version, use the currently compiled one', dest='no_build', action='store_true', default=False)
    bf.add_argument('--force-build',
                    help='Force to rebuild even if the current version is matching the regression versions '
                         '(see --version or --history).', dest='force_build',
                    action='store_true', default=False)
    b.add_argument('--no-build-deps',
                   help='Do not build the last version of some dependencies, use the currently compiled one',
                   dest='no_build_deps',
                   action=ExtendAction,default=[],nargs='+')
    b.add_argument('--ignore-deps',
                   help='Do not use the specified dependencies, find binaries in the current path instead',
                   dest='ignore_deps',
                   action=ExtendAction,default=[],nargs='+')
    b.add_argument('--force-build-deps',
                   help='Force to rebuild some dependencies', dest='force_build_deps',
                   action=ExtendAction, default=[], nargs='+')
    return b
# Pattern for --cluster mappings of the form role=[user@]addr[:path].
# NOTE(review): '(:?' looks like a typo for the non-capturing group '(?:',
# and '(?P<path>path)' matches only the literal string "path" — confirm
# whether arbitrary paths were intended here.
nodePattern = regex.compile(
    "(?P<role>[a-zA-Z0-9]+)=(:?(?P<user>[a-zA-Z0-9]+)@)?(?P<addr>[a-zA-Z0-9.-]+)(:?[:](?P<path>path))?")
# Global role -> list-of-Node mapping, populated by parse_nodes()
roles = {}
def nodes_for_role(role, self_role=None, self_node=None, default_role_map={}):
    """Resolve a role name to its list of Nodes from the global ``roles`` map.

    An empty/None role means 'default'. The special role 'self' resolves to
    *self_node* (the node currently executing); without a node context it is
    an error. Unknown roles are first remapped through *default_role_map*,
    then fall back to the 'default' role.

    NOTE(review): default_role_map={} is a mutable default argument; it is
    never mutated here, so it is harmless in practice.
    """
    if role is None or role == '':
        role = 'default'
    if role == 'self':
        if self_role:
            role = self_role
        if self_node:
            return [self_node]
        else:
            raise Exception("Using self without a role context. Usually, this happens when self is used in a %file")
    if role not in roles:
        if role in default_role_map:
            role = default_role_map[role]
    return roles.get(role, roles['default'])
def executor(role, default_role_map):
    """
    Return the executor for a given role as associated by the cluster configuration

    :param role: A role name
    :param default_role_map: mapping of unknown role names to fallback roles
    :return: The executor of the first node registered for the role
    """
    # Bug fix: default_role_map must be passed by keyword. Passed
    # positionally, it landed in nodes_for_role's *self_role* parameter and
    # the role map was silently ignored.
    return nodes_for_role(role, default_role_map=default_role_map)[0].executor
def parse_nodes(options):
    """Populate the global ``roles`` mapping from options.cluster definitions.

    Each --cluster entry has the form 'role=[user@]addr[,var=value...]',
    where the optional settings are 'nic', 'multi' and 'mode'.
    """
    if type(options.use_last) is not int:
        if options.use_last:
            options.use_last = 100
    # The local machine always backs the 'default' role
    roles['default'] = [Node.makeLocal(options)]
    for val in options.cluster:
        variables = val.split(',')
        if len(variables) == 0:
            raise Exception("Bad definition of cluster parameter : %s" % variables)
        mapping=variables[0].strip()
        match = nodePattern.match(mapping)
        if not match:
            raise Exception("Bad definition of node : %s" % mapping)
        # 'localhost' reuses the local default node instead of opening SSH
        if match.group('addr') == 'localhost':
            node = roles['default'][0]
        else:
            node = Node.makeSSH(user=match.group('user'), addr=match.group('addr'), path=match.group('path'),
                                options=options)
        role = match.group('role')
        if role in roles:
            roles[role].append(node)
            print("Role %s has multiple nodes. The role will be executed by multiple machines. If this is not intended, fix your --cluster option." % role)
        else:
            roles[role] = [node]
        del variables[0]
        # Remaining comma-separated entries are per-node settings.
        # NOTE(review): 'val' below shadows the outer loop variable.
        for opts in variables:
            var,val = opts.split('=')
            if var == 'nic':
                node.active_nics = [ int(v) for v in val.split('+') ]
            elif var == "multi":
                node.multi = int(val)
            elif var == "mode":
                node.mode = val
            else:
                raise Exception("Unknown cluster variable : %s" % var)
def parse_variables(args_variables, tags, sec) -> Dict:
    """Parse 'name=value' override strings through *sec* and return a dict.

    :param args_variables: raw override strings from the command line
    :param tags: active tags, forwarded to the section parser
    :param sec: a section object providing parse_variable()
    :return: mapping of variable name to parsed value

    NOTE(review): entries whose parsed name is falsy are silently dropped.
    """
    variables = {}
    for variable in args_variables:
        var, val, assign = sec.parse_variable(variable,tags)
        if var:
            val.assign = assign
            variables[var] = val
    return variables
def override(args, testies):
    """Apply command-line --variables and --config overrides to each testie.

    Mutates the testies in place and returns the same list for convenience.
    """
    for testie in testies:
        overriden_variables = parse_variables(args.variables, testie.tags, testie.variables)
        overriden_config = parse_variables(args.config, testie.tags, testie.config)
        testie.variables.override_all(overriden_variables)
        testie.config.override_all(overriden_config)
    return testies
def npf_root():
    # Return the absolute path to the NPF root directory, i.e. two levels
    # above this module (<root>/npf/npf.py -> <root>).
    return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def from_root(path):
    """Return *path* unchanged if absolute, otherwise anchored under the NPF root."""
    if os.path.isabs(path):
        return path
    return npf_root() + os.sep + path
def find_local(path):
    """Return *path* if it exists locally, else the same path under the NPF root."""
    if os.path.exists(path):
        return path
    return npf_root() + '/' + path
def splitpath(hint):
    """Split *hint* into (directory, basename-without-extension, extension).

    None is treated as the default "results" path. A lone dotfile-style name
    (e.g. ".csv") is interpreted as a bare extension with an empty basename.
    """
    hint = "results" if hint is None else hint
    dirname, c_filename = os.path.split(hint)
    if not c_filename:
        # Trailing separator: no file component at all
        return dirname, '', ''
    basename, ext = os.path.splitext(c_filename)
    if not ext and basename.startswith('.'):
        basename, ext = '', basename
    return dirname, basename, ext
def build_filename(testie, build, hint, variables, def_ext, type_str='', show_serie=False, suffix='', force_ext = False, data_folder = False, prefix=None):
    """Compose an output file path for a testie/build result.

    When *hint* is None the path is delegated to build.result_path();
    otherwise *hint* is split and its missing parts (extension, basename)
    are filled in. The variable values are serialized into a
    filesystem-safe 'k=v_k=v' string that becomes part of the name (or the
    folder, when *data_folder* is set). Any missing parent directories are
    created as a side effect.
    """
    # Serialize non-empty variable values; tuples keep their second element
    var_str = get_valid_filename('_'.join(
        ["%s=%s" % (k, (val[1] if type(val) is tuple else val)) for k, val in sorted(variables.items()) if val]))
    if hint is None:
        if data_folder:
            path = build.result_path(testie.filename, def_ext, folder = var_str + (('-' if var_str else '') + type_str if type_str else ''), prefix=prefix, suffix = ('-' + get_valid_filename(build.pretty_name()) if show_serie else ''))
        else:
            path = build.result_path(testie.filename, def_ext, suffix=var_str + ('-' + type_str if type_str else '') + ('-' + get_valid_filename(build.pretty_name()) if show_serie else '') , prefix=prefix)
    else:
        dirname, basename, ext = splitpath(hint)
        if ext is None or ext == '' or force_ext:
            ext = '.' + def_ext
        if basename is None or basename == '':
            basename = var_str
        if not data_folder:
            # Variables/serie go into the file name itself
            if prefix:
                basename = prefix + basename
            if not dirname or show_serie:
                basename = (get_valid_filename(build.pretty_name()) if show_serie else '') + basename
            path = (dirname + '/' if dirname else '') + basename + (
                ('-' if basename else '') + type_str if type_str else '') + ('' if not suffix else ("-" + suffix)) + ext
        else:
            # Variables become a sub-folder instead of a name component
            if not dirname or show_serie:
                dirname = (dirname + "/" if dirname else '') + basename
            path = (dirname + '/' if dirname else '') + (prefix if prefix else '') + (get_valid_filename(build.pretty_name()) if show_serie else '') + (type_str if type_str else '') + ('' if not suffix else ("-" + suffix)) + ext
    # Ensure the destination directory exists before the caller writes to it
    folder = os.path.dirname(path)
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
    return path
def build_output_filename(options, repo_list):
    """Pick the graph output filename.

    Returns the user-supplied --graph-filename when given, otherwise builds
    'compare/<testie basename>_<repo names>.pdf'.
    """
    if options.graph_filename is not None:
        return options.graph_filename
    test_base = os.path.splitext(os.path.basename(options.testie))[0]
    repo_names = '_'.join(["%s" % repo.reponame for repo in repo_list])
    return 'compare/' + test_base + '_' + repo_names + '.pdf'
def replace_path(path, build = None):
    """Expand $version and the build repo's $-prefixed env variables in *path*.

    With no build, the path is returned untouched.
    """
    if not build:
        return path
    if build.version is not None:
        path = path.replace('$version', build.version)
    for name, value in build.repo.env.items():
        path = path.replace('$' + name, value)
    return path
def parseBool(s):
    """Convert *s* to a boolean; the string "false" (any capitalization) is False,
    anything else follows normal Python truthiness."""
    is_false_string = type(s) is str and s.lower() == "false"
    return False if is_false_string else bool(s)
def parseUnit(u):
    """Parse a number with an optional G/M/K suffix into a float.

    Generalized to accept fractional values (e.g. "1.5K" -> 1500.0) while
    remaining backward compatible with plain integers ("10", "-5M",
    "3 G", ...). The sign, the number and the unit may be separated by
    spaces before the unit only, as before.

    :param u: the string to parse
    :return: the scaled value, as a float
    :raises Exception: if *u* does not start with a number
    """
    # Scale factors; '' covers the no-suffix case.
    _scales = {'': 1, 'G': 1000000000, 'M': 1000000, 'K': 1000}
    r = re.match(r'([-]?)([0-9]+(?:\.[0-9]+)?)[ ]*([GMK]?)', u)
    if r is None:
        raise Exception("%s is not a number !" % u)
    n = float(r.group(2))
    if r.group(1) == "-":
        n = -n
    # group(3) is always one of '', 'G', 'M' or 'K' by construction of the
    # pattern, so this lookup cannot fail.
    return n * _scales[r.group(3)]
def all_num(l):
    """Return True when every element of *l* is exactly an int, float or Decimal.

    Type identity is checked on purpose: subclasses such as bool do not count.
    An empty iterable yields True.
    """
    return all(type(x) in (int, float, Decimal) for x in l)
| gpl-3.0 |
zzcclp/spark | python/pyspark/pandas/plot/matplotlib.py | 14 | 30172 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
    """Bar plot for pandas-on-Spark that only draws the top-n rows of the data."""

    def __init__(self, data, **kwargs):
        # Trim the (potentially huge) Spark-backed data to its top-n rows
        # before handing it to the stock pandas bar plot machinery.
        super().__init__(self.get_top_n(data), **kwargs)

    def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
        # Annotate the axes with the truncation notice, then defer the
        # actual drawing to matplotlib's bar().
        self.set_result_text(ax)
        return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
    """Box plot for pandas-on-Spark data.

    The summary statistics (median, quartiles, whiskers, fliers) are
    computed on Spark via ``BoxPlotBase`` and only those precomputed
    statistics are passed to matplotlib's ``Axes.bxp``, so the full
    data set is never collected to the driver.
    """
    def boxplot(
        self,
        ax,
        bxpstats,
        notch=None,
        sym=None,
        vert=None,
        whis=None,
        positions=None,
        widths=None,
        patch_artist=None,
        bootstrap=None,
        usermedians=None,
        conf_intervals=None,
        meanline=None,
        showmeans=None,
        showcaps=None,
        showbox=None,
        showfliers=None,
        boxprops=None,
        labels=None,
        flierprops=None,
        medianprops=None,
        meanprops=None,
        capprops=None,
        whiskerprops=None,
        manage_ticks=None,
        # manage_xticks is for compatibility of matplotlib < 3.1.0.
        # Remove this when minimum version is 3.0.0
        manage_xticks=None,
        autorange=False,
        zorder=None,
        precision=None,
    ):
        """Draw a box plot on ``ax`` from precomputed ``bxpstats``.

        This mirrors the keyword handling of
        ``matplotlib.axes.Axes.boxplot`` but feeds the already-computed
        statistics dictionaries to ``Axes.bxp`` instead of raw samples.
        """
        def update_dict(dictionary, rc_name, properties):
            """Loads properties in the dictionary from rc file if not already
            in the dictionary"""
            rc_str = "boxplot.{0}.{1}"
            if dictionary is None:
                dictionary = dict()
            for prop_dict in properties:
                dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
            return dictionary
        # Common property dictionaries loading from rc
        flier_props = [
            "color",
            "marker",
            "markerfacecolor",
            "markeredgecolor",
            "markersize",
            "linestyle",
            "linewidth",
        ]
        default_props = ["color", "linewidth", "linestyle"]
        boxprops = update_dict(boxprops, "boxprops", default_props)
        whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
        capprops = update_dict(capprops, "capprops", default_props)
        medianprops = update_dict(medianprops, "medianprops", default_props)
        meanprops = update_dict(meanprops, "meanprops", default_props)
        flierprops = update_dict(flierprops, "flierprops", flier_props)
        if patch_artist:
            # Filled boxes use an edgecolor instead of a line color.
            boxprops["linestyle"] = "solid"
            boxprops["edgecolor"] = boxprops.pop("color")
        # if non-default sym value, put it into the flier dictionary
        # the logic for providing the default symbol ('b+') now lives
        # in bxp in the initial value of final_flierprops
        # handle all of the `sym` related logic here so we only have to pass
        # on the flierprops dict.
        if sym is not None:
            # no-flier case, which should really be done with
            # 'showfliers=False' but none-the-less deal with it to keep back
            # compatibility
            if sym == "":
                # blow away existing dict and make one for invisible markers
                flierprops = dict(linestyle="none", marker="", color="none")
                # turn the fliers off just to be safe
                showfliers = False
            # now process the symbol string
            else:
                # process the symbol string
                # discarded linestyle
                _, marker, color = _process_plot_format(sym)
                # if we have a marker, use it
                if marker is not None:
                    flierprops["marker"] = marker
                # if we have a color, use it
                if color is not None:
                    # assume that if color is passed in the user want
                    # filled symbol, if the users want more control use
                    # flierprops
                    flierprops["color"] = color
                    flierprops["markerfacecolor"] = color
                    flierprops["markeredgecolor"] = color
        # replace medians if necessary:
        if usermedians is not None:
            if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
                bxpstats
            ):
                raise ValueError("usermedians length not compatible with x")
            else:
                # reassign medians as necessary
                for stats, med in zip(bxpstats, usermedians):
                    if med is not None:
                        stats["med"] = med
        if conf_intervals is not None:
            if np.shape(conf_intervals)[0] != len(bxpstats):
                err_mess = "conf_intervals length not compatible with x"
                raise ValueError(err_mess)
            else:
                for stats, ci in zip(bxpstats, conf_intervals):
                    if ci is not None:
                        if len(ci) != 2:
                            raise ValueError("each confidence interval must " "have two values")
                        else:
                            if ci[0] is not None:
                                stats["cilo"] = ci[0]
                            if ci[1] is not None:
                                stats["cihi"] = ci[1]
        # `manage_ticks` supersedes `manage_xticks`; honor whichever the
        # caller provided, preferring the newer name.
        should_manage_ticks = True
        if manage_xticks is not None:
            should_manage_ticks = manage_xticks
        if manage_ticks is not None:
            should_manage_ticks = manage_ticks
        # matplotlib renamed the keyword in 3.1.0; pass the right one.
        if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
            extra_args = {"manage_xticks": should_manage_ticks}
        else:
            extra_args = {"manage_ticks": should_manage_ticks}
        artists = ax.bxp(
            bxpstats,
            positions=positions,
            widths=widths,
            vert=vert,
            patch_artist=patch_artist,
            shownotches=notch,
            showmeans=showmeans,
            showcaps=showcaps,
            showbox=showbox,
            boxprops=boxprops,
            flierprops=flierprops,
            medianprops=medianprops,
            meanprops=meanprops,
            meanline=meanline,
            showfliers=showfliers,
            capprops=capprops,
            whiskerprops=whiskerprops,
            zorder=zorder,
            **extra_args,
        )
        return artists
    def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
        # Shape the return value the way pandas' BoxPlot callers expect:
        # a dict of artists, a named tuple of (axes, lines), or the axes.
        bp = self.boxplot(ax, bxpstats, **kwds)
        if return_type == "dict":
            return bp, bp
        elif return_type == "both":
            return self.BP(ax=ax, lines=bp), bp
        else:
            return ax, bp
    def _compute_plot_data(self):
        """Compute the box-plot statistics on Spark and store them as
        ``{label: [bxpstats]}`` in ``self.data`` for ``_make_plot``."""
        colname = self.data.name
        spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
        data = self.data
        # Updates all props with the rc defaults from matplotlib
        self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
        # Gets some important kwds
        showfliers = self.kwds.get("showfliers", False)
        whis = self.kwds.get("whis", 1.5)
        labels = self.kwds.get("labels", [colname])
        # This one is pandas-on-Spark specific to control precision for approx_percentile
        precision = self.kwds.get("precision", 0.01)
        # # Computes mean, median, Q1 and Q3 with approx_percentile and precision
        col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
        # # Creates a column to flag rows as outliers or not
        outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
        # # Computes min and max values of non-outliers - the whiskers
        whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
        if showfliers:
            fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
        else:
            fliers = []
        # Builds bxpstats dict
        stats = []
        item = {
            "mean": col_stats["mean"],
            "med": col_stats["med"],
            "q1": col_stats["q1"],
            "q3": col_stats["q3"],
            "whislo": whiskers[0],
            "whishi": whiskers[1],
            "fliers": fliers,
            "label": labels[0],
        }
        stats.append(item)
        self.data = {labels[0]: stats}
    def _make_plot(self):
        """Render the precomputed statistics and set tick labels."""
        bxpstats = list(self.data.values())[0]
        ax = self._get_ax(0)
        kwds = self.kwds.copy()
        for stats in bxpstats:
            # Cap the number of drawn fliers to keep the figure responsive,
            # and tell the user when the cap was applied.
            if len(stats["fliers"]) > 1000:
                stats["fliers"] = stats["fliers"][:1000]
                ax.text(
                    1,
                    1,
                    "showing top 1,000 fliers only",
                    size=6,
                    ha="right",
                    va="bottom",
                    transform=ax.transAxes,
                )
        ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
        self.maybe_color_bp(bp)
        self._return_obj = ret
        labels = [l for l, _ in self.data.items()]
        labels = [pprint_thing(l) for l in labels]
        if not self.use_index:
            labels = [pprint_thing(key) for key in range(len(labels))]
        self._set_ticklabels(ax, labels)
    @staticmethod
    def rc_defaults(
        notch=None,
        vert=None,
        whis=None,
        patch_artist=None,
        bootstrap=None,
        meanline=None,
        showmeans=None,
        showcaps=None,
        showbox=None,
        showfliers=None,
        **kwargs
    ):
        """Fill any box-plot option left as ``None`` from matplotlib's
        ``rcParams`` defaults and return the resolved options as a dict."""
        # Missing arguments default to rcParams.
        if whis is None:
            whis = mat.rcParams["boxplot.whiskers"]
        if bootstrap is None:
            bootstrap = mat.rcParams["boxplot.bootstrap"]
        if notch is None:
            notch = mat.rcParams["boxplot.notch"]
        if vert is None:
            vert = mat.rcParams["boxplot.vertical"]
        if patch_artist is None:
            patch_artist = mat.rcParams["boxplot.patchartist"]
        if meanline is None:
            meanline = mat.rcParams["boxplot.meanline"]
        if showmeans is None:
            showmeans = mat.rcParams["boxplot.showmeans"]
        if showcaps is None:
            showcaps = mat.rcParams["boxplot.showcaps"]
        if showbox is None:
            showbox = mat.rcParams["boxplot.showbox"]
        if showfliers is None:
            showfliers = mat.rcParams["boxplot.showfliers"]
        return dict(
            whis=whis,
            bootstrap=bootstrap,
            notch=notch,
            vert=vert,
            patch_artist=patch_artist,
            meanline=meanline,
            showmeans=showmeans,
            showcaps=showcaps,
            showbox=showbox,
            showfliers=showfliers,
        )
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
    """Histogram plot whose per-bin counts are computed on Spark;
    matplotlib only receives the precomputed counts."""
    def _args_adjust(self):
        # Match pandas' HistPlot: a list-like `bottom` must be an ndarray.
        if is_list_like(self.bottom):
            self.bottom = np.array(self.bottom)
    def _compute_plot_data(self):
        # Determine the bin edges up front; the data itself stays distributed.
        self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
    def _make_plot(self):
        # TODO: this logic is similar with KdePlot. Might have to deduplicate it.
        # 'num_colors' requires to calculate `shape` which has to count all.
        # Use 1 for now to save the computation.
        colors = self._get_colors(num_colors=1)
        stacking_id = self._get_stacking_id()
        # One series of bin counts per plotted column, computed on Spark.
        output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
        for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            label = pprint_thing(label if len(label) > 1 else label[0])
            kwds["label"] = label
            style, kwds = self._apply_style_colors(colors, kwds, i, label)
            if style is not None:
                kwds["style"] = style
            kwds = self._make_plot_keywords(kwds, y)
            artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
            self._add_legend_handle(artists[0], label, index=i)
    @classmethod
    def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
        """Draw one column's histogram from precomputed counts ``y``."""
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
        base = np.zeros(len(bins) - 1)
        bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
        # Since the counts were computed already, we use them as weights and just generate
        # one entry for each bin
        n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
        cls._update_stacker(ax, stacking_id, n)
        return patches
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
    """Pie plot drawn from the top-N rows of a pandas-on-Spark object."""

    def __init__(self, data, **kwargs):
        # Reduce the distributed data to its top-N rows before delegating
        # to the pandas pie-plot machinery.
        capped = self.get_top_n(data)
        super().__init__(capped, **kwargs)

    def _make_plot(self):
        # Note on the first axis whether rows were truncated, then render.
        first_ax = self._get_ax(0)
        self.set_result_text(first_ax)
        super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
    """Area plot drawn from a sample of a pandas-on-Spark object."""

    def __init__(self, data, **kwargs):
        # Sample the distributed data first; plotting every row would
        # require collecting the whole frame to the driver.
        sampled = self.get_sampled(data)
        super().__init__(sampled, **kwargs)

    def _make_plot(self):
        # Note on the first axis that the data was sampled, then render.
        first_ax = self._get_ax(0)
        self.set_result_text(first_ax)
        super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
    """Line plot drawn from a sample of a pandas-on-Spark object."""

    def __init__(self, data, **kwargs):
        # Sample the distributed data before handing it to pandas' LinePlot.
        sampled = self.get_sampled(data)
        super().__init__(sampled, **kwargs)

    def _make_plot(self):
        # Note on the first axis that the data was sampled, then render.
        first_ax = self._get_ax(0)
        self.set_result_text(first_ax)
        super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
    """Horizontal bar plot drawn from the top-N rows of a
    pandas-on-Spark object."""

    def __init__(self, data, **kwargs):
        # Reduce to the top-N rows before delegating to pandas' BarhPlot.
        capped = self.get_top_n(data)
        super().__init__(capped, **kwargs)

    def _make_plot(self):
        # Note on the first axis whether rows were truncated, then render.
        first_ax = self._get_ax(0)
        self.set_result_text(first_ax)
        super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
    """Scatter plot drawn from the top-N rows of a pandas-on-Spark
    DataFrame; ``x`` and ``y`` select the plotted columns."""

    def __init__(self, data, x, y, **kwargs):
        # Reduce to the top-N rows, then delegate with the x/y selectors.
        capped = self.get_top_n(data)
        super().__init__(capped, x, y, **kwargs)

    def _make_plot(self):
        # Note on the first axis whether rows were truncated, then render.
        first_ax = self._get_ax(0)
        self.set_result_text(first_ax)
        super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
    """KDE plot for pandas-on-Spark data; the density estimation is
    computed via ``KdePlotBase`` from Spark columns."""
    def _compute_plot_data(self):
        # Prepare the data for KDE; it stays a distributed object here.
        self.data = KdePlotBase.prepare_kde_data(self.data)
    def _make_plot(self):
        # 'num_colors' requires to calculate `shape` which has to count all.
        # Use 1 for now to save the computation.
        colors = self._get_colors(num_colors=1)
        stacking_id = self._get_stacking_id()
        sdf = self.data._internal.spark_frame
        for i, label in enumerate(self.data._internal.column_labels):
            # 'y' is a Spark DataFrame that selects one column.
            y = sdf.select(self.data._internal.spark_column_for(label))
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            label = pprint_thing(label if len(label) > 1 else label[0])
            kwds["label"] = label
            style, kwds = self._apply_style_colors(colors, kwds, i, label)
            if style is not None:
                kwds["style"] = style
            kwds = self._make_plot_keywords(kwds, y)
            artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
            self._add_legend_handle(artists[0], label, index=i)
    def _get_ind(self, y):
        # Evaluation points for the density curve.
        return KdePlotBase.get_ind(y, self.ind)
    @classmethod
    def _plot(
        cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
    ):
        """Compute the KDE for one column and draw it as a line."""
        y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
        lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
        return lines
# Registry of pandas-on-Spark plot classes, keyed by their `_kind` string.
_klasses = [
    PandasOnSparkHistPlot,
    PandasOnSparkBarPlot,
    PandasOnSparkBoxPlot,
    PandasOnSparkPiePlot,
    PandasOnSparkAreaPlot,
    PandasOnSparkLinePlot,
    PandasOnSparkBarhPlot,
    PandasOnSparkScatterPlot,
    PandasOnSparkKdePlot,
]
_plot_klass = {klass._kind: klass for klass in _klasses}
# Plot kinds available to both Series and DataFrame, plus the
# DataFrame-only kinds; the union of all of them is what the dispatcher
# accepts before checking the concrete input type.
_common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"}
_series_kinds = set(_common_kinds)
_dataframe_kinds = _common_kinds | {"scatter", "hexbin"}
_pandas_on_spark_all_kinds = _common_kinds | _series_kinds | _dataframe_kinds
def plot_pandas_on_spark(data, kind, **kwargs):
    """Dispatch a plot request to the Series or DataFrame plotting path.

    Rejects unknown kinds outright; kinds known globally but not
    supported for the concrete input type raise via
    ``unsupported_function``.
    """
    if kind not in _pandas_on_spark_all_kinds:
        raise ValueError("{} is not a valid plot kind".format(kind))
    from pyspark.pandas import DataFrame, Series
    if isinstance(data, Series):
        if kind in _series_kinds:
            return plot_series(data=data, kind=kind, **kwargs)
        return unsupported_function(class_name="pd.Series", method_name=kind)()
    elif isinstance(data, DataFrame):
        if kind in _dataframe_kinds:
            return plot_frame(data=data, kind=kind, **kwargs)
        return unsupported_function(class_name="pd.DataFrame", method_name=kind)()
def plot_series(
    data,
    kind="line",
    ax=None,  # Series unique
    figsize=None,
    use_index=True,
    title=None,
    grid=None,
    legend=False,
    style=None,
    logx=False,
    logy=False,
    loglog=False,
    xticks=None,
    yticks=None,
    xlim=None,
    ylim=None,
    rot=None,
    fontsize=None,
    colormap=None,
    table=False,
    yerr=None,
    xerr=None,
    label=None,
    secondary_y=False,  # Series unique
    **kwds
):
    """
    Make plots of Series using matplotlib / pylab.
    Each plot kind has a corresponding method on the
    ``Series.plot`` accessor:
    ``s.plot(kind='line')`` is equivalent to
    ``s.plot.line()``.
    Parameters
    ----------
    data : Series
    kind : str
        - 'line' : line plot (default)
        - 'bar' : vertical bar plot
        - 'barh' : horizontal bar plot
        - 'hist' : histogram
        - 'box' : boxplot
        - 'kde' : Kernel Density Estimation plot
        - 'density' : same as 'kde'
        - 'area' : area plot
        - 'pie' : pie plot
    ax : matplotlib axes object
        If not passed, uses gca()
    figsize : a tuple (width, height) in inches
    use_index : boolean, default True
        Use index as ticks for x axis
    title : string or list
        Title to use for the plot. If a string is passed, print the string at
        the top of the figure. If a list is passed and `subplots` is True,
        print each item in the list above the corresponding subplot.
    grid : boolean, default None (matlab style default)
        Axis grid lines
    legend : False/True/'reverse'
        Place legend on axis subplots
    style : list or dict
        matplotlib line style per column
    logx : boolean, default False
        Use log scaling on x axis
    logy : boolean, default False
        Use log scaling on y axis
    loglog : boolean, default False
        Use log scaling on both x and y axes
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    rot : int, default None
        Rotation for ticks (xticks for vertical, yticks for horizontal plots)
    fontsize : int, default None
        Font size for xticks and yticks
    colormap : str or matplotlib colormap object, default None
        Colormap to select colors from. If string, load colormap with that name
        from matplotlib.
    colorbar : boolean, optional
        If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
    position : float
        Specify relative alignments for bar plot layout.
        From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
    table : boolean, Series or DataFrame, default False
        If True, draw a table using the data in the DataFrame and the data will
        be transposed to meet matplotlib's default layout.
        If a Series or DataFrame is passed, use passed data to draw a table.
    yerr : DataFrame, Series, array-like, dict and str
        See :ref:`Plotting with Error Bars <visualization.errorbars>` for
        detail.
    xerr : same types as yerr.
    label : label argument to provide to plot
    secondary_y : boolean or sequence of ints, default False
        If True then y-axis will be on the right
    mark_right : boolean, default True
        When using a secondary_y axis, automatically mark the column
        labels with "(right)" in the legend
    **kwds : keywords
        Options to pass to matplotlib plotting method
    Returns
    -------
    axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
    Notes
    -----
    - See matplotlib documentation online for more on this subject
    - If `kind` = 'bar' or 'barh', you can specify relative alignments
      for bar plot layout by `position` keyword.
      From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
    """
    # function copied from pandas.plotting._core
    # so it calls modified _plot below
    import matplotlib.pyplot as plt
    # Reuse the current figure's axes when one is already open and no
    # explicit axes was passed, mirroring pandas' behavior.
    if ax is None and len(plt.get_fignums()) > 0:
        with plt.rc_context():
            ax = plt.gca()
        ax = PandasMPLPlot._get_ax_layer(ax)
    return _plot(
        data,
        kind=kind,
        ax=ax,
        figsize=figsize,
        use_index=use_index,
        title=title,
        grid=grid,
        legend=legend,
        style=style,
        logx=logx,
        logy=logy,
        loglog=loglog,
        xticks=xticks,
        yticks=yticks,
        xlim=xlim,
        ylim=ylim,
        rot=rot,
        fontsize=fontsize,
        colormap=colormap,
        table=table,
        yerr=yerr,
        xerr=xerr,
        label=label,
        secondary_y=secondary_y,
        **kwds,
    )
def plot_frame(
    data,
    x=None,
    y=None,
    kind="line",
    ax=None,
    subplots=None,
    sharex=None,
    sharey=False,
    layout=None,
    figsize=None,
    use_index=True,
    title=None,
    grid=None,
    legend=True,
    style=None,
    logx=False,
    logy=False,
    loglog=False,
    xticks=None,
    yticks=None,
    xlim=None,
    ylim=None,
    rot=None,
    fontsize=None,
    colormap=None,
    table=False,
    yerr=None,
    xerr=None,
    secondary_y=False,
    sort_columns=False,
    **kwds
):
    """
    Make plots of DataFrames using matplotlib / pylab.
    Each plot kind has a corresponding method on the
    ``DataFrame.plot`` accessor:
    ``psdf.plot(kind='line')`` is equivalent to
    ``psdf.plot.line()``.
    Parameters
    ----------
    data : DataFrame
    kind : str
        - 'line' : line plot (default)
        - 'bar' : vertical bar plot
        - 'barh' : horizontal bar plot
        - 'hist' : histogram
        - 'box' : boxplot
        - 'kde' : Kernel Density Estimation plot
        - 'density' : same as 'kde'
        - 'area' : area plot
        - 'pie' : pie plot
        - 'scatter' : scatter plot
    ax : matplotlib axes object
        If not passed, uses gca()
    x : label or position, default None
    y : label, position or list of label, positions, default None
        Allows plotting of one column versus another.
    figsize : a tuple (width, height) in inches
    use_index : boolean, default True
        Use index as ticks for x axis
    title : string or list
        Title to use for the plot. If a string is passed, print the string at
        the top of the figure. If a list is passed and `subplots` is True,
        print each item in the list above the corresponding subplot.
    grid : boolean, default None (matlab style default)
        Axis grid lines
    legend : False/True/'reverse'
        Place legend on axis subplots
    style : list or dict
        matplotlib line style per column
    logx : boolean, default False
        Use log scaling on x axis
    logy : boolean, default False
        Use log scaling on y axis
    loglog : boolean, default False
        Use log scaling on both x and y axes
    xticks : sequence
        Values to use for the xticks
    yticks : sequence
        Values to use for the yticks
    xlim : 2-tuple/list
    ylim : 2-tuple/list
    sharex: bool or None, default is None
        Whether to share x axis or not.
    sharey: bool, default is False
        Whether to share y axis or not.
    rot : int, default None
        Rotation for ticks (xticks for vertical, yticks for horizontal plots)
    fontsize : int, default None
        Font size for xticks and yticks
    colormap : str or matplotlib colormap object, default None
        Colormap to select colors from. If string, load colormap with that name
        from matplotlib.
    colorbar : boolean, optional
        If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
    position : float
        Specify relative alignments for bar plot layout.
        From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
    table : boolean, Series or DataFrame, default False
        If True, draw a table using the data in the DataFrame and the data will
        be transposed to meet matplotlib's default layout.
        If a Series or DataFrame is passed, use passed data to draw a table.
    yerr : DataFrame, Series, array-like, dict and str
        See :ref:`Plotting with Error Bars <visualization.errorbars>` for
        detail.
    xerr : same types as yerr.
    label : label argument to provide to plot
    secondary_y : boolean or sequence of ints, default False
        If True then y-axis will be on the right
    mark_right : boolean, default True
        When using a secondary_y axis, automatically mark the column
        labels with "(right)" in the legend
    sort_columns: bool, default is False
        When True, will sort values on plots.
    **kwds : keywords
        Options to pass to matplotlib plotting method
    Returns
    -------
    axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
    Notes
    -----
    - See matplotlib documentation online for more on this subject
    - If `kind` = 'bar' or 'barh', you can specify relative alignments
      for bar plot layout by `position` keyword.
      From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
    """
    # Pure pass-through: all keyword handling happens in _plot below.
    return _plot(
        data,
        kind=kind,
        x=x,
        y=y,
        ax=ax,
        figsize=figsize,
        use_index=use_index,
        title=title,
        grid=grid,
        legend=legend,
        subplots=subplots,
        style=style,
        logx=logx,
        logy=logy,
        loglog=loglog,
        xticks=xticks,
        yticks=yticks,
        xlim=xlim,
        ylim=ylim,
        rot=rot,
        fontsize=fontsize,
        colormap=colormap,
        table=table,
        yerr=yerr,
        xerr=xerr,
        sharex=sharex,
        sharey=sharey,
        secondary_y=secondary_y,
        layout=layout,
        sort_columns=sort_columns,
        **kwds,
    )
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
    """Instantiate the plot class for ``kind``, run it, and return its result.

    Copied from ``pandas.plotting._core`` and adapted to preprocess
    pandas-on-Spark DataFrames (``x`` as index, ``y`` as column selection)
    before the plot object is built.
    """
    from pyspark.pandas import DataFrame
    # Normalize the requested kind; 'density' is an alias for 'kde'.
    normalized = kind.lower().strip()
    if normalized == "density":
        normalized = "kde"
    kind = normalized
    if kind not in _all_kinds:
        raise ValueError("%r is not a valid plot kind" % kind)
    klass = _plot_klass[kind]
    # scatter and hexbin are inherited from PlanePlot which require x and y
    if kind in ("scatter", "hexbin"):
        plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
    else:
        # check data type and do preprocess before applying plot
        if isinstance(data, DataFrame):
            if x is not None:
                data = data.set_index(x)
            # TODO: check if value of y is plottable
            if y is not None:
                data = data[y]
        plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
    plot_obj.generate()
    plot_obj.draw()
    return plot_obj.result
| apache-2.0 |
Jimmy-Morzaria/scikit-learn | sklearn/datasets/base.py | 15 | 17969 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
    """Container object for datasets: dictionary-like object that
    exposes its keys as attributes.

    Unlike the historical ``self.__dict__ = self`` trick, attribute
    access is routed through the dict explicitly, which avoids the
    self-referential ``__dict__`` that broke pickling round-trips
    between versions.
    """
    def __init__(self, **kwargs):
        dict.__init__(self, kwargs)

    def __setattr__(self, key, value):
        # Attribute assignment stores into the dict itself.
        self[key] = value

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails, so dict
        # methods and dunder attributes are unaffected.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __dir__(self):
        # Expose the keys for tab-completion.
        return list(self.keys())

    def __setstate__(self, state):
        # Ignore any pickled __dict__ state: older pickles carried a
        # self-referential __dict__ whose restoration would shadow the
        # dict entries. All real state lives in the dict items, which
        # the dict machinery restores on its own.
        pass
def get_data_home(data_home=None):
    """Return the path of the scikit-learn data dir.
    This folder is used by some large dataset loaders to avoid
    downloading the data several times.
    By default the data dir is set to a folder named 'scikit_learn_data'
    in the user home folder.
    Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
    '~' symbol is expanded to the user home folder.
    If the folder does not already exist, it is automatically created.
    """
    # Resolution order: explicit argument, environment variable, default.
    if data_home is None:
        default = join('~', 'scikit_learn_data')
        data_home = environ.get('SCIKIT_LEARN_DATA', default)
    path = expanduser(data_home)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache."""
    # Resolve (and, as a side effect of get_data_home, possibly create)
    # the directory, then remove it entirely.
    shutil.rmtree(get_data_home(data_home))
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, encoding=None,
               decode_error='strict', random_state=0):
    """Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored a two levels folder
    structure such as the following:
        container_folder/
            category_1_folder/
                file_1.txt
                file_2.txt
                ...
                file_42.txt
            category_2_folder/
                file_43.txt
                file_44.txt
                ...
    The folder names are used as supervised signal label names. The
    individual file names are not important.
    This function does not try to extract features into a numpy array or
    scipy sparse matrix. In addition, if load_content is false it
    does not try to load the files in memory.
    To use text files in a scikit-learn classification or clustering
    algorithm, you will need to use the `sklearn.feature_extraction.text`
    module to build a feature extraction transformer that suits your
    problem.
    If you set load_content=True, you should also specify the encoding of
    the text using the 'encoding' parameter. For many modern text files,
    'utf-8' will be the correct encoding. If you leave encoding equal to None,
    then the content will be made of bytes instead of Unicode, and you will
    not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kind of unstructured
    data input such as images, audio, video, ...
    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category
    description : string or unicode, optional (default=None)
        A paragraph describing the characteristic of the dataset: its source,
        reference, etc.
    categories : A collection of strings or None, optional (default=None)
        If None (default), load all the categories.
        If not None, list of category names to load (other categories ignored).
    load_content : boolean, optional (default=True)
        Whether to load or not the content of the different files. If
        true a 'data' attribute containing the text information is present
        in the data structure returned. If not, a filenames attribute
        gives the path to the files.
    encoding : string or None (default is None)
        If None, do not try to decode the content of the files (e.g. for
        images or other non-text content).
        If not None, encoding to use to decode text files to Unicode if
        load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, optional
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. Passed as keyword
        argument 'errors' to bytes.decode.
    shuffle : bool, optional (default=True)
        Whether or not to shuffle the data: might be important for models that
        make the assumption that the samples are independent and identically
        distributed (i.i.d.), such as stochastic gradient descent.
    random_state : int, RandomState instance or None, optional (default=0)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are: either
        data, the raw text data to learn, or 'filenames', the files
        holding it, 'target', the classification labels (integer index),
        'target_names', the meaning of the labels, and 'DESCR', the full
        description of the dataset.
    """
    # One subfolder per category; sort for a deterministic label order.
    folders = sorted(f for f in listdir(container_path)
                     if isdir(join(container_path, f)))
    if categories is not None:
        folders = [f for f in folders if f in categories]
    target = []
    target_names = []
    filenames = []
    for label, folder in enumerate(folders):
        target_names.append(folder)
        folder_path = join(container_path, folder)
        documents = [join(folder_path, d)
                     for d in sorted(listdir(folder_path))]
        filenames.extend(documents)
        target.extend([label] * len(documents))
    # convert to array for fancy indexing
    filenames = np.array(filenames)
    target = np.array(target)
    if shuffle:
        rng = check_random_state(random_state)
        order = np.arange(filenames.shape[0])
        rng.shuffle(order)
        filenames = filenames[order]
        target = target[order]
    if not load_content:
        return Bunch(filenames=filenames,
                     target_names=target_names,
                     target=target,
                     DESCR=description)
    # Read files as bytes; decode afterwards if an encoding was given.
    data = []
    for filename in filenames:
        with open(filename, 'rb') as f:
            data.append(f.read())
    if encoding is not None:
        data = [d.decode(encoding, decode_error) for d in data]
    return Bunch(data=data,
                 filenames=filenames,
                 target_names=target_names,
                 target=target,
                 DESCR=description)
def load_iris():
    """Load and return the iris dataset (classification).
    The iris dataset is a classic and very easy multi-class classification
    dataset.
    =================   ==============
    Classes                          3
    Samples per class               50
    Samples total                  150
    Dimensionality                   4
    Features            real, positive
    =================   ==============
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'target_names', the meaning of the labels, 'feature_names', the
        meaning of the features, and 'DESCR', the
        full description of the dataset.
    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50, and want to
    know their class name.
    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    module_path = dirname(__file__)
    with open(join(module_path, 'data', 'iris.csv')) as csv_file:
        data_file = csv.reader(csv_file)
        # Header row: n_samples, n_features, then the class names.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])
        data = np.empty((n_samples, n_features))
        # Use builtin int/float dtypes: `np.int` and `np.float` were
        # deprecated aliases for the builtins and were removed in
        # NumPy >= 1.24, where they raise AttributeError.
        target = np.empty((n_samples,), dtype=int)
        for i, ir in enumerate(data_file):
            data[i] = np.asarray(ir[:-1], dtype=float)
            target[i] = np.asarray(ir[-1], dtype=int)
    with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
        fdescr = rst_file.read()
    return Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR=fdescr,
                 feature_names=['sepal length (cm)', 'sepal width (cm)',
                                'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
    """Load and return the digits dataset (classification).
    Each datapoint is a 8x8 image of a digit.
    =================   ==============
    Classes                         10
    Samples per class             ~180
    Samples total                 1797
    Dimensionality                  64
    Features             integers 0-16
    =================   ==============
    Parameters
    ----------
    n_class : integer, between 0 and 10, optional (default=10)
        The number of classes to return.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'images', the images corresponding
        to each sample, 'target', the classification labels for each
        sample, 'target_names', the meaning of the labels, and 'DESCR',
        the full description of the dataset.
    Examples
    --------
    To load the data and visualize the images::
        >>> from sklearn.datasets import load_digits
        >>> digits = load_digits()
        >>> print(digits.data.shape)
        (1797, 64)
        >>> import pylab as pl #doctest: +SKIP
        >>> pl.gray() #doctest: +SKIP
        >>> pl.matshow(digits.images[0]) #doctest: +SKIP
        >>> pl.show() #doctest: +SKIP
    """
    module_path = dirname(__file__)
    data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
                      delimiter=',')
    with open(join(module_path, 'descr', 'digits.rst')) as f:
        descr = f.read()
    # Last column is the label; the rest is the flattened 8x8 image.
    target = data[:, -1]
    flat_data = data[:, :-1]
    images = flat_data.view()
    images.shape = (-1, 8, 8)
    if n_class < 10:
        # Restrict to the first `n_class` digit classes.
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]
    # Cast with the builtin `int`: `np.int` was a deprecated alias for the
    # builtin and was removed in NumPy >= 1.24 (AttributeError there).
    return Bunch(data=flat_data,
                 target=target.astype(int),
                 target_names=np.arange(10),
                 images=images,
                 DESCR=descr)
def load_diabetes():
    """Load and return the diabetes dataset (regression).
    ==============      ==================
    Samples total       442
    Dimensionality      10
    Features            real, -.2 < x < .2
    Targets             integer 25 - 346
    ==============      ==================
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the regression target for each
        sample.
    """
    # Data and targets ship as two gzipped text files next to this module.
    data_dir = join(dirname(__file__), 'data')
    return Bunch(
        data=np.loadtxt(join(data_dir, 'diabetes_data.csv.gz')),
        target=np.loadtxt(join(data_dir, 'diabetes_target.csv.gz')),
    )
def load_linnerud():
    """Load and return the linnerud dataset (multivariate regression).

    Samples total: 20
    Dimensionality: 3 for both data and targets
    Features: integer
    Targets: integer

    Returns
    -------
    data : Bunch
        Dictionary-like object whose interesting attributes are 'data' and
        'target', the two multivariate datasets, with 'data' holding the
        exercise measurements and 'target' the physiological ones, plus
        'feature_names', 'target_names' and 'DESCR'.
    """
    data_dir = join(dirname(__file__), 'data/')
    exercise_path = data_dir + 'linnerud_exercise.csv'
    physio_path = data_dir + 'linnerud_physiological.csv'

    # Numeric payload: skip the single header row of each CSV.
    exercise = np.loadtxt(exercise_path, skiprows=1)
    physiological = np.loadtxt(physio_path, skiprows=1)

    # Column names come from the first line of each CSV.
    with open(exercise_path) as handle:
        exercise_header = handle.readline().split()
    with open(physio_path) as handle:
        physio_header = handle.readline().split()
    with open(dirname(__file__) + '/descr/linnerud.rst') as handle:
        description = handle.read()

    return Bunch(data=exercise, feature_names=exercise_header,
                 target=physiological,
                 target_names=physio_header,
                 DESCR=description)
def load_boston():
    """Load and return the boston house-prices dataset (regression).

    ==============   ==============
    Samples total    506
    Dimensionality   13
    Features         real, positive
    Targets          real 5. - 50.
    ==============   ==============

    Returns
    -------
    data : Bunch
        Dictionary-like object whose interesting attributes are 'data',
        the feature matrix, 'target', the regression targets, and 'DESCR',
        the full description of the dataset.

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> boston = load_boston()
    >>> print(boston.data.shape)
    (506, 13)
    """
    module_path = dirname(__file__)

    with open(join(module_path, 'descr', 'boston_house_prices.rst')) as handle:
        descr_text = handle.read()

    with open(join(module_path, 'data', 'boston_house_prices.csv')) as handle:
        reader = csv.reader(handle)
        # First row stores the sample / feature counts.
        counts = next(reader)
        n_samples, n_features = int(counts[0]), int(counts[1])
        # Second row stores the column names (last one is the target).
        feature_names = np.array(next(reader))
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        for row_idx, row in enumerate(reader):
            data[row_idx] = np.asarray(row[:-1], dtype=np.float)
            target[row_idx] = np.asarray(row[-1], dtype=np.float)

    return Bunch(data=data,
                 target=target,
                 # last column is target value
                 feature_names=feature_names[:-1],
                 DESCR=descr_text)
def load_sample_images():
    """Load sample images for image manipulation.

    Loads both ``china`` and ``flower``.

    Returns
    -------
    data : Bunch
        Dictionary-like object with the following attributes: 'images',
        the two sample images, 'filenames', the file names for the images,
        and 'DESCR', the full description of the dataset.

    Examples
    --------
    To load the data and visualize the images:

    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> first_img_data = dataset.images[0] #doctest: +SKIP
    >>> first_img_data.shape               #doctest: +SKIP
    (427, 640, 3)
    >>> first_img_data.dtype               #doctest: +SKIP
    dtype('uint8')
    """
    # Import imread lazily so this module does not hard-depend on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    module_path = join(dirname(__file__), "images")
    with open(join(module_path, 'README.txt')) as readme:
        descr = readme.read()
    jpg_paths = [join(module_path, fname)
                 for fname in os.listdir(module_path)
                 if fname.endswith(".jpg")]
    # Decode every jpeg found in the images folder.
    decoded = [imread(path) for path in jpg_paths]
    return Bunch(images=decoded,
                 filenames=jpg_paths,
                 DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Parameters
    ----------
    image_name: {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded.

    Returns
    -------
    img: 3D array
        The image as a numpy array: height x width x color.

    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    dataset = load_sample_images()
    # Return the first loaded image whose filename matches the request.
    for idx, fname in enumerate(dataset.filenames):
        if fname.endswith(image_name):
            return dataset.images[idx]
    # NOTE(review): AttributeError kept for backward compatibility with
    # existing callers, although ValueError would be more conventional.
    raise AttributeError("Cannot find sample image: %s" % image_name)
| bsd-3-clause |
maxlikely/scikit-learn | sklearn/metrics/pairwise.py | 1 | 28395 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" to objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import array2d
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
    """Validate X and Y for use by a pairwise metric and return them.

    If Y is None it is set as a pointer to X (i.e. not a copy); if Y is
    given, it is validated independently. Both inputs are coerced to at
    least two-dimensional float arrays (dense or CSR sparse), and their
    second dimensions must agree.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]

    Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array. If Y was None, safe_Y will be a pointer to X.
    """
    if Y is None or Y is X:
        X = Y = atleast2d_or_csr(X, dtype=np.float)
    else:
        X = atleast2d_or_csr(X, dtype=np.float)
        Y = atleast2d_or_csr(Y, dtype=np.float)
    if X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))
    return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two main advantages. First, it is computationally
    efficient when dealing with sparse data. Second, if x varies but y
    remains unchanged, then the right-most dot-product `dot(y, y)` can be
    pre-computed.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]

    Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]

    Y_norm_squared : array-like, shape = [n_samples_2], optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)

    # Row-wise squared norms of X, kept as an (n, 1) column so they
    # broadcast over the columns of the result below.
    # NOTE(review): for sparse X, .sum(axis=1) yields an np.matrix — the
    # broadcasting still works, but the intermediate types differ from the
    # dense branch; confirm before restructuring.
    if issparse(X):
        XX = X.multiply(X).sum(axis=1)
    else:
        XX = np.sum(X * X, axis=1)[:, np.newaxis]

    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is None:
        if issparse(Y):
            # scipy.sparse matrices don't have element-wise scalar
            # exponentiation, and tocsr has a copy kwarg only on CSR matrices.
            YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
            YY.data **= 2
            YY = np.asarray(YY.sum(axis=1)).T
        else:
            YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
    else:
        # Caller-supplied norms: must be a single row matching Y's samples.
        YY = atleast2d_or_csr(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")

    # TODO: a faster Cython implementation would do the clipping of negative
    # values in a single pass over the output matrix.
    # Assemble ||x||^2 - 2 x.y + ||y||^2 in place on top of the dot-products.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Clip tiny negative values produced by floating-point cancellation.
    np.maximum(distances, 0, distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    return distances if squared else np.sqrt(distances)
def manhattan_distances(X, Y=None, sum_over_features=True):
    """Compute the L1 distances between the vectors in X and Y.

    With ``sum_over_features`` equal to False it returns the componentwise
    distances instead of their sum.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).

    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise l1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0., 2.],
           [ 4., 4.]])
    """
    if issparse(X) or issparse(Y):
        raise ValueError("manhattan_distance does not support sparse"
                         " matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # Broadcast to an (n_X, n_Y, n_features) tensor of absolute differences.
    componentwise = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
    if not sum_over_features:
        return componentwise.reshape((-1, X.shape[1]))
    return np.sum(componentwise, axis=2)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel (plain dot product) between X and Y.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    degree : int

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # Default bandwidth: inverse of the feature count.
        gamma = 1.0 / X.shape[1]
    gram = linear_kernel(X, Y)
    gram *= gamma
    gram += coef0
    gram **= degree
    return gram
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # Default bandwidth: inverse of the feature count.
        gamma = 1.0 / X.shape[1]
    gram = linear_kernel(X, Y)
    gram *= gamma
    gram += coef0
    return np.tanh(gram, gram)  # tanh applied in place; returns gram
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        # Default bandwidth: inverse of the feature count.
        gamma = 1.0 / X.shape[1]
    kernel = euclidean_distances(X, Y, squared=True)
    kernel *= -gamma
    return np.exp(kernel, kernel)  # exp applied in place; returns kernel
def cosine_similarity(X, Y=None):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    kernel matrix : array_like
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)
    X_unit = normalize(X, copy=True)
    # Skip re-normalizing when Y aliases X (including the Y=None case).
    Y_unit = X_unit if X is Y else normalize(Y, copy=True)
    return linear_kernel(X_unit, Y_unit)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    ### we don't use check_pairwise to preserve float32.
    if Y is None:
        # optimize this case!
        X = array2d(X)
        if X.dtype != np.float32:
            # BUG FIX: ndarray.astype returns a *new* array; the result was
            # previously discarded, so non-float32 input was never converted.
            X = X.astype(np.float)
        Y = X
        if (X < 0).any():
            raise ValueError("X contains negative values.")
    else:
        X = array2d(X)
        Y = array2d(Y)
        if X.shape[1] != Y.shape[1]:
            raise ValueError("Incompatible dimension for X and Y matrices: "
                             "X.shape[1] == %d while Y.shape[1] == %d" % (
                                 X.shape[1], Y.shape[1]))
        if X.dtype != np.float32 or Y.dtype != np.float32:
            # if not both are 32bit float, convert to 64bit float
            X = X.astype(np.float)
            Y = Y.astype(np.float)
        if (X < 0).any():
            raise ValueError("X contains negative values.")
        if (Y < 0).any():
            raise ValueError("Y contains negative values.")
    # The Cython helper fills `result` in place.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel between X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # The additive kernel is already negative, so scaling by +gamma and
    # exponentiating yields exp(-gamma * chi2-distance).
    kernel = additive_chi2_kernel(X, Y)
    kernel *= gamma
    np.exp(kernel, kernel)  # exponentiate in place
    return kernel
# Helper functions - distance

# Registry mapping metric-name strings accepted by pairwise_distances()
# to their implementing functions.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances,
    'cityblock': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.

    Simply returns the mapping from valid metric strings to the distance
    functions they select, so that the mapping can be described in one
    place:

    ============ ====================================
    metric       Function
    ============ ====================================
    'cityblock'  metrics.pairwise.manhattan_distances
    'euclidean'  metrics.pairwise.euclidean_distances
    'l1'         metrics.pairwise.manhattan_distances
    'l2'         metrics.pairwise.euclidean_distances
    'manhattan'  metrics.pairwise.manhattan_distances
    ============ ====================================
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Split the pairwise matrix into n_jobs even column slices and
    compute them in parallel, then stitch the results back together."""
    if n_jobs < 0:
        # Negative n_jobs means "all CPUs but (-n_jobs - 1)".
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    column_slices = gen_even_slices(Y.shape[0], n_jobs)
    partial_results = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed(func)(X, Y[sl], **kwds) for sl in column_slices)
    return np.hstack(partial_results)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.

    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Please note that support for sparse matrices is currently limited to those
    metrics listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.

    Valid values for metric are:

    - from scikit-learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']

    - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
      'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
      'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics.

    Note in the case of 'euclidean' and 'cityblock' (which are valid
    scipy.spatial.distance metrics), the values will use the scikit-learn
    implementation, which is faster and has support for sparse matrices.
    For a verbose description of the metrics from scikit-learn, see the
    __doc__ of the sklearn.pairwise.distance_metrics function.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    # Dispatch order: precomputed matrix -> known string metric ->
    # user-supplied callable -> fall back to scipy.spatial.distance.
    if metric == "precomputed":
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate distance for each element in X and Y.
        # FIXME: can use n_jobs here too
        D = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            # Only compute the upper triangle when X is Y: the distance
            # is assumed to be symmetric and mirrored below.
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # distance assumed to be symmetric.
                D[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    D[j][i] = D[i][j]
        return D
    else:
        # Note: the distance module doesn't support sparse matrices!
        if type(X) is csr_matrix:
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        if Y is None:
            # pdist returns a condensed matrix; expand it to square form.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        else:
            if type(Y) is csr_matrix:
                raise TypeError("scipy distance metrics do not"
                                " support sparse matrices.")
            return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels

# Registry mapping kernel-name strings accepted by pairwise_kernels()
# to their implementing functions.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """Valid metrics for pairwise_kernels.

    Simply returns the mapping from valid kernel strings to the kernel
    functions they select, so that the mapping can be described in one
    place:

    ===============   ========================================
    metric            Function
    ===============   ========================================
    'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
    'chi2'            sklearn.pairwise.chi2_kernel
    'linear'          sklearn.pairwise.linear_kernel
    'poly'            sklearn.pairwise.polynomial_kernel
    'polynomial'      sklearn.pairwise.polynomial_kernel
    'rbf'             sklearn.pairwise.rbf_kernel
    'sigmoid'         sklearn.pairwise.sigmoid_kernel
    'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters accepted by each kernel in PAIRWISE_KERNEL_FUNCTIONS,
# used by pairwise_kernels(..., filter_params=True).
# BUG FIX: the keys must match the names registered in
# PAIRWISE_KERNEL_FUNCTIONS. Previously 'chi2' mapped to () (which silently
# stripped its valid 'gamma' parameter), there was a bogus 'exp_chi2' entry
# that is not a registered metric, and 'additive_chi2' was missing entirely
# (a KeyError under filter_params).
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": set(("gamma",)),
    "linear": (),
    "rbf": set(("gamma",)),
    "sigmoid": set(("gamma", "coef0")),
    "polynomial": set(("gamma", "degree", "coef0")),
    "poly": set(("gamma", "degree", "coef0")),
    "cosine": set(), }
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """ Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.

    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.

    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.

    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].

    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    filter_params: boolean
        Whether to filter invalid parameters or not.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # Dispatch order mirrors pairwise_distances(): precomputed matrix ->
    # known string metric -> user-supplied callable -> error.
    if metric == "precomputed":
        return X
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the keyword arguments this kernel accepts.
            kwds = dict((k, kwds[k]) for k in kwds
                        if k in KERNEL_PARAMS[metric])
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
        if n_jobs == 1:
            return func(X, Y, **kwds)
        else:
            return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
    elif callable(metric):
        # Check matrices first (this is usually done by the metric).
        X, Y = check_pairwise_arrays(X, Y)
        n_x, n_y = X.shape[0], Y.shape[0]
        # Calculate kernel for each element in X and Y.
        K = np.zeros((n_x, n_y), dtype='float')
        for i in range(n_x):
            # Only compute the upper triangle when X is Y: the kernel is
            # assumed to be symmetric and mirrored below.
            start = 0
            if X is Y:
                start = i
            for j in range(start, n_y):
                # Kernel assumed to be symmetric.
                K[i][j] = metric(X[i], Y[j], **kwds)
                if X is Y:
                    K[j][i] = K[i][j]
        return K
    else:
        raise AttributeError("Unknown metric %s" % metric)
| bsd-3-clause |
HaoMood/cs231n | assignment1/assignment1sol/features/features.py | 30 | 4807 | import matplotlib
import numpy as np
from scipy.ndimage import uniform_filter
def extract_features(imgs, feature_fns, verbose=False):
    """
    Given pixel data for images and several feature functions that can operate
    on single images, apply all feature functions to all images, concatenating
    the feature vectors for each image and storing the features for all images
    in a single matrix.

    Inputs:
    - imgs: N x H X W X C array of pixel data for N images.
    - feature_fns: List of k feature functions. The ith feature function should
      take as input an H x W x D array and return a (one-dimensional) array of
      length F_i.
    - verbose: Boolean; if true, print progress.

    Returns:
    An array of shape (N, F_1 + ... + F_k) where each column is the
    concatenation of all features for a single image.
    """
    # NOTE(review): this file is Python 2 code (xrange, print statement).
    num_images = imgs.shape[0]
    if num_images == 0:
        return np.array([])

    # Use the first image to determine feature dimensions
    feature_dims = []
    first_image_features = []
    for feature_fn in feature_fns:
        feats = feature_fn(imgs[0].squeeze())
        assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
        feature_dims.append(feats.size)
        first_image_features.append(feats)

    # Now that we know the dimensions of the features, we can allocate a single
    # big array to store all features as columns.
    total_feature_dim = sum(feature_dims)
    imgs_features = np.zeros((num_images, total_feature_dim))
    imgs_features[0] = np.hstack(first_image_features).T

    # Extract features for the rest of the images, writing each feature
    # vector into its [idx, next_idx) column slice.
    for i in xrange(1, num_images):
        idx = 0
        for feature_fn, feature_dim in zip(feature_fns, feature_dims):
            next_idx = idx + feature_dim
            imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
            idx = next_idx
        if verbose and i % 1000 == 0:
            print 'Done extracting features for %d / %d images' % (i, num_images)

    return imgs_features
def rgb2gray(rgb):
    """Convert an RGB image to grayscale using ITU-R BT.601 luma weights.

    Parameters:
      rgb : RGB image, array of shape (..., 3+) (only the first 3 channels
            are used)

    Returns:
      gray : grayscale image with the input's leading shape
    """
    # BUG FIX: the blue coefficient was mistyped as 0.144; the BT.601 luma
    # weights are 0.299, 0.587, 0.114 (they sum to 1.0).
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def hog_feature(im):
"""Compute Histogram of Gradient (HOG) feature for an image
Modified from skimage.feature.hog
http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog
Reference:
Histograms of Oriented Gradients for Human Detection
Navneet Dalal and Bill Triggs, CVPR 2005
Parameters:
im : an input grayscale or rgb image
Returns:
feat: Histogram of Gradient (HOG) feature
"""
# convert rgb to grayscale if needed
if im.ndim == 3:
image = rgb2gray(im)
else:
image = np.at_least_2d(im)
sx, sy = image.shape # image size
orientations = 9 # number of gradient bins
cx, cy = (8, 8) # pixels per cell
gx = np.zeros(image.shape)
gy = np.zeros(image.shape)
gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction
gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction
grad_mag = np.sqrt(gx ** 2 + gy ** 2) # gradient magnitude
grad_ori = np.arctan2(gy, (gx + 1e-15)) * (180 / np.pi) + 90 # gradient orientation
n_cellsx = int(np.floor(sx / cx)) # number of cells in x
n_cellsy = int(np.floor(sy / cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))
for i in range(orientations):
# create new integral image for this orientation
# isolate orientations in this range
temp_ori = np.where(grad_ori < 180 / orientations * (i + 1),
grad_ori, 0)
temp_ori = np.where(grad_ori >= 180 / orientations * i,
temp_ori, 0)
# select magnitudes for those orientations
cond2 = temp_ori > 0
temp_mag = np.where(cond2, grad_mag, 0)
orientation_histogram[:,:,i] = uniform_filter(temp_mag, size=(cx, cy))[cx/2::cx, cy/2::cy].T
return orientation_histogram.ravel()
def color_histogram_hsv(im, nbin=10, xmin=0, xmax=255, normalized=True):
    """Compute a color histogram over the hue channel of an RGB image.

    Inputs:
    - im: H x W x C array of pixel data for an RGB image.
    - nbin: Number of histogram bins. (default: 10)
    - xmin: Minimum pixel value (default: 0)
    - xmax: Maximum pixel value (default: 255)
    - normalized: Whether to normalize the histogram (default: True)

    Returns:
      1D vector of length nbin giving the color histogram over the hue
      of the input image.

    Change vs. original: removed the unused local ``ndim = im.ndim``.
    """
    bins = np.linspace(xmin, xmax, nbin + 1)
    # rgb_to_hsv yields hue in [0, 1]; rescale it to [xmin, xmax] so it
    # lines up with the bin edges computed above.
    hsv = matplotlib.colors.rgb_to_hsv(im / xmax) * xmax
    imhist, bin_edges = np.histogram(hsv[:, :, 0], bins=bins,
                                     density=normalized)
    # With density=True this weights each count by its bin width so the
    # returned vector sums to 1 over the hue range.
    imhist = imhist * np.diff(bin_edges)
    # return histogram
    return imhist
pass
| gpl-3.0 |
srgblnch/MeasuredFillingPattern | tango-ds/MeasuredFillingPatternFCT/BunchAnalyzer.py | 1 | 43934 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
##############################################################################
## license : GPLv3+
##============================================================================
##
## File : BunchAnalyzer.py
##
## Project : Filling Pattern from the FCT
##
## description : Python source with the class that has the appropriate methods
## to...
##
## This file is part of Tango device class.
##
## Tango is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Tango is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Tango. If not, see <http://www.gnu.org/licenses/>.
##
## $Author : Laura Torino$ (first developer)
##
## $Revision : $
##
## $Date : $
##
## $HeadUrl : $
##
## copyleft : Cells / Alba Synchrotron
## Cerdanyola/Bellaterra
## Spain
##############################################################################
###############################################################################
# This program analyses data coming from a FCT:
# - Data are uploaded
# - Low band filter is applied (500 MHz-1 GHz...)
# - Cutoff frequency as input(later to be fixed, probably 700 MHz)
# - Starting analyzing point fixed at 127 bins = 6.2497 ns
# - Consecutive peaks are considered only if one is positive and the other
# is above the negative threshold of 0.05*minimum of the filtered signal
# - Frequency and the Delta t (time window) as input (from the machine)
# DONE, to uncomment
# - Number of bucket
# - Number of filled bucket
# - Number of spurious bunches
# - Maximum peak to peak amplitude
###############################################################################
META = u"""
$URL: https://svn.code.sf.net/p/tango-ds/code/Servers/Calculation/FillingPatternFCT/src/BunchAnalyzer.py $
$LastChangedBy: sergiblanch $
$Date: 2012-12-12 11:33:48 +0100 (Wed, 12 Dec 2012)$
$Rev: 5901 $
License: GPL3+
Author: Laura Torino
""".encode('latin1')
from copy import copy,deepcopy
from numpy import *
import PyTango
from scipy import signal
import time
import traceback
class Attribute(object):
    """Proxy for one Tango device attribute.

    Caches the last known value, subscribes itself to CHANGE events to
    keep the cache fresh, and optionally forwards each new value to a
    callback. Reads and writes go through the ``value`` property.
    """
    def __init__(self,devName='SR/DI/DCCT',attrName='AverageCurrent',
                 callback=None,
                 info_stream=None,error_stream=None,
                 warn_stream=None,debug_stream=None):
        # The *_stream arguments let the owning device server inject its
        # loggers; when missing, the log helpers fall back to print().
        super(Attribute,self).__init__()
        try:
            self._devName = devName
            self._attrName = attrName
            self._name = devName+'/'+attrName
            self.info_stream = info_stream
            self.debug_stream = debug_stream
            self.warn_stream = warn_stream
            self.error_stream = error_stream
            self._devProxy = PyTango.DeviceProxy(devName)
            try:
                # Initial read may fail (device not exported yet); the
                # cache then starts empty and fills on the first event.
                self._attrValue = self._devProxy[attrName].value
            except:
                self._attrValue = None
            self._attrEvent = None
            self._callback = callback
            # True while one of our own writes is in flight, so the echo
            # CHANGE event is treated as a confirmation, not a change.
            self._writeSend = False
            self.subscribe_event()
        except Exception as e:
            self.error("%s/%s init exception" % (devName, attrName))
        finally:
            self.debug("%s/%s init done"%(devName,attrName))
    def info(self,msg):
        # Best-effort logging: fall back to stdout if no stream is set.
        try: self.info_stream("In %s Attribute: %s"%(self._name,msg))
        except: print("%s::info: %s"%(self._name,msg))
    def debug(self,msg):
        try: self.debug_stream("In %s Attribute: %s"%(self._name,msg))
        except: print("%s::debug: %s"%(self._name,msg))
    def warn(self,msg):
        try: self.warn_stream("In %s Attribute: %s"%(self._name,msg))
        except: print("%s::warn: %s"%(self._name,msg))
    def error(self,msg):
        try: self.error_stream("In %s Attribute: %s"%(self._name,msg))
        except: print("%s::error: %s"%(self._name,msg))
    def subscribe_event(self):
        """Subscribe self as listener for CHANGE events of the attribute."""
        try:
            self._attrEvent = self._devProxy.subscribe_event(self._attrName,
                                               PyTango.EventType.CHANGE_EVENT,
                                                             self)
        except PyTango.DevFailed as e:
            # Flatten the whole Tango error stack into one message.
            msgs = ""
            for each in e.args:
                msgs = "%s%s. " % (msgs, each.desc)
            self.error("%s::subscribe_event() DevFailed: %s" % (self._name, msgs))
            raise e
        except Exception as e:
            self.error("%s::subscribe_event() exception %s:"%(self._name,e))
        else:
            self.debug("%s::subscribe_event() subscribed"%(self._name))
    def unsubscribe_event(self):
        if self._attrEvent != None:
            self._devProxy.unsubscribe_event(self._attrEvent)
    def push_event(self,event):
        """Tango callback: validate the event, then update cache/callback."""
        try:
            if event != None:
                if event.attr_value != None and event.attr_value.value != None:
                    self.debug("%s::PushEvent() %s: %s"
                               %(self._name,event.attr_name,
                                 event.attr_value.value))
                else:
                    self.warn("%s::PushEvent() %s: value has None type"
                              %(self._name,event.attr_name))
                    return
            else:
                # NOTE(review): format string carries an unapplied %s --
                # the message is logged literally; confirm intent.
                self.warn("%s::PushEvent() received a event = None")
                return
        except Exception,e:
            self.error("%s::PushEvent() exception %s:"%(self._name,e))
        try:#when there is a callback, it's responsible to store the new value
            if self._writeSend:
                # Echo of our own write: confirm and swallow instead of
                # notifying the callback.
                if event.attr_value.value == self._attrValue:
                    self.info("%s::PushEvent() write (%s) applied confirmed"
                              %(self._name,event.attr_value.value))
                    self._writeSend = False
                    return
                else:
                    self.info("%s::PushEvent() write (%s) not applied (%s) yet"
                              %(self._name,self._attrValue,
                                event.attr_value.value))
                    return
            if self._callback != None:
                self.info("%s::PushEvent() callback call with value %s"
                          %(self._name,str(event.attr_value.value)))
                self._callback(event.attr_value.value)
            else:
                self._attrValue = event.attr_value.value
        except Exception,e:
            self.error("%s::PushEvent() callback exception: %s"
                       %(self._name,e))
            traceback.print_exc()
    @property
    def value(self):
        # Lazy read: with no cached value yet, query the device directly.
        if self._attrValue == None:
            try:
                self._attrValue = self._devProxy[self._attrName].value
            except Exception,e:
                self.error("Cannot read %s value: %s"%(self._attrName,e))
                raise e
        return self._attrValue
    @value.setter
    def value(self,value):
        try:
            self._devProxy[self._attrName] = value
            self._writeSend = True
        except Exception,e:
            self.error("Cannot write %s/%s due to exception: %s"
                       %(self._devName,self._attrName,e))
        else:
            if self._attrValue != value:
                self.info("Write to %s (%s)"%(self._attrName,str(value)))
                self._attrValue = value
class BunchAnalyzer(object):
    """Measured filling-pattern analysis of FCT waveforms from a scope.

    Averages waveforms collected in a cyclic buffer, band-pass filters
    the result and derives per-bucket intensities plus filled/spurious
    bunch counts. Talks to timing, scope, RF and DCCT Tango devices;
    ``parent`` is the hosting device server, used for logging, state
    changes and event emission.
    """
    def __init__(self,parent=None,
                 timingDevName=None,timingoutput=0,delayTick=18281216,
                 scopeDevName=None,cyclicBuffer=[],
                 rfDev=None,rfAttr=None,dcctDev=None,dcctAttr=None,
                 threshold=1,nAcquisitions=30,startingPoint=906,
                 max_cyclicBuf=100,alarm_cyclicBuf=50):
        # NOTE(review): ``cyclicBuffer=[]`` is a mutable default shared
        # by every instance created without an explicit buffer; confirm
        # only one analyzer per process, or always pass a fresh list.
        super(BunchAnalyzer,self).__init__()
        self._parent=parent
        #---- timing
        try:
            self._timingDevName = timingDevName
            self._timingProxy = PyTango.DeviceProxy(self._timingDevName)
        except Exception,e:
            self._timingDevName = ""
            self._timingProxy = None
            raise e
        self._timingoutput = timingoutput
        self._delayTick = delayTick
        self.debug("BunchAnalyzer.__init__() timming ok")
        #---- scope
        try:
            self._scopeDevName = scopeDevName
            self._scopeProxy = PyTango.DeviceProxy(self._scopeDevName)
            # Track the scope settings whose change invalidates the
            # cyclic buffer (sample rate, horizontal scale and offset).
            self._scopeSampleRate = Attribute(devName=scopeDevName,
                                              attrName='CurrentSampleRate',
                                              callback=self.cbScopeSampleRate,
                                              info_stream=self.info,
                                              debug_stream=self.debug,
                                              warn_stream=self.warn,
                                              error_stream=self.error)
            #TODO: replace the current subscription to the channel1
            self._scopeScaleH = Attribute(devName=scopeDevName,
                                          attrName='ScaleH',
                                          callback=self.cbScopeScaleH,
                                          info_stream=self.info,
                                          debug_stream=self.debug,
                                          warn_stream=self.warn,
                                          error_stream=self.error)
            self._scopeOffsetH = Attribute(devName=scopeDevName,
                                           attrName='OffsetH',
                                           callback=self.cbScopeOffsetH,
                                           info_stream=self.info,
                                           debug_stream=self.debug,
                                           warn_stream=self.warn,
                                           error_stream=self.error)
        except Exception,e:
            self.error("BunchAnalyzer.__init__() scope exception: %s"%(str(e)))
            self._scopeDevName = ""
            self._scopeProxy = None
            self._scopeSampleRate = None
            raise e
        self.debug("BunchAnalyzer.__init__() scope ok")
        #---- RF
        try:
            if rfDev:
                self._rfDev = rfDev
            else:
                self._rfDev = 'SR09/rf/sgn-01'
            if rfAttr:
                self._rfAttr = rfAttr
            else:
                self._rfAttr = 'Frequency'
            self._rfFrequency = Attribute(devName=self._rfDev,
                                          attrName=self._rfAttr,
                                          debug_stream=self.debug,
                                          warn_stream=self.warn,
                                          error_stream=self.error)
        except Exception,e:
            self.error("BunchAnalyzer.__init__() RF exception: %s"%(str(e)))
            self._rfFrequency = None#499650374.85
            raise e
        self.debug("BunchAnalyzer.__init__() RF ok")
        #---- dcct
        try:
            if dcctDev:
                self._dcctDev = dcctDev
            else:
                self._dcctDev = 'SR/DI/DCCT'
            if dcctAttr:
                self._dcctAttr = dcctAttr
            else:
                self._dcctAttr = 'AverageCurrent'
            self._currentAttr = Attribute(devName=self._dcctDev,
                                          attrName=self._dcctAttr,
                                          debug_stream=self.debug,
                                          warn_stream=self.warn,
                                          error_stream=self.error)
        except Exception,e:
            self.error("BunchAnalyzer.__init__() DCCT exception: %s"%(str(e)))
            self._currentAttr = None
            raise e
        self.debug("BunchAnalyzer.__init__() DCCT ok")
        #---- internals
        self._threshold = threshold
        self._nAcquisitions = nAcquisitions
        self._cyclicBuffer = cyclicBuffer
        self._cyclicBufferTracer("buffer initialised")
        # Timestamps of calculation start/end; used for the output rate.
        self._t0 = []
        self._tf = []
        self._startingPoint = startingPoint
        self.__max_cyclicBuf = max_cyclicBuf
        self.__alarm_cyclicBuf = alarm_cyclicBuf
        #---- outputs
        self._filledBunches = 0
        self._spuriousBunches = 0
        self._yFiltered = []
        self._bunchIntensity = []
        self._resultingFrequency = 0
        self.debug("BunchAnalyzer created with parameters: "\
                   "nAcquisitions=%d, startingPoint=%d, threshold=%6.3f"
                   %(self.nAcquisitions,self.StartingPoint,self.Threshold))
    ######
    #----# Auxiliary setters and getters to modify the behaviour from the device server
    #----## Timing
    @property
    def TimingDevName(self):
        # Name of the timing (event receiver) Tango device.
        return self._timingDevName
    @TimingDevName.setter
    def TimingDevName(self,value):
        # Rebuild the proxy whenever the device name changes.
        self._timingDevName = value
        self._timingProxy = PyTango.DeviceProxy(self._timingDevName)
    @property
    def TimingDevice(self):
        # Raw PyTango proxy to the timing device (read-only).
        return self._timingProxy
@property
def TimingOutput(self):
return self._timingoutput
@TimingOutput.setter
def TimingOutput(self):
self._timingoutput = value
    @property
    def DelayTick(self):
        # Timing delay expressed in event-receiver ticks.
        return self._delayTick
    @DelayTick.setter
    def DelayTick(self,value):
        try:
            value = int(value)
            # Only push the new delay to the hardware when it changed.
            if self._delayTick != value:
                self._delayTick = value
                self.delay()
        except Exception,e:
            self.error("Exception setting the timming delay: %s"%(e))
#----##Scope
@property
def ScopeDevName(self):
return self._scopeDevName
@ScopeDevName.setter
def ScopeDevName(self,value):
self._scopeDevName = name
self._scopeProxy = PyTango.DeviceProxy(self._scopeDevName)
    @property
    def ScopeDevice(self):
        # Raw PyTango proxy to the scope device (read-only).
        return self._scopeProxy
    @property
    def ScopeSampleRate(self):
        return self._scopeSampleRate.value
    def cbScopeSampleRate(self,value):
        """Event callback for a change of the scope sample rate.

        The buffer cannot mix waveforms of different lengths, so it is
        cleared; the starting point is rescaled proportionally so it
        keeps pointing at the same instant of the waveform.
        """
        #The buffer cannot contain different length of the waveforms
        if hasattr(self,'_scopeSampleRate'):
            self.debug("Sample rate event: ¿%6.3f == %6.3f?"
                       %(self._scopeSampleRate.value,value))
            if self._scopeSampleRate.value != value:
                self.debug("Sample rate changed: clean the cyclic buffer")
                self.CyclicBuffer = []
                if self.StartingPoint != 0:
                    oldStartingPoint = self.StartingPoint
                    self.debug("Sample rate changed: update the starting "\
                               "point (was:%d)"%oldStartingPoint)
                    relation = (value/self._scopeSampleRate.value)
                    self.debug("Sample rate changed: relation %6.3f"%relation)
                    newStartingPoint = oldStartingPoint * relation
                    self.debug("Sample rate changed: update the starting "\
                               "point (set:%d)"%newStartingPoint)
                    self.StartingPoint = newStartingPoint
                    #this increases or reduces the starting point
                    #maintaining its ratio
                    #if value 2e10 and was 4e10, divide by 2 (multiply by a half)
                    #if value 4e10 and was 2e10, multiply by 2
        self._scopeSampleRate.value = value
    @property
    def ScopeScaleH(self):
        return self._scopeScaleH.value
    @ScopeScaleH.setter
    def ScopeScaleH(self,value):
        self._scopeScaleH.value = value
    def cbScopeScaleH(self,value):
        # A horizontal-scale change invalidates the buffered waveforms.
        if hasattr(self,'_scopeScaleH') and \
           self._scopeScaleH.value != value:
            self.debug("Horizontal Scale changed: clean the cyclic buffer")
            self.CyclicBuffer = []
        self._scopeScaleH.value = value
    @property
    def ScopeOffsetH(self):
        return self._scopeOffsetH.value
    @ScopeOffsetH.setter
    def ScopeOffsetH(self,value):
        self._scopeOffsetH.value = value
    def cbScopeOffsetH(self,value):
        # A horizontal-offset change invalidates the buffered waveforms.
        if hasattr(self,'_scopeOffsetH') and \
           self._scopeOffsetH.value != value:
            self.info("Horizontal Offset changed: clean the cyclic buffer")
            self.CyclicBuffer = []
        self._scopeOffsetH.value = value
    #----## RF
    @property
    def RfFrequency(self):
        # Storage-ring RF frequency read through the RF signal device.
        return self._rfFrequency.value
    @RfFrequency.setter
    def RfFrequency(self,value):
        self._rfFrequency.value = value
#----##internals
@property
def Threshold(self,value=None):
return self._threshold
@Threshold.setter
def Threshold(self,value):
if value >= 0 and value <= 100:
self._threshold = value
else:
raise AttributeError("Threshold out of range")
    @property
    def nAcquisitions(self):
        # Number of waveforms averaged per calculation (target buffer size).
        return self._nAcquisitions
    @nAcquisitions.setter
    def nAcquisitions(self,value):
        # Coerce so float/str inputs behave consistently.
        self._nAcquisitions = int(value)
    def _cyclicBufferTracer(self,msg):
        # Route buffer-tracing messages through the parent when present.
        if self._parent:
            self._parent._cyclicBufferTracer(msg)
        else:
            self.debug(msg)
    @property
    def CyclicBuffer(self):
        # Shallow copy so callers cannot mutate the internal list.
        return self._cyclicBuffer[:]
    @CyclicBuffer.setter
    def CyclicBuffer(self,value):
        if type(value) == list:
            self.debug("Clean cyclic buffer")
            self._cyclicBuffer = value
        else:
            raise AttributeError("Unrecognized type for buffer")
    @property
    def lenCyclicBuffer(self):
        if self._cyclicBuffer is not None:
            return len(self._cyclicBuffer)
        return 0
    def CyclicBufferReset(self):
        # Drop everything; used when the waveform length changes.
        self._cyclicBuffer = []
        self._cyclicBufferTracer("buffer re-initialised. Input "\
                                 "array length change")
    def CyclicBufferAppend(self,lst):
        self._cyclicBuffer.append(lst)
    def CyclicBufferCycle(self):
        """Drop the oldest waveforms until only nAcquisitions remain."""
        if self.lenCyclicBuffer > self.nAcquisitions:
            self.info("Cyclic buffer needs a decrement of %d elements"
                      %(self.lenCyclicBuffer - self.nAcquisitions))
            while self.lenCyclicBuffer > self.nAcquisitions:
                self._cyclicBuffer.pop(0)
            # ?? self._cyclicBuffer = self._cyclicBuffer[-self.nAcquisitions]
    @property
    def StartingPoint(self):
        # First sample index of the analysis window.
        return self._startingPoint
    @StartingPoint.setter
    def StartingPoint(self,value):
        self._startingPoint = int(value)
    @property
    def FilledBunches(self):
        return self._filledBunches
    @property
    def SpuriousBunches(self):
        return self._spuriousBunches
    @property
    def BunchIntensity(self):
        return self._bunchIntensity
    @property
    def ResultingFrequency(self):
        return self._resultingFrequency
    @property
    def InputSignal(self):
        # Most recent raw waveform stored in the buffer.
        return self.CyclicBuffer[-1:][0]
        #equivalent to self.CyclicBuffer[self.lenCyclicBuffer-1]
    # done auxiliary setters and getters to modify the behaviour from the device server
    ####
    ######
    #----# auxiliary methods for logging
    def info(self,msg):
        """Log through the parent's info stream, else to stdout."""
        try:
            if self._parent:
                self._parent.info_stream(msg)
            else:
                print("info: %s"%(msg))
        except: print("cannot print in info stream (%s)"%msg)
    def debug(self,msg):
        try:
            if self._parent:
                self._parent.debug_stream(msg)
            else:
                print("debug: %s"%(msg))
        except: print("cannot print in debug stream (%s)"%msg)
    def warn(self,msg):
        try:
            if self._parent:
                self._parent.warn_stream(msg)
            else:
                print("warn: %s"%(msg))
        except: print("cannot print in warn stream (%s)"%msg)
    def error(self,msg):
        try:
            if self._parent:
                self._parent.error_stream(msg)
            else:
                print("error: %s"%(msg))
        except: print("cannot print in error stream (%s)"%msg)
    # done logging section
    ######
    ######
    #----# auxiliary methods to manage events
    def subscribe_event(self,attrName):
        """Subscribe self to CHANGE events of a scope channel attribute."""
        self.debug("subscribe to %s"%(attrName))
        if self._scopeProxy == None:
            raise EnvironmentError("Cannot subscribe if no scope proxy set")
        self._scopeChAttrEvent = self._scopeProxy.subscribe_event(attrName,
                                               PyTango.EventType.CHANGE_EVENT,
                                                                  self)
    def unsubscribe_event(self):
        self._scopeProxy.unsubscribe_event(self._scopeChAttrEvent)
        self._parent.change_state(PyTango.DevState.OFF)
    def push_event(self,event):
        """Tango callback per new waveform: buffer it and recompute.

        Validates the event, requires beam current, resets the buffer
        when the waveform length changed, appends the new waveform and
        runs the whole calculation chain, emitting the results.
        """
        #a callback method for the scope channel attribute
        try:
            if event != None:
                if event.attr_value != None and event.attr_value.value != None:
                    self.debug("PushEvent() %s: array of %d elements"
                               %(event.attr_name,event.attr_value.value.size))
                else:
                    self.debug("PushEvent() %s: value has None type"
                               %(event.attr_name))
                    return
        except Exception,e:
            self.error("PushEvent() exception %s:"%(e))
        if not self.isCurrentOk():
            return
        #if the size of the elements in the buffer have changed, restart them
        if self.lenCyclicBuffer > 0:
            if len(self.CyclicBuffer[0]) != len(event.attr_value.value):
                self.CyclicBufferReset()
        #populate the cyclicBuffer
        self.CyclicBufferAppend(event.attr_value.value)
        #timestamps when this starts
        t0 = time.time()
        try:
            self.precalculation()
            self.calculateMeasurements()
            tf = time.time()
            self._t0.append(t0)
            self._tf.append(tf)
            self.debug("current calculation in %f"%(tf-t0))
            # Keep only the most recent nAcquisitions timestamps.
            while len(self._tf) > self.nAcquisitions:
                self._t0.pop(0)
                self._tf.pop(0)
            #use the time to calculate the output frequency
            self.calculateResultingFrequency()
            self.emit_results()
        except Exception as e:
            self.error("Exception during calculation: %s"%(e))
            #FIXME: should be set the status to fault?
    def precalculation(self):
        """Maintain device state and fire progress events before computing.

        STANDBY while the buffer is still filling towards nAcquisitions,
        ON once full (excess entries are trimmed); the nAcquisitions
        attribute quality turns ALARM past the alarm watermark.
        """
        eventList = []
        #state changes between STANDBY<->ON when the len(cyclicBuffer)<nAcquitions
        bufLen = self.lenCyclicBuffer
        if bufLen < self.nAcquisitions:
            if not self._parent.get_state() in [PyTango.DevState.STANDBY,
                                                PyTango.DevState.ALARM]:
                self._parent.change_state(PyTango.DevState.STANDBY)
            if bufLen >= self.__alarm_cyclicBuf:
                eventList.append(['nAcquisitions',self.lenCyclicBuffer,
                                  PyTango.AttrQuality.ATTR_ALARM])
            else:
                eventList.append(['nAcquisitions',self.lenCyclicBuffer,
                                  PyTango.AttrQuality.ATTR_CHANGING])
        else:
            self.CyclicBufferCycle()
            if not self._parent.get_state() in [PyTango.DevState.ON,
                                                PyTango.DevState.ALARM]:
                self._parent.change_state(PyTango.DevState.ON)
            if self.lenCyclicBuffer >= self.__alarm_cyclicBuf:
                eventList.append(['nAcquisitions',self.lenCyclicBuffer,
                                  PyTango.AttrQuality.ATTR_ALARM])
            else:
                eventList.append(['nAcquisitions',self.lenCyclicBuffer,
                                  PyTango.AttrQuality.ATTR_VALID])
        eventList.append(['CyclicBuffer',self.CyclicBuffer,
                          PyTango.AttrQuality.ATTR_CHANGING])
        self._parent.fireEventsList(eventList)
        #TODO: are there any scope attribute to reread when a new waveform is received?
        #      or any that must be reread after N waveforms received
        #with the current values on the cyclic buffer, analyze it.
        #FIXME: why call delay when we are maintaining the delayTick
        #        self.debug("Time delay: %d (DelayTick: %d)"
        #                   %(self.delay(),self.DelayTick))
    def calculateMeasurements(self):
        """Average the buffered waveforms, filter, and extract the results.

        Produces _yFiltered, _bunchIntensity (normalised to the DCCT
        average current), _filledBunches and _spuriousBunches.
        """
        #Useful variables
        SampRate = self.ScopeSampleRate
        self.debug("SampRate = %f"%(SampRate))
        secperbin = 1./SampRate
        self.debug("secperbin = %12.12f"%(secperbin))
        CutOffFreq = 500#FIXME: would be this variable?
        freq = self._rfFrequency.value
        self.debug("RF freq = %f"%(freq))
        # Samples per RF period, i.e. one bucket.
        time_win = int((1/freq)/secperbin)
        self.debug("time_win = %d"%(time_win))
        # 448 buckets of 2 ns each, expressed in samples.
        Tot_Bucket = int(448*2*10**(-9)/secperbin)
        self.debug("Tot_Bucket = %d"%(Tot_Bucket))
        #Starting point ~ 907 bin = 22.675 ns when
        #   Timing = 146351002 ns,
        #   OffsetH = 200 ns,
        #   ScaleH = 100 ns
        start = int(self.StartingPoint)
        #NOT WORKING IF THE BEAM IS UNSTABLE
        #if self.lenCyclicBuffer==0: return
        self.debug("cyclic buffer size: %d"%(self.lenCyclicBuffer))
        if self.lenCyclicBuffer == 0:
            self.warn("empty buffer")
            return
        # Element-wise average of the buffered waveforms (analysis span only).
        y = array(self.CyclicBuffer[0][0:Tot_Bucket+start+1])
        for i in range(1,self.lenCyclicBuffer):
            y += array(self.CyclicBuffer[i][0:Tot_Bucket+start+1])
        y = y/(self.lenCyclicBuffer)
        x = range(len(y))
        #the calculation itself
        try:
            #            self.debug("input to bandPassFilter = %s"%(y.tolist()))
            self._yFiltered = self.bandPassFilter(SampRate, time_win, start,
                                                  CutOffFreq, x, y, secperbin)
            #print(time_win)
            p2p = self.peakToPeak(time_win, x)
            #FIXME: better to subscribe
            current = self._currentAttr.value
            #FIXME: use the new attr
            nBunches = self._filledBunches-self._spuriousBunches
            # Distribute the DCCT current proportionally to each bucket's
            # peak-to-peak amplitude.
            # NOTE(review): p2p is a plain list here; ``list/number``
            # would raise TypeError (caught below) -- presumably an
            # ``array(p2p)`` was intended; verify at runtime.
            self._bunchIntensity = ((p2p/sum(p2p))*current)#/(nBunches)
            self.debug("len(_bunchIntensity) = %d"%(len(self._bunchIntensity)))
            self._filledBunches = self.bunchCount(self._bunchIntensity)
            self.debug("FilledBunches = %d"%self._filledBunches)
            self._spuriousBunches = self.spuriousBunches(self._bunchIntensity)
            self.debug("SpuriousBunches = %d"%self._spuriousBunches)
        except Exception,e:
            self.error("Exception during calculation: %s"%(e))
            #FIXME: should be set the status to fault?
    def calculateResultingFrequency(self):
        # NOTE(review): an identical method is re-defined later in this
        # class; Python keeps the later definition, so this one is dead.
        samples = len(self._tf)
        lapses = []
        for i in range(samples-1):
            lapses.append(self._tf[i+1]-self._tf[i])
        # Mean lapse between consecutive completions -> rate in Hz.
        self._resultingFrequency = 1/average(lapses)
    def isCurrentOk(self):
        """Return True when there is stored beam current to analyse."""
        if self._currentAttr.value > 0.0:
            return True
        else:
            #when there is no beam, no calculation to be made
            if not self.isStandby(): # self.isRunning():
                self.emit_zeros()
                self.setStandby("Beam current")
            return False
    def emit_results(self):
        """Push all calculation outputs to the device server as events."""
        #emit output events
        if self._parent:
            events2emit = []
            events2emit.append(['BunchIntensity',self._bunchIntensity])
            events2emit.append(['FilledBunches',self._filledBunches])
            events2emit.append(['SpuriousBunches',self._spuriousBunches])
            events2emit.append(['nBunches',self._filledBunches-self._spuriousBunches])
            self.debug("len InputSignal = %d"%(len(self.InputSignal)))
            events2emit.append(['InputSignal',self.InputSignal])
            events2emit.append(['resultingFrequency',self._resultingFrequency])
            self._parent.fireEventsList(events2emit)
    def emit_zeros(self):
        """Push all-zero outputs (used when there is no beam)."""
        if self._parent:
            self._parent.fireEventsList([['BunchIntensity',array([0]*448)],
                                         ['resultingFrequency',0.0],
                                         ['FilledBunches',0],
                                         ['SpuriousBunches',0],
                                         ['nBunches',0]])
def calculateResultingFrequency(self):
samples = len(self._tf)
lapses = []
for i in range(samples-1):
lapses.append(self._tf[i+1]-self._tf[i])
self._resultingFrequency = 1/average(lapses)
    # done auxiliary methods to manage events
    ####
    ####
    # original methods of the bunch analysis
    def delay(self):
        '''Push self._delayTick to the timing hardware when out of date.

        Reads the current pulse parameters from the event receiver and
        rewrites them only if the stored delay tick differs; returns a
        pulse-parameter element (see note below).
        '''
        #backup pulse params
        if self._timingProxy == None:
            self.warn("BuncherAnalyzer.delay() not callable if Event "\
                      "Receiver property not configured")
            return self._delayTick #FIXME: return this is meaningless
        pulse_params = self._timingProxy.command_inout("GetPulseParams",
                                                       self._timingoutput)
        pulse_params = [int(i) for i in pulse_params]
        if (pulse_params[1] != self._delayTick):
            #            pulse_params = self._timingProxy.command_inout("GetPulseParams",
            #                                                           self._timingoutput)
            #            #command returns numpy array
            #            pulse_params = [int(i) for i in pulse_params]
            pulse_params[1] = self._delayTick
            # NOTE(review): after prepending the output index, index 1 no
            # longer holds the delay tick, so the return value below is
            # the original first pulse parameter -- confirm intent.
            pulse_params = [self._timingoutput] + pulse_params
            self._timingProxy.command_inout("SetPulseParams",pulse_params)
        return pulse_params[1]
    def bandPassFilter(self,Samp_Rate, Time_Windows,Start, Cut_Off_Freq,
                       x_data, y_data, Secperbin):
        '''Butterworth-filter the averaged waveform and trim it.

        Applies a 3rd-order filter (low cut around the RF band), flattens
        the first ``Start`` samples to the mean, and returns the filtered
        signal from ``Start`` on. Drives the parent device to ALARM when
        filtering fails or ``Start`` exceeds the signal length, and
        recovers from a previous ALARM on success.
        '''
        self.debug("lowPassFiler(SampleRate=%6.3f,Time window=%d,Start=%d,"\
                   "CutFreq=%6.3f,len(x)=%d,len(y)=%d,Secprebin=%12.12f"
                   %(Samp_Rate,Time_Windows,Start,Cut_Off_Freq,
                     len(x_data),len(y_data),Secperbin))
        try:
            #FIXME: parameters would be in side the class?
            #cutoff frequency normalized at the Nyquist frequency
            #(1/2 samp rate)
            CutOffFreqNq = Cut_Off_Freq*10**6/(Samp_Rate*0.5)
            LowFreq = 499*10**6/(Samp_Rate*0.5)
            #HighFreq = 501*10**6/(Samp_Rate*0.5)
            # filter order = amount of additional attenuation for frequencies
            # higher than the cutoff fr.
            filterorder = 3
            #b,a = signal.filter_design.butter(filterorder,[LowFreq,HighFreq])
            b,a = signal.filter_design.butter(filterorder,[LowFreq])
            #FIXME: why this assignment if it will be reassigned before use it.
            y_Fil = copy(y_data)
            try:
                y_Fil = signal.lfilter(b,a,y_data)
            except Exception,e:
                if self._parent and \
                   self._parent.get_state() != PyTango.DevState.ALARM:
                    self._parent.change_state(PyTango.DevState.ALARM)
                self.error("Exception in signal filter: %s"%(e))
                self._parent.addStatusMsg("Cannot apply a low pass filter.")
                raise Exception(e)
            if Start > len(y_Fil):
                if self._parent and \
                   self._parent.get_state() != PyTango.DevState.ALARM:
                    self._parent.change_state(PyTango.DevState.ALARM)
                msg = "Starting point (%d) farther than input signal "\
                      "length (%d)."%(Start,len(y_Fil))
                self._parent.addStatusMsg(msg)
                raise BufferError(msg)
            else:
                # Filtering succeeded: recover from a previous ALARM.
                if self._parent and \
                   self._parent.get_state() == PyTango.DevState.ALARM:
                    if self.lenCyclicBuffer < self.nAcquisitions:
                        self._parent.change_state(PyTango.DevState.STANDBY)
                    else:
                        self._parent.change_state(PyTango.DevState.ON)
            if Start>0:
                # Flatten the pre-trigger region to the mean value.
                for i in range(Start):
                    y_Fil[i] = sum(y_Fil)/len(y_Fil)#FIXME: is this using numpy?
            return y_Fil[Start:len(y_Fil)-1]
        except BufferError,e:
            raise BufferError(e)
        except Exception,e:
            self.error("BunchAnalyzer.bandPassFilter() Exception: %s"%(e))
def peakToPeak(self,Time_Window, x_data, y_Fil=None):
'''TODO: document this method'''
#FIXME: parameters would be in side the class?
if y_Fil == None:
y_Fil = self._yFiltered
p_to_p = []
k = 0
Start = 0
Av = sum(y_Fil)/len(y_Fil)
#Analysis
self.debug("Data analysis")
while (Start <= len(y_Fil)):
k = 0
time_win_ar = [] #Array that leasts the considered time window
if (Start + Time_Window <= len(y_Fil)):
for k in range(0, Time_Window):
time_win_ar.append(y_Fil[Start+k])
if (max(time_win_ar) > Av and min(time_win_ar) > Av):
p_to_p.append(0)
else:
p_to_p.append(max(time_win_ar)-min(time_win_ar))
Start = Start + Time_Window
i = 0
Max = max(p_to_p)
#thr = input('Threshold (%): ')
thr = self._threshold
thr = thr*0.01
for i in range (len(p_to_p)-1):
#Threshold set at 1% of the maximum peak to peak amplitude
if (p_to_p[i] < thr*Max):
p_to_p[i] = 0
if (len(p_to_p) == 0):
print "No Beam!"#FIXME: would be this a raise exception?
return p_to_p
def bunchCount(self,vec_p_to_p):
'''TODO: document this method'''
#FIXME: parameters would be in side the class?
count = 0
bunch = 0
#TODO: document the loop
for count in range(0, len(vec_p_to_p)-1):
if(vec_p_to_p[count] > 0):
bunch = bunch + 1
return bunch
def spuriousBunches(self,vec_p_to_p):
'''TODO: document this method'''
#FIXME: parameters would be in side the class?
i = 0
j = 0
sp_bun = 0
#TODO: document
if (vec_p_to_p [i] != 0 and vec_p_to_p[i+1] == 0):
sp_bun = sp_bun + 1
i = i + 1
#TODO: document the loop
while (i < len(vec_p_to_p)-1):
if (i < len(vec_p_to_p)-10 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+10] == 0):
while (j < 10):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-9 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+9] == 0):
while (j < 9):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-8 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+8] == 0):
while (j < 8):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-7 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+7] == 0):
while (j < 7):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-6 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+6] == 0):
while (j < 6):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-5 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+5] == 0):
while (j < 5):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-4 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+4] == 0):
while (j < 4):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-3 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+3] == 0):
while (j < 3):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-2 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+2] == 0):
while (j < 2):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-1 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+1] == 0):
sp_bun = sp_bun +1
j = 1
i = i + j + 1
j = 0
if (vec_p_to_p[len(vec_p_to_p)-1] != 0 and \
vec_p_to_p[len(vec_p_to_p)-2] == 0 ):
sp_bun = sp_bun + 1
return sp_bun
    # done original methods of the bunch analysis
    ####
    ######
    #----# auxiliary methods to manage states
    def isStandby(self):
        # True when the hosting device reports STANDBY.
        if self._parent:
            return self._parent.get_state() == PyTango.DevState.STANDBY
        return False
    def isRunning(self):
        if self._parent:
            return self._parent.get_state() == PyTango.DevState.RUNNING
        return False
    def setStandby(self,msg=None):
        # Move the device to STANDBY with an explanatory status message.
        if self._parent:
            self._parent.change_state(PyTango.DevState.STANDBY)
            if msg:
                self._parent.addStatusMsg("Waiting for %s" % (msg))
            else:
                self._parent.addStatusMsg("Waiting...")
    def setRunning(self):
        if self._parent:
            self._parent.change_state(PyTango.DevState.RUNNING)
            self._parent.addStatusMsg("Receiving events")
    def setFault(self,msg):
        if self._parent:
            self._parent.change_state(PyTango.DevState.FAULT)
            self._parent.addStatusMsg(msg)
    #---- auxiliary methods to manage states
    ######
# Done BunchAnalyser Class
####
#imports only used from the command line call
try:#The try is necessary by the Tango device
from matplotlib.pyplot import figure, show
from pylab import plt,savefig,xlabel,ylabel
except: pass
####
# plot methods used when this is called by command line
def plot1(x_data,y_data,y_Fil):
    """Plot raw vs filtered data; saved as scope_LowPassFil.png."""
    #Plotting raw and filtered data
    f1 = figure()
    af1 = f1.add_subplot(111)
    #af1.plot(array(x_data)*2.5e-2,y_data)
    af1.plot(array(x_data),y_data)
    plt.title("Raw and Filtered Data")
    #af1.plot(array(x_data)*2.5e-2, y_Fil, 'r')
    af1.plot(array(x_data), y_Fil, 'r')
    savefig('scope_LowPassFil.png')
def plot2(x_data,y_Fil):
    """Plot the filtered data alone; saved as scope_FilSig.png."""
    f2 = figure()
    af2 = f2.add_subplot(111)
    #    af2.plot(array(x_data)*2.5e-2, y_Fil, 'r')
    af2.plot(array(x_data), y_Fil, 'r')
    plt.title("Filtered Data")
    savefig('scope_FilSig.png')
def plot3(p_to_p):
    """Bar-plot the normalised peak-to-peak amplitude per bucket."""
    #Plotting the peak to peak signal in function of the bunch number
    f3 = figure()
    af3 = f3.add_subplot(111)
    plt.bar(range(len(p_to_p)), p_to_p/max(p_to_p))
    xlabel('Bucket Number')
    ylabel('Normalized peak to peak amplitude')
    plt.title("Peak to Peak")
    savefig('scope_peakTOpeakTimeWin.png')
# end plot methods
####
def main():
    """Command-line demo: acquire, filter, analyse and plot (Python 2).

    Talks directly to ALBA control-system devices (scope, timing, RF,
    DCCT), so it only runs inside that environment.
    """
    bunchAnalyzer = BunchAnalyzer(timingDevName="SR02/TI/EVR-CDI0203-A",
                                  scopeDevName="SR02/DI/sco-01")
    # Setting Offset and scale
    PyTango.AttributeProxy('SR02/DI/sco-01/OffsetH').write(2e-07)
    PyTango.AttributeProxy('SR02/DI/sco-01/ScaleH').write(1e-07)
    TimeDel = bunchAnalyzer.delay()
    print "Offset: ",\
          PyTango.AttributeProxy('SR02/DI/sco-01/OffsetH').read().value*1e9, " ns"
    print "Scale: ",\
          PyTango.AttributeProxy('SR02/DI/sco-01/ScaleH').read().value*1e9, " ns"
    print "Time delay: ", TimeDel
    # Useful variables
    SampRate = PyTango.AttributeProxy(\
        'SR02/DI/sco-01/CurrentSampleRate').read().value
    print("SampRate = %f"%(SampRate))
    secperbin = 1./SampRate
    print("secperbin = %f"%(secperbin))
    CutOffFreq = 500
    freq = PyTango.AttributeProxy('SR09/rf/sgn-01/Frequency').read().value
    # Samples per RF period, i.e. one bucket.
    time_win = int((1/freq)/secperbin)
    print("time_win = %d"%(time_win))
    Tot_Bucket = int(448*2*10**(-9)/secperbin)
    print("Tot_Bucket = %d"%(Tot_Bucket))
    #Starting point ~ 907 bin = 22.675 ns when Timing = 146351002 ns,
    #OffsetH = 200 ns, ScaleH = 100 ns NOT WORKING IF THE BEAM IS UNSTABLE
    start = 907
    Ac_num = input('Number of acquisitions: ')
    ######## Loading and averaging data ######################################
    n = 0
    y = PyTango.AttributeProxy('sr02/di/sco-01/Channel1').read().value
    time1 = time.time()
    y = y[0:Tot_Bucket+start+1]
    print "Data Acquisition..."
    time.sleep(0.1)
    # Average Ac_num consecutive waveforms sample by sample.
    for n in range(1,Ac_num):
        y_temp = []
        y_temp =PyTango.AttributeProxy('sr02/di/sco-01/Channel1').read().value
        y_temp = y_temp[0:Tot_Bucket+start+1]
        y = y + y_temp
        time.sleep(0.1)
    y = y/(n+1)
    x = range(len(y))
    ######## Filtering Data ###################################################
    y_fil = bunchAnalyzer.bandPassFilter(SampRate, time_win, start,
                                         CutOffFreq, x, y, secperbin)
    plot1(x[:len(y_fil)],y[:len(y_fil)],y_fil)
    plot2(x[:len(y_fil)],y_fil)
    ######## Analysis #########################################################
    bunchAnalyzer._threshold = input('Threshold (%): ')
    P_to_P = bunchAnalyzer.peakToPeak(time_win, x, y_fil)
    plot3(P_to_P/max(P_to_P))
    ######## Bunch Counting ###################################################
    Bunch = bunchAnalyzer.bunchCount(P_to_P)
    ######## Spurious Bunches #################################################
    Sp_Bun = bunchAnalyzer.spuriousBunches(P_to_P)
    ######## Output ###########################################################
    print "Total number of bucket: ", len(P_to_P)+1
    print "Number of filled bunches", Bunch
    print "Number of spurious bunches: ", Sp_Bun
    print "Max peak to peak amplitude: ", max(P_to_P)
    print "Average current: ",\
          PyTango.AttributeProxy('SR/DI/DCCT/AverageCurrent').read().value, "mA"
    show()
if __name__ == "__main__":
    main()
| gpl-3.0 |
vlukes/sfepy | script/plot_logs.py | 4 | 3290 | #!/usr/bin/env python
"""
Plot logs of variables saved in a text file by sfepy.base.log.Log class.
The plot should be almost the same as the plot that would be generated by the
Log directly.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser, Action, RawDescriptionHelpFormatter
import matplotlib.pyplot as plt
from sfepy.base.log import read_log, plot_log
class ParseRc(Action):
    """Argparse action that parses a ``key:val,...`` string into a dict.

    The raw option value is wrapped in braces and parsed as a Python
    dict literal, e.g. ``--rc "'lines.linewidth':2,'text.usetex':True"``.
    Stores the resulting dict on the namespace attribute ``self.dest``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        import ast

        # ast.literal_eval accepts only Python literals, unlike the
        # original eval(), which would execute arbitrary expressions
        # supplied on the command line.
        pars = ast.literal_eval('{' + values + '}')
        setattr(namespace, self.dest, pars)
# Help strings for the command-line options wired up in main().
helps = {
    'groups' :
    'list of log data groups subplots (from 0) to plot - all groups are'
    ' plotted if not given',
    'output_filename' :
    'save the figure using the given file name',
    'rc' : 'matplotlib resources',
    'no_legends' :
    'do not show legends in the log plots',
    'nbins' :
    'the numbers of bins in x, y axes for all groups [default: %(default)s]',
    'swap_axes' :
    'swap the axes of the plots',
    'no_show' :
    'do not show the figure',
}
def main():
    """Parse command-line options and plot a sfepy log file.

    Reads the log with ``read_log`` and renders it with ``plot_log``,
    optionally saving the figure and/or suppressing the interactive
    window.
    """
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-g', '--groups', metavar='int[,int,...]',
                        action='store', dest='groups',
                        default=None, help=helps['groups'])
    parser.add_argument('-o', '--output', metavar='filename',
                        action='store', dest='output_filename',
                        default=None, help=helps['output_filename'])
    parser.add_argument('--rc', type=str, metavar='key:val,...',
                        action=ParseRc, dest='rc',
                        default={}, help=helps['rc'])
    parser.add_argument('--no-legends',
                        action='store_false', dest='show_legends',
                        default=True, help=helps['no_legends'])
    parser.add_argument('--nbins', metavar='nx1,ny1,...',
                        action='store', dest='nbins',
                        default=None, help=helps['nbins'])
    parser.add_argument('--swap-axes',
                        action='store_true', dest='swap_axes',
                        default=False, help=helps['swap_axes'])
    parser.add_argument('-n', '--no-show',
                        action='store_true', dest='no_show',
                        default=False, help=helps['no_show'])
    parser.add_argument('filename')
    options = parser.parse_args()

    filename = options.filename

    # Comma-separated group indices -> list of ints.
    if options.groups is not None:
        options.groups = [int(ii) for ii in options.groups.split(',')]

    # --nbins alternates x and y bin counts per group; the literal string
    # 'None' keeps the matplotlib default for that axis.
    if options.nbins is not None:
        aux = [int(ii) if ii != 'None' else None
               for ii in options.nbins.split(',')]
        xnbins, ynbins = aux[::2], aux[1::2]
    else:
        xnbins = ynbins = None

    log, info = read_log(filename)

    plt.rcParams.update(options.rc)
    plot_log(None, log, info, groups=options.groups,
             xnbins=xnbins, ynbins=ynbins,
             show_legends=options.show_legends, swap_axes=options.swap_axes)

    if options.output_filename:
        plt.savefig(options.output_filename)

    if not options.no_show:
        plt.show()

if __name__ == '__main__':
    main()
| bsd-3-clause |
kapteyn-astro/kapteyn | doc/source/EXAMPLES/mu_clown.py | 1 | 1155 | from kapteyn import maputils
from matplotlib import pyplot as plt
from numpy import fft, log, asarray, float64, abs, angle
# Build a 2x2 figure: the image itself, the log-magnitude of its FFT,
# the FFT phase, and the inverse transform (reconstruction).
#f = maputils.FITSimage("m101.fits")
f = maputils.FITSimage("cl2.fits")
fig = plt.figure(figsize=(8,8))
frame = fig.add_subplot(2,2,1)
mplim = f.Annotatedimage(frame, cmap="gray")
mplim.Image()
mplim.plot()
# Real 2-D FFT of the image data.
fftA = fft.rfft2(f.dat, f.dat.shape)
fftre = fftA.real
fftim = fftA.imag
frame = fig.add_subplot(2,2,2)
# NOTE(review): headers are taken from m101.fits while the pixel data come
# from cl2.fits loaded above — confirm this mix is intentional.
#f = maputils.FITSimage("m101.fits", externaldata=log(abs(fftre)+1.0))
f = maputils.FITSimage("m101.fits", externaldata=log(abs(fftA)+1.0))
mplim2 = f.Annotatedimage(frame, cmap="gray")
im = mplim2.Image()
mplim2.plot()
frame = fig.add_subplot(2,2,3)
# Phase of the FFT in the third panel.
f = maputils.FITSimage("m101.fits", externaldata=angle(fftA))
mplim3 = f.Annotatedimage(frame, cmap="gray")
im = mplim3.Image()
mplim3.plot()
frame = fig.add_subplot(2,2,4)
# Inverse FFT: should reproduce the original image (up to rounding).
D = fft.irfft2(fftA)
f = maputils.FITSimage("m101.fits", externaldata=D.real)
mplim4 = f.Annotatedimage(frame, cmap="gray")
im = mplim4.Image()
mplim4.plot()
# Enable interactive colormap manipulation on every panel.
mplim.interact_imagecolors()
mplim2.interact_imagecolors()
mplim3.interact_imagecolors()
mplim4.interact_imagecolors()
plt.show()
| bsd-3-clause |
rishikksh20/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 58 | 3692 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
Note that those results can be highly dependent on the value of
``learning_rate_init``.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'solver': 'adam', 'learning_rate_init': 0.01}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
    """Train one MLPClassifier per entry of the global ``params`` list on
    (X, y) and draw each training-loss curve on the axes ``ax``.

    Relies on the module-level ``params``, ``labels`` and ``plot_args``
    lists staying aligned index-by-index.
    """
    # for each dataset, plot learning for each learning strategy
    print("\nlearning on dataset %s" % name)
    ax.set_title(name)
    # Scale features to [0, 1]; MLP training is sensitive to feature scale.
    X = MinMaxScaler().fit_transform(X)
    mlps = []
    if name == "digits":
        # digits is larger but converges fairly quickly
        max_iter = 15
    else:
        max_iter = 400
    for label, param in zip(labels, params):
        print("training: %s" % label)
        mlp = MLPClassifier(verbose=0, random_state=0,
                            max_iter=max_iter, **param)
        mlp.fit(X, y)
        mlps.append(mlp)
        print("Training set score: %f" % mlp.score(X, y))
        print("Training set loss: %f" % mlp.loss_)
    for mlp, label, args in zip(mlps, labels, plot_args):
        ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
mwv/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """
    Tokenize *doc*, collapsing every numeric token to a placeholder.

    Tokens of two or more word characters are extracted; any token whose
    first character is a digit or underscore becomes the single string
    ``"#NUMBER"``.  This keeps the *presence* of a number as a feature
    while discarding its value, which can help some methods.
    """
    word_re = re.compile(u'(?u)\\b\\w\\w+\\b')
    out = []
    for tok in word_re.findall(doc):
        out.append("#NUMBER" if tok[0] in "0123456789_" else tok)
    return out
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
              'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
              'comp.windows.x', 'misc.forsale', 'rec.autos',
              'rec.motorcycles', 'rec.sport.baseball',
              'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
              'sci.med', 'sci.space', 'soc.religion.christian',
              'talk.politics.guns', 'talk.politics.mideast',
              'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target

# TF-IDF features; numeric tokens are collapsed to "#NUMBER" by the
# custom tokenizer defined above.
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
                             tokenizer=number_aware_tokenizer)
# One bicluster / cluster per newsgroup category, with fixed seeds so the
# example output is reproducible.
cocluster = SpectralCoclustering(n_clusters=len(categories),
                                 svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
                         random_state=0)

print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)

print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
# V-measure scores the document clustering against the true categories.
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_cocluster, y_true)))

print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_kmeans, y_true)))

feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Normalized cut of bicluster ``i`` — lower means a better bicluster.

    Uses the module-level fitted ``cocluster`` model and TF-IDF matrix
    ``X``.  Degenerate biclusters get ``sys.float_info.max`` so that they
    sort last.
    """
    rows, cols = cocluster.get_indices(i)
    # NOTE(review): np.any(rows) is False when the only index present is 0,
    # so a bicluster containing exactly row/column 0 is treated as empty
    # here — confirm whether that edge case matters for this example.
    if not (np.any(rows) and np.any(cols)):
        import sys
        return sys.float_info.max
    # Documents/words outside this bicluster.
    row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Total weight inside the bicluster vs. weight crossing its boundary.
    weight = X[rows[:, np.newaxis], cols].sum()
    cut = (X[row_complement[:, np.newaxis], cols].sum() +
           X[rows[:, np.newaxis], col_complement].sum())
    return cut / weight
def most_common(d):
    """Return the items of a mapping sorted by value, highest first.

    Like ``collections.Counter.most_common``, but for any mapping of
    key -> orderable value (typically a ``defaultdict(int)`` of counts).

    Parameters
    ----------
    d : dict
        Mapping from key to an orderable value.

    Returns
    -------
    list of (key, value) tuples
        Sorted by value in descending order.
    """
    # dict.items() works on both Python 2 and 3 and avoids the deprecated
    # sklearn.externals.six.iteritems helper.
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
ldirer/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 8 | 5475 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Parameters
    ----------
    l_x : int
        Linear size of the image array.
    n_dir : int
        Number of angles at which projections are acquired.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
        Row ``k`` holds the interpolation weights with which each image
        pixel contributes to detector bin ``k``.
    """
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Pixel indices, duplicated because _weights() splits every sample
    # between its two neighbouring detector bins.
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Rotate pixel centres to this projection angle, then distribute
        # each one linearly over the two nearest detector bins.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Discard contributions falling outside the detector.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data():
    """Generate a sparse synthetic binary image.

    Uses the module-level global ``l`` as the linear image size.  Random
    blobs (seeded, so the output is reproducible) are smoothed,
    thresholded and restricted to a central disk; only the boundaries of
    the resulting objects are kept, so the image is sparse.

    Returns
    -------
    ndarray of bool, shape (l, l)
        True on object boundaries, False elsewhere.
    """
    rs = np.random.RandomState(0)
    n_pts = 36
    x, y = np.ogrid[0:l, 0:l]
    # Circular support: pixels inside a disk of diameter l.
    mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
    mask = np.zeros((l, l))
    points = l * rs.rand(2, n_pts)
    # np.int was removed in NumPy 1.24; the builtin int is the supported
    # spelling and is what np.int aliased.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # XOR with the erosion keeps only object boundaries.
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# The number of projection angles must be an integer: np.linspace (used
# inside build_projection_operator) rejects a float ``num`` on modern
# NumPy.  l // 7 preserves the original "l/7 projections" intent.
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)  # add measurement noise

# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)

# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)

# Show the original image and both reconstructions side by side.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')

plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
| bsd-3-clause |
jonwright/ImageD11 | setup.py | 1 | 7186 |
from __future__ import print_function
# ImageD11_v1.0 Software for beamline ID11
# Copyright (C) 2005-2018 Jon Wright
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Setup script
To re-build the wrappers do:
cd src && python make_pyf.py
"""
from io import open # this misery may never end.
# For pip / bdist_wheel etc
import setuptools
import os, sys, platform, os.path
from distutils.core import setup, Extension
from distutils.command import build_ext
############################################################################
def get_version():
    """Read the package version string from ImageD11/__init__.py.

    Scans the file for a line containing ``__version__`` and returns the
    assigned value (a string), or None if no such line is found.
    """
    import ast
    with open("ImageD11/__init__.py", "r") as f:
        for line in f:
            if "__version__" in line:
                # literal_eval parses the quoted version string without
                # the arbitrary-code risk of eval().
                return ast.literal_eval(line.split("=")[1].strip())
print("Building version |%s|"%get_version(), "on system:", platform.system())
#############################################################################
# Set the openmp flag if needed. Also CFLAGS and LDSHARED from sys.argv ?
#
# JW https://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used
copt = {
'msvc': ['/openmp', '/O2'] ,
'unix': ['-fopenmp', '-O2'], #, '-DF2PY_REPORT_ON_ARRAY_COPY=100'] ,
'mingw32': ['-fopenmp', '-O2'] ,
}
lopt = { k : [a for a in l] for k,l in copt.items() }
lopt['msvc'] = []
if platform.system() == "Darwin":
copt['unix'].remove("-fopenmp")
lopt['unix'].remove("-fopenmp")
# might try:
# set CFLAGS=/arch:AVX2 for msvc
# CFLAGS=-march=native -mtune=native
# LDFLAGS=-march=native -mtune=native
class build_ext_subclass( build_ext.build_ext ):
    """build_ext that injects numpy headers and per-compiler extra flags."""
    def build_extensions(self):
        """ attempt to defer the numpy import until later """
        # Importing numpy here rather than at module top lets setup.py be
        # parsed (e.g. for metadata) before numpy is installed.
        from numpy import get_include
        self.compiler.add_include_dir(get_include())
        c = self.compiler.compiler_type
        CF = [] ; LF=[]
        # Extra user-supplied flags from the environment are appended
        # after the per-compiler defaults chosen above.
        if "CFLAGS" in os.environ:
            CF = os.environ.get("CFLAGS").split(" ")
        if "LDFLAGS" in os.environ:
            LF = os.environ.get("LDFLAGS").split(" ")
        for e in self.extensions:
            if c in copt:
                e.extra_compile_args = copt[ c ] + CF
                e.extra_link_args = lopt[ c ] + LF
                print("Customised compiler",c,e.extra_compile_args,
                      e.extra_link_args)
        build_ext.build_ext.build_extensions(self)
# C sources for the single _cImageD11 extension module, all under src/.
cnames = "_cImageD11module.c blobs.c cdiffraction.c cimaged11utils.c"+\
    " closest.c connectedpixels.c darkflat.c localmaxlabel.c sparse_image.c "+\
    " splat.c fortranobject.c"
csources = [os.path.join('src',c) for c in cnames.split()]
extension = Extension( "_cImageD11", csources,
                       include_dirs = [ 'src' ])
################################################################################
# Try to further reduce this long list
scripts = ["ImageD11/rsv_mapper.py",
"ImageD11/tkGui/plot3d.py",
"scripts/peaksearch.py",
"scripts/fitgrain.py",
"scripts/ubi2cellpars.py",
"scripts/filtergrain.py",
"scripts/ImageD11_2_shelx.py",
"scripts/fix_spline.py",
"scripts/edfheader.py",
"scripts/ImageD11_gui.py",
"scripts/bgmaker.py",
"scripts/merge_flt.py",
"scripts/makemap.py",
"scripts/plotlayer.py",
"scripts/plotgrainhist.py",
"scripts/plotImageD11map.py",
"scripts/cutgrains.py",
"scripts/index_unknown.py",
"scripts/spatialfix.py",
"scripts/refine_em.py",
"scripts/avg_par.py",
"scripts/powderimagetopeaks.py"]
################################################################################
# Things we depend on. This generally borks things if pip
# tries to do source installs of all of these.
# sfood -I ImageD11/depreciated -I build/ -v -u >sfood.out 2>sfood.err
minimal = [ # can't compile without this
"six",
"numpy",
"setuptools",
]
useful = [ # stuff you probably want, and should be able to get easily
"fabio",
"xfab>=0.0.4", #
# comes from xfab : "PyCifRW",
"matplotlib", # tkGui
"pyopengltk", # plot3d in tkGui
"scipy", #
# 32 bit windows binary wheel for python 3 is missing
# this is not quite right, but seems close
'h5py <= 2.10.0 ; (sys_platform=="win32") and (python_version >= "3")',
'h5py ; (sys_platform=="win32") and (python_version < "3")',
'h5py ; (sys_platform!="win32")',
'pyyaml',
]
more = [
# Used in sandbox / test / not completely essential, but should work for CI
"pytest",
'numba==0.46.0 ; python_version < "3" ', # for some test cases
'numba ; python_version > "3" ', # for some test cases
"pillow", # in sandbox
"lmfit", # in sandbox
"PyMca5", # in sandbox
"sympy", # for maths
'ipywidgets', # for notebook nbGui
'pyopencl', # (was?) in sandbox
'pyFAI ; python_version >= "3" ', # pypi problematic
'pyFAI <= 0.18.0 ; python_version < "3" ',
'silx[full] ; python_version >= "3" ', # for silxGui
]
rare = [ #
"FitAllB", # not for python3
"minuit", # for fitallb
"PyTango", # sandbox
]
# read the contents of your README file
# (used as the PyPI long description below)
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    readme = f.read()

# See the distutils docs...
setup(name='ImageD11',
      version=get_version(),
      author='Jon Wright',
      author_email='wright@esrf.fr',
      cmdclass={ 'build_ext' : build_ext_subclass },
      description='ImageD11',
      license = "GPL",
      ext_package = "ImageD11",   # Puts extensions in the ImageD11 directory
      ext_modules = [extension,],
      setup_requires = minimal,   # to compile
      install_requires = minimal + useful,
      extras_require = { 'full' : more, 'rare' : rare },
      packages = ["ImageD11",
                  "ImageD11.tkGui",
                  "ImageD11.silxGui",
                  "ImageD11.nbGui"],
      package_dir = {"ImageD11":"ImageD11"},
      url = "http://github.com/jonwright/ImageD11",
      package_data = {"ImageD11" : ["doc/*.html", "data/*", "sandbox/*.py" ]},
      scripts = scripts,
      long_description = readme,
      long_description_content_type='text/markdown',
      )
harisbal/pandas | scripts/tests/test_validate_docstrings.py | 1 | 23041 | import string
import random
import io
import pytest
import numpy as np
import validate_docstrings
validate_one = validate_docstrings.validate_one
from pandas.util.testing import capture_stderr
class GoodDocStrings(object):
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
"""
def plot(self, kind, color='blue', **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot.
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
"""
return random.random()
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
"""
length = random.randint(1, 10)
letters = "".join(random.sample(string.ascii_lowercase, length))
return length, letters
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
"""
while True:
yield random.random()
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
Series
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
return self.iloc[:5]
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
Series
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
>>> s.head()
0 Ant
1 Bear
2 Cow
3 Dog
4 Falcon
dtype: object
With the `n` parameter, we can change the number of returned rows:
>>> s.head(n=3)
0 Ant
1 Bear
2 Cow
dtype: object
"""
return self.iloc[:n]
def contains(self, pat, case=True, na=np.nan):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
Examples
--------
>>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
>>> s.str.contains(pat='a')
0 False
1 False
2 True
3 NaN
dtype: object
**Case sensitivity**
With `case_sensitive` set to `False` we can match `a` with both
`a` and `A`:
>>> s.str.contains(pat='a', case=False)
0 True
1 False
2 True
3 NaN
dtype: object
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s.str.contains(pat='a', na=False)
0 False
1 False
2 True
3 False
dtype: bool
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure sphinx directives don't affect checks for trailing periods.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
class BadGenericDocStrings(object):
    """Everything here has a bad docstring
    """
    # NOTE(review): every docstring in this class is an *intentionally
    # defective* fixture consumed by test_bad_class / test_bad_generic_functions.
    # Do not fix the docstrings — the mistakes are the point. Blank lines were
    # reconstructed from a whitespace-stripped source (func's own docstring
    # documents which blanks it must contain) — verify against the original.

    def func(self):

        """Some function.

        With several mistakes in the docstring.
        It has a blank like after the signature `def func():`.
        The text 'Some function' should go in the line after the
        opening quotes of the docstring, not in the same line.
        There is a blank line between the docstring and the first line
        of code `foo = 1`.
        The closing quotes should be in the next line, not in this one."""

        foo = 1
        bar = 2
        return foo + bar

    def astype(self, dtype):
        """
        Casts Series type.
        Verb in third-person of the present simple, should be infinitive.
        """
        pass

    def astype1(self, dtype):
        """
        Method to cast Series type.
        Does not start with verb.
        """
        pass

    def astype2(self, dtype):
        """
        Cast Series type
        Missing dot at the end.
        """
        pass

    def astype3(self, dtype):
        """
        Cast Series type from its current type to the new type defined in
        the parameter dtype.
        Summary is too verbose and doesn't fit in a single line.
        """
        pass

    def plot(self, kind, **kwargs):
        """
        Generate a plot.

        Render the data in the Series as a matplotlib plot of the
        specified kind.

        Note the blank line between the parameters title and the first
        parameter. Also, note that after the name of the parameter `kind`
        and before the colon, a space is missing.

        Also, note that the parameter descriptions do not start with a
        capital letter, and do not finish with a dot.

        Finally, the `**kwargs` parameter is missing.

        Parameters
        ----------

        kind: str
            kind of matplotlib plot
        """
        pass

    def method(self, foo=None, bar=None):
        """
        A sample DataFrame method.

        Do not import numpy and pandas.

        Try to use meaningful data, when it makes the example easier
        to understand.

        Try to avoid positional arguments like in `df.method(1)`. They
        can be alright if previously defined with a meaningful name,
        like in `present_value(interest_rate)`, but avoid them otherwise.

        When presenting the behavior with different parameters, do not place
        all the calls one next to the other. Instead, add a short sentence
        explaining what the example shows.

        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> df = pd.DataFrame(np.ones((3, 3)),
        ...                   columns=('a', 'b', 'c'))
        >>> df.all(1)
        0    True
        1    True
        2    True
        dtype: bool
        >>> df.all(bool_only=True)
        Series([], dtype: bool)
        """
        pass
class BadSummaries(object):
    # NOTE(review): fixture class — each method's docstring deliberately
    # violates one summary rule named by the method; test_bad_examples
    # asserts the validator reports exactly those violations. Do not edit
    # the docstring text. Internal blank lines were reconstructed from a
    # whitespace-stripped source — verify against the original file.

    def wrong_line(self):
        """Exists on the wrong line"""
        pass

    def no_punctuation(self):
        """
        Has the right line but forgets punctuation
        """
        pass

    def no_capitalization(self):
        """
        provides a lowercase summary.
        """
        pass

    def no_infinitive(self):
        """
        Started with a verb that is not infinitive.
        """

    def multi_line(self):
        """
        Extends beyond one line
        which is not correct.
        """

    def two_paragraph_multi_line(self):
        """
        Extends beyond one line
        which is not correct.

        Extends beyond one line, which in itself is correct but the
        previous short summary should still be an issue.
        """
class BadParameters(object):
    """
    Everything here has a problem with its Parameters section.
    """
    # NOTE(review): fixture class — each docstring deliberately contains the
    # Parameters-section defect named by its method; test_bad_examples asserts
    # the validator flags it. Do not fix the docstrings. Blank lines inside
    # the docstrings (in particular the one `blank_lines` exists to exercise)
    # were reconstructed from a whitespace-stripped source — verify against
    # the original file.

    def missing_params(self, kind, **kwargs):
        """
        Lacks kwargs in Parameters.

        Parameters
        ----------
        kind : str
            Foo bar baz.
        """

    def bad_colon_spacing(self, kind):
        """
        Has bad spacing in the type line.

        Parameters
        ----------
        kind: str
            Needs a space after kind.
        """

    def no_description_period(self, kind):
        """
        Forgets to add a period to the description.

        Parameters
        ----------
        kind : str
            Doesn't end with a dot
        """

    def no_description_period_with_directive(self, kind):
        """
        Forgets to add a period, and also includes a directive.

        Parameters
        ----------
        kind : str
            Doesn't end with a dot
            .. versionadded:: 0.00.0
        """

    def no_description_period_with_directives(self, kind):
        """
        Forgets to add a period, and also includes multiple directives.

        Parameters
        ----------
        kind : str
            Doesn't end with a dot
            .. versionchanged:: 0.00.0
            .. deprecated:: 0.00.0
        """

    def parameter_capitalization(self, kind):
        """
        Forgets to capitalize the description.

        Parameters
        ----------
        kind : str
            this is not capitalized.
        """

    def blank_lines(self, kind):
        """
        Adds a blank line after the section header.

        Parameters
        ----------

        kind : str
            Foo bar baz.
        """
        pass

    def integer_parameter(self, kind):
        """
        Uses integer instead of int.

        Parameters
        ----------
        kind : integer
            Foo bar baz.
        """
        pass

    def string_parameter(self, kind):
        """
        Uses string instead of str.

        Parameters
        ----------
        kind : string
            Foo bar baz.
        """
        pass

    def boolean_parameter(self, kind):
        """
        Uses boolean instead of bool.

        Parameters
        ----------
        kind : boolean
            Foo bar baz.
        """
        pass

    def list_incorrect_parameter_type(self, kind):
        """
        Uses list of boolean instead of list of bool.

        Parameters
        ----------
        kind : list of boolean, integer, float or string
            Foo bar baz.
        """
        pass
class BadReturns(object):
    # NOTE(review): fixture class — each docstring deliberately misdocuments
    # the return/yield value in the way its method name describes; do not fix
    # the docstrings (including the "descrption" typo — it is fixture text).
    # Blank lines before the Returns headers were reconstructed from a
    # whitespace-stripped source — verify against the original file.

    def return_not_documented(self):
        """
        Lacks section for Returns
        """
        return "Hello world!"

    def yield_not_documented(self):
        """
        Lacks section for Yields
        """
        yield "Hello world!"

    def no_type(self):
        """
        Returns documented but without type.

        Returns
        -------
        Some value.
        """
        return "Hello world!"

    def no_description(self):
        """
        Provides type but no descrption.

        Returns
        -------
        str
        """
        return "Hello world!"

    def no_punctuation(self):
        """
        Provides type and description but no period.

        Returns
        -------
        str
            A nice greeting
        """
        return "Hello world!"
class BadSeeAlso(object):
    # NOTE(review): fixture class — the See Also entries deliberately contain
    # the defect each method name describes (missing period, lowercase start,
    # `pandas.` prefix); test_bad_examples asserts the corresponding error
    # messages. Do not fix the docstrings. Blank lines were reconstructed
    # from a whitespace-stripped source — verify against the original file.

    def desc_no_period(self):
        """
        Return the first 5 elements of the Series.

        See Also
        --------
        Series.tail : Return the last 5 elements of the Series.
        Series.iloc : Return a slice of the elements in the Series,
            which can also be used to return the first or last n
        """
        pass

    def desc_first_letter_lowercase(self):
        """
        Return the first 5 elements of the Series.

        See Also
        --------
        Series.tail : return the last 5 elements of the Series.
        Series.iloc : Return a slice of the elements in the Series,
            which can also be used to return the first or last n.
        """
        pass

    def prefix_pandas(self):
        """
        Have `pandas` prefix in See Also section.

        See Also
        --------
        pandas.Series.rename : Alter Series index labels or name.
        DataFrame.head : The first `n` rows of the caller object.
        """
        pass
class BadExamples(object):
    # NOTE(review): fixture class — each doctest deliberately violates one
    # flake8 rule (F401, E226, E111, E231); test_bad_examples asserts the
    # exact "<line> <code> <message>" strings, so the doctest text — down to
    # its whitespace — is load-bearing. The source this was recovered from
    # had whitespace stripped; in particular the continuation indent in
    # indentation_is_not_a_multiple_of_four (which must NOT be a multiple of
    # four for E111 to fire) is a best-effort reconstruction — verify against
    # the original file.

    def unused_import(self):
        """
        Examples
        --------
        >>> import pandas as pdf
        >>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
        """
        pass

    def missing_whitespace_around_arithmetic_operator(self):
        """
        Examples
        --------
        >>> 2+5
        7
        """
        pass

    def indentation_is_not_a_multiple_of_four(self):
        """
        Examples
        --------
        >>> if 2 + 5:
        ...   pass
        """
        pass

    def missing_whitespace_after_comma(self):
        """
        Examples
        --------
        >>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
        """
        pass
class TestValidator(object):
    """Run validate_one against the fixture docstrings defined above."""

    def _import_path(self, klass=None, func=None):
        """
        Build the dotted import path for an object in this test module.

        Parameters
        ----------
        klass : str
            Class name of object in module.
        func : str
            Function name of object in module.

        Returns
        -------
        str
            Import path of specified object in this module.
        """
        # Collect the non-empty components and join them in one pass.
        parts = ["scripts.tests.test_validate_docstrings"]
        if klass:
            parts.append(klass)
        if func:
            parts.append(func)
        return ".".join(parts)

    @capture_stderr
    def test_good_class(self):
        # A well-formed fixture class must produce an empty error list.
        path = self._import_path(klass='GoodDocStrings')
        errors = validate_one(path)['errors']
        assert isinstance(errors, list)
        assert not errors

    @capture_stderr
    @pytest.mark.parametrize("func", [
        'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
        'contains', 'mode', 'good_imports'])
    def test_good_functions(self, func):
        # Each well-formed fixture method must validate with no errors.
        path = self._import_path(klass='GoodDocStrings', func=func)
        errors = validate_one(path)['errors']
        assert isinstance(errors, list)
        assert not errors

    @capture_stderr
    def test_bad_class(self):
        # A defective fixture class must produce at least one error.
        path = self._import_path(klass='BadGenericDocStrings')
        errors = validate_one(path)['errors']
        assert isinstance(errors, list)
        assert errors

    @capture_stderr
    @pytest.mark.parametrize("func", [
        'func', 'astype', 'astype1', 'astype2', 'astype3', 'plot', 'method'])
    def test_bad_generic_functions(self, func):
        # Each defective fixture method must produce at least one error.
        path = self._import_path(  # noqa:F821
            klass='BadGenericDocStrings', func=func)
        errors = validate_one(path)['errors']
        assert isinstance(errors, list)
        assert errors

    @pytest.mark.parametrize("klass,func,msgs", [
        # See Also tests
        ('BadSeeAlso', 'desc_no_period',
         ('Missing period at end of description for See Also "Series.iloc"',)),
        ('BadSeeAlso', 'desc_first_letter_lowercase',
         ('should be capitalized for See Also "Series.tail"',)),
        # Summary tests
        ('BadSummaries', 'wrong_line',
         ('should start in the line immediately after the opening quotes',)),
        ('BadSummaries', 'no_punctuation',
         ('Summary does not end with a period',)),
        ('BadSummaries', 'no_capitalization',
         ('Summary does not start with a capital letter',)),
        ('BadSummaries', 'no_capitalization',
         ('Summary must start with infinitive verb',)),
        ('BadSummaries', 'multi_line',
         ('Summary should fit in a single line.',)),
        ('BadSummaries', 'two_paragraph_multi_line',
         ('Summary should fit in a single line.',)),
        # Parameters tests
        ('BadParameters', 'missing_params',
         ('Parameters {**kwargs} not documented',)),
        ('BadParameters', 'bad_colon_spacing',
         ('Parameters {kind} not documented',
          'Unknown parameters {kind: str}',
          'Parameter "kind: str" has no type')),
        ('BadParameters', 'no_description_period',
         ('Parameter "kind" description should finish with "."',)),
        ('BadParameters', 'no_description_period_with_directive',
         ('Parameter "kind" description should finish with "."',)),
        ('BadParameters', 'parameter_capitalization',
         ('Parameter "kind" description should start with a capital letter',)),
        ('BadParameters', 'integer_parameter',
         ('Parameter "kind" type should use "int" instead of "integer"',)),
        ('BadParameters', 'string_parameter',
         ('Parameter "kind" type should use "str" instead of "string"',)),
        ('BadParameters', 'boolean_parameter',
         ('Parameter "kind" type should use "bool" instead of "boolean"',)),
        ('BadParameters', 'list_incorrect_parameter_type',
         ('Parameter "kind" type should use "bool" instead of "boolean"',)),
        ('BadParameters', 'list_incorrect_parameter_type',
         ('Parameter "kind" type should use "int" instead of "integer"',)),
        ('BadParameters', 'list_incorrect_parameter_type',
         ('Parameter "kind" type should use "str" instead of "string"',)),
        pytest.param('BadParameters', 'blank_lines', ('No error yet?',),
                     marks=pytest.mark.xfail),
        # Returns tests
        ('BadReturns', 'return_not_documented', ('No Returns section found',)),
        ('BadReturns', 'yield_not_documented', ('No Yields section found',)),
        pytest.param('BadReturns', 'no_type', ('foo',),
                     marks=pytest.mark.xfail),
        pytest.param('BadReturns', 'no_description', ('foo',),
                     marks=pytest.mark.xfail),
        pytest.param('BadReturns', 'no_punctuation', ('foo',),
                     marks=pytest.mark.xfail),
        # Examples tests
        ('BadGenericDocStrings', 'method',
         ('numpy does not need to be imported in the examples',)),
        ('BadGenericDocStrings', 'method',
         ('pandas does not need to be imported in the examples',)),
        # See Also tests
        ('BadSeeAlso', 'prefix_pandas',
         ('pandas.Series.rename in `See Also` section '
          'does not need `pandas` prefix',)),
        # Examples tests
        ('BadExamples', 'unused_import',
         ('1 F401 \'pandas as pdf\' imported but unused',)),
        ('BadExamples', 'indentation_is_not_a_multiple_of_four',
         ('1 E111 indentation is not a multiple of four',)),
        ('BadExamples', 'missing_whitespace_around_arithmetic_operator',
         ('1 E226 missing whitespace around arithmetic operator',)),
        ('BadExamples', 'missing_whitespace_after_comma',
         ('3 E231 missing whitespace after \',\'',)),
    ])
    def test_bad_examples(self, capsys, klass, func, msgs):
        # Every expected message fragment must appear somewhere in the
        # concatenated error output for the given fixture.
        result = validate_one(self._import_path(klass=klass, func=func))
        joined = ' '.join(result['errors'])
        for msg in msgs:
            assert msg in joined
class ApiItems(object):
    # NOTE(review): the RST document returned by `api_doc` is fixture input
    # for validate_docstrings.get_api_items; its exact text (blank lines and
    # the indentation of the autosummary entries) is load-bearing for the
    # parser. The source this was recovered from had whitespace stripped, so
    # the internal blank lines / indentation below are a best-effort
    # reconstruction — verify against the original file.
    @property
    def api_doc(self):
        return io.StringIO('''
.. currentmodule:: itertools

Itertools
---------

Infinite
~~~~~~~~

.. autosummary::

    cycle
    count

Finite
~~~~~~

.. autosummary::

    chain

.. currentmodule:: random

Random
------

All
~~~

.. autosummary::

    seed
    randint
''')

    @pytest.mark.parametrize('idx,name', [(0, 'itertools.cycle'),
                                          (1, 'itertools.count'),
                                          (2, 'itertools.chain'),
                                          (3, 'random.seed'),
                                          (4, 'random.randint')])
    def test_item_name(self, idx, name):
        # Item 0 of each tuple is the fully-qualified name.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][0] == name

    @pytest.mark.parametrize('idx,func', [(0, 'cycle'),
                                          (1, 'count'),
                                          (2, 'chain'),
                                          (3, 'seed'),
                                          (4, 'randint')])
    def test_item_function(self, idx, func):
        # Item 1 is the resolved callable object itself.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert callable(result[idx][1])
        assert result[idx][1].__name__ == func

    @pytest.mark.parametrize('idx,section', [(0, 'Itertools'),
                                             (1, 'Itertools'),
                                             (2, 'Itertools'),
                                             (3, 'Random'),
                                             (4, 'Random')])
    def test_item_section(self, idx, section):
        # Item 2 is the enclosing "---" section title.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][2] == section

    @pytest.mark.parametrize('idx,subsection', [(0, 'Infinite'),
                                                (1, 'Infinite'),
                                                (2, 'Finite'),
                                                (3, 'All'),
                                                (4, 'All')])
    def test_item_subsection(self, idx, subsection):
        # Item 3 is the enclosing "~~~" subsection title.
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][3] == subsection
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.