repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
xzh86/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric: string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format: column access is easier when calculating the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        # Shrinkage below does dense broadcasting arithmetic, so it cannot
        # be combined with sparse input.
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        # Encode labels as contiguous indices 0..n_classes-1 so they can be
        # used directly as row indices / mask values below.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # Mask mapping each class to its members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class (needed for the shrinkage "m" term).
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices are indexed with explicit row indices,
                # not boolean masks.
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid step from Tibshirani et al. (2002).
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # Assign each sample the class whose centroid is nearest under the
        # configured metric.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
peterfpeterson/mantid | Testing/PerformanceTests/make_report.py | 3 | 3243 | #!/usr/bin/env python
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import argparse
import sys
import os
# ====================================================================================
def getSourceDir():
    """Return the directory containing this script, resolving symlinks.

    Returns
    -------
    str
        Absolute path of the directory holding the running script.
    """
    # os and sys are already imported at module level; the original
    # re-imported them locally for no reason.
    script = os.path.abspath(sys.argv[0])
    if os.path.islink(script):
        # Follow a symlinked entry point back to the real source location.
        script = os.path.realpath(script)
    return os.path.dirname(script)
def join_databases(dbfiles):
    """Create a single DB joining several ones.

    Parameters
    ----------
    dbfiles : list of str
        Paths to the SQL result databases to merge. The joined database is
        written next to the first input file.

    Returns
    -------
    str
        Filename of the joined database that was created.
    """
    outfile = os.path.join(os.path.dirname(dbfiles[0]), "JoinedDatabases.db")
    all_results = []
    # Get the results of each file
    for dbfile in dbfiles:
        # Was a Python 2 print statement; a SyntaxError under Python 3.
        print("Reading", dbfile)
        sqlresults.set_database_filename(dbfile)
        these_results = sqlresults.get_results("")
        all_results += these_results
    # Write them into one
    sqlresults.set_database_filename(outfile)
    sqlresults.setup_database()
    reporter = sqlresults.SQLResultReporter()
    for res in all_results:
        reporter.dispatchResults(res)
    # Done!
    return outfile
# ====================================================================================
if __name__ == "__main__":
# Parse the command line
parser = argparse.ArgumentParser(
description='Generates a HTML report using the Mantid System Tests results database')
parser.add_argument('--path', dest='path',
default="./Report",
help='Path to the output HTML. Default "./Report".')
parser.add_argument('--x_field', dest='x_field',
default="revision",
help="Field to use as the x-axis. Default: 'revision'. Other possibilities: 'date'.")
parser.add_argument('dbfile', metavar='DBFILE', type=str, nargs='+',
default=["./MantidSystemTests.db"],
help='Required: Path to the SQL database file(s).')
parser.add_argument('--plotting', dest='plotting',
default="plotly",
help='Plotting toolkit to generate the plots. Options=["plotly", "matplotlib"]')
args = parser.parse_args()
if args.plotting == 'plotly':
import analysis
elif args.plotting == 'matplotlib':
import analysis_mpl as analysis
else:
raise RuntimeError("Unknown plotting toolkit '{}'".format(args.plotting))
import sqlresults
if len(args.dbfile) > 1:
# Several files - join them into one big .db
dbfile = join_databases(args.dbfile)
else:
# Only one file - use it
dbfile = args.dbfile[0]
if not os.path.exists(dbfile):
print "Error! Could not find", dbfile
sys.exit(1)
# This is where we look for the DB file
sqlresults.set_database_filename(dbfile)
# Make the report
analysis.generate_html_report(args.path, 100, args.x_field)
| gpl-3.0 |
seanli9jan/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"

# Populated by argparse in the __main__ block below.
FLAGS = None
# Keep a copy of the original argv so main() can restore it after flag
# processing mutates sys.argv.
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
    """Run either the notebook server (parent) or a kernel (subprocess).

    Which role to play is decided by the module-level IS_KERNEL flag,
    derived from argv[1] == "kernel".
    """
    # Restore the original command line captured at import time.
    sys.argv = ORIG_ARGV

    if not IS_KERNEL:
        # Drop all flags.
        sys.argv = [sys.argv[0]]
        # NOTE(sadovsky): For some reason, putting this import at the top level
        # breaks inline plotting.  It's probably a bug in the stone-age version
        # of matplotlib.
        from IPython.html.notebookapp import NotebookApp  # pylint: disable=g-import-not-at-top
        notebookapp = NotebookApp.instance()
        notebookapp.open_browser = True

        # password functionality adopted from quality/ranklab/main/tools/notebook.py
        # add options to run with "password"
        if FLAGS.password:
            from IPython.lib import passwd  # pylint: disable=g-import-not-at-top
            # With a password set, allow connections from any interface.
            notebookapp.ip = "0.0.0.0"
            notebookapp.password = passwd(FLAGS.password)
        else:
            print("\nNo password specified; Notebook server will only be available"
                  " on the local machine.\n")
        notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])

        if notebookapp.ip == "0.0.0.0":
            # Publicly reachable: report the externally visible URL.
            proto = "https" if notebookapp.certfile else "http"
            url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
                                    notebookapp.base_project_url)
            print("\nNotebook server will be publicly available at: %s\n" % url)

        notebookapp.start()
        return

    # Drop the --flagfile flag so that notebook doesn't complain about an
    # "unrecognized alias" when parsing sys.argv.
    sys.argv = ([sys.argv[0]] +
                [z for z in sys.argv[1:] if not z.startswith("--flagfile")])
    from IPython.kernel.zmq.kernelapp import IPKernelApp  # pylint: disable=g-import-not-at-top
    kernelapp = IPKernelApp.instance()
    kernelapp.initialize()

    # Enable inline plotting. Equivalent to running "%matplotlib inline".
    ipshell = kernelapp.shell
    ipshell.enable_matplotlib("inline")

    kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
pigna90/lastfm_network_analysis | src/community_discovery.py | 1 | 12604 | from Demon import Demon
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import networkx as nx
import os
from itertools import product
import seaborn as sns
import pandas as pd
import community
from sklearn.preprocessing import normalize
##
# Print a bar histogram
##
# Params:
# x - list of labels
# freq - label's frequencies
# xlabel - name for x axis
# ylabel - name for y axis
# out - output name file for figure
##
def histogram(x, freq, xlabel=None, ylabel=None, out=None):
    """Print a bar histogram.

    Params:
        x - list of labels
        freq - label's frequencies
        xlabel - name for x axis
        ylabel - name for y axis
        out - output name file for figure (shown interactively when None)
    """
    # Build a thinned copy of the labels instead of mutating the caller's
    # list (the original blanked entries of `x` in place). As before, every
    # 5th label is kept, plus the final one.
    labels = [lab if (i % 5 == 0 or i == len(x) - 1) else ""
              for i, lab in enumerate(x)]
    plt.bar(range(len(freq)), freq, color='g', alpha=0.6, linewidth=0)
    plt.xticks(range(len(labels)), labels, size='small', rotation='vertical')
    if xlabel is not None and ylabel is not None:
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
    if out is None:
        plt.show()
    else:
        plt.savefig(out + ".svg", bbox_inches="tight")
##
# Make a communities analysis using DEMON algorithm written
# by Giulio Rossetti (https://github.com/GiulioRossetti/DEMON).
# The analysis is made by iterating on a range of epsilon values.
##
# Params:
# network - file network as edges list
# epsilon_range - tuple of two values, rappresent epsilon range
# min_community - minimum numbers of element needed to create a community
# bins - distance betwen every epsilon inside the range
# out - output path for result of communities analysis
##
def demon_analysis(network, epsilon_range, min_community, bins, out):
    """Run DEMON community discovery over a sweep of epsilon values.

    Params:
        network - file network as edges list
        epsilon_range - tuple (start, width): the sweep starts at `start`
            and advances by width/bins at each step
        min_community - minimum numbers of element needed to create a community
        bins - number of epsilon values inside the range
        out - output path prefix for serialized community results

    Returns:
        (epsilons, frequencies) - the epsilon used at each step and the
        number of communities found there. (The original computed these
        lists and silently discarded them.)
    """
    epsilon = epsilon_range[0]
    x = []
    freq = []
    for i in range(0, bins):
        # One result file per step, tagged with step index, eps and min size.
        out_path = out + "_" + str(i) + "_" + str(round(epsilon, 3)) + "_" + str(min_community)
        dm = Demon(network, epsilon, min_community, out_path)
        communities = dm.execute()
        freq.append(len(communities))
        x.append(round(epsilon, 3))
        epsilon += epsilon_range[1] / bins
    return x, freq
##
# Make a community analusis using k-clique algorithm from networkx.
# The analysis is made by iterating on a range of k values.
##
# Params:
# graph - networkx graph
# k_lists - lists of k as integer
# out_path - output path for results of communities analysis
##
def k_clique_analysis(G, k_list, out_path):
    """Run k-clique community detection for every k in k_list and
    serialize each result as lines of: <community_id>\t["node",...].

    Params:
        G - networkx graph
        k_list - lists of k as integer
        out_path - output folder for results (one file per k)
    """
    for k in k_list:
        # NOTE: the original left debugging code here (`print(type(c[0]))`
        # followed by `quit()`) which terminated the interpreter before any
        # output was written.
        communities = list(map(list, nx.k_clique_communities(G, k)))
        out = open(out_path + str(k) + "_clique.dat", "w")
        try:
            for cid, community in enumerate(communities):
                out.write("%d\t[" % cid)
                for node in community:
                    out.write('"%s",' % node)
                out.write("]\n")
        finally:
            out.close()
##
# Make community analysis using Louvain algorithm from communities module,
# and serialize result on file as list of nodes for each communities.
##
# Params:
# graph - networkx graph
# out_path - output path for results of communities analysis
##
def louvain_analysis(graph, out_path):
    """Run Louvain community detection on *graph* and serialize the
    result to out_path + "louvain_communities.dat", one community per
    line formatted as: <community_id>\t["node","node",...]
    """
    partition = community.best_partition(graph)
    handle = open(out_path + "louvain_communities.dat", "w")
    for comm_id in set(partition.values()):
        # Collect every node assigned to this community id.
        members = [node for node, cid in partition.items() if cid == comm_id]
        handle.write("%d\t[" % comm_id)
        for node in members:
            handle.write('"%s",' % node)
        handle.write("]\n")
    handle.close()
##
# Deserialize DEMON/K-Clique results from file and return a list of
# first n ordered communities for every file read
##
# Params:
# param - List of epsilon/k to read
# log_path - Path to demon log folder
# n - number of communities for each eps/k
##
def deserialize_demon_kclique(log_path, param=None, n=None):
    """Deserialize DEMON/K-Clique results from file and return a list of
    the first n ordered (largest-first) communities for every file read.

    Params:
        log_path - path to the log folder
        param - optional list of epsilon/k tags; only files whose name
            contains one of them are loaded (all files when None)
        n - number of communities kept per file (all when None)
    """
    def _load(filename):
        # Each file maps community id -> node list; keep only the node
        # lists, biggest communities first.
        communities = list(dict_from_file(log_path + filename).values())
        communities.sort(key=len, reverse=True)
        return communities if n is None else communities[:n]

    # The original duplicated this whole loop in both branches and
    # re-listed the directory once per parameter.
    dict_list = os.listdir(log_path)
    if param is None:
        return [_load(d) for d in dict_list]
    return [_load(d) for p in param for d in dict_list if p in d]
##
# Deserialize Louvain results from file and return a list of
# first n ordered communities
##
# Params:
# log_path - Path to demon log folder
# n - number of communities to read
##
def deserialize_louvain(log_path, n=None):
    """Load Louvain communities serialized at *log_path* and return them
    ordered largest-first; only the first n when n is given."""
    communities = sorted(dict_from_file(log_path).values(),
                         key=len, reverse=True)
    return communities if n is None else communities[:n]
##
# Plot two type of distribution analysis computed on a set of comunity.
##
# Params:
# distribution_type - Type of distribution analysis {density,transitivity,nodes}
# legend - plot legend
# graph - Main network contain communities for analysis
# list_communities - lists of communities
# out - Path for output plot result
##
def plot_distribution(distribution_type, legend, graph, list_communities, out=None):
    """Plot one statistic per community for several community sets.

    distribution_type selects the statistic (case-insensitive):
    "nodes" (community size), "density" or "transitivity" (computed on
    the subgraph induced by each community). Any other value returns
    None without plotting. One curve is drawn per entry of
    list_communities; all entries are assumed to have the same length
    as the first one.
    """
    # One x position per community of the first set.
    x = [i for i in range(0, len(list_communities[0]))]
    for communities in list_communities:
        if distribution_type.lower() == "nodes":
            y = list(map(len, communities))
        else:
            y = []
            for l in communities:
                # Statistic computed on the induced subgraph of the community.
                H = graph.subgraph(l)
                if distribution_type.lower() == "density":
                    y.append(nx.density(H))
                elif distribution_type.lower() == "transitivity":
                    y.append(nx.transitivity(H))
                else:
                    return None
        plt.plot(x, y, linewidth=2, alpha=0.8)
    #plt.yscale("log")
    plt.legend(legend, loc='upper left')
    plt.xlabel("Comunity ID")
    plt.ylabel(distribution_type)
    if out == None:
        plt.show()
    else:
        plt.savefig(out + ".svg", bbox_inches="tight")
    plt.close()
##
# Load dict from file. Format:
# key\t["a","b","c"]
##
def dict_from_file(path_dict):
    """Load a dict serialized one entry per line in the format:
    key\t["a","b","c"]

    Uses ast.literal_eval instead of eval so that arbitrary code in the
    file cannot be executed — the serialized values are plain list
    literals, which literal_eval handles (including trailing commas).
    """
    import ast
    out_dict = {}
    with open(path_dict, 'r') as f:
        for line in f:
            key, _, value = line.rstrip("\n").partition("\t")
            out_dict[int(key)] = ast.literal_eval(value)
    return out_dict
##
# Plot histogram of DEMON results with different eps serialized on file
##
# Params:
# communities_lists - list of results with differents eps
# out - Path for output plot result
##
def histogram_epsilon_frequencies(communities_lists, out=None):
    """Histogram of how many communities DEMON found for each epsilon
    result; frequencies are plotted in ascending order."""
    freq = sorted(len(communities) for communities in communities_lists)
    labels = list(range(len(freq)))
    histogram(labels, freq, "Epsilon", "Number of communities", out)
##
# Plot jaccard heatmap calculated on comunity result serialized on file.
##
# Params:
# communities - list of community
# out - Path for output plot result
# shape - shape of matrix
##
def plot_jaccard_heatmap(communities, shape=30, out=None):
    """Heatmap of pairwise Jaccard similarity between communities.

    NOTE(review): `shape` must equal len(communities) — the cartesian
    product yields len(communities)**2 values which are reshaped to
    (shape, shape); confirm callers pass a matching value.
    """
    # All-pairs similarity via the cartesian product of the community list.
    data = np.array(list(map(jaccard_similarity, list(product(communities, repeat=2)))))
    data = data.reshape(shape, shape)
    ax = plt.axes()
    # NOTE(review): this palette is computed but unused — plt.cm.Reds is
    # passed to sns.heatmap instead.
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    heat = sns.heatmap(data, cmap=plt.cm.Reds, square=True, linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
    # Flip the y axis so community 0 sits at the bottom-left corner.
    heat.invert_yaxis()
    plt.ylabel("Comunity ID")
    plt.xlabel("Comunity ID")
    plt.yticks(size='small', rotation='horizontal')
    plt.xticks(size='small', rotation='vertical')
    if out == None:
        plt.show()
    else:
        plt.savefig(out + ".svg", bbox_inches="tight")
    plt.close()
##
# Plot piechart of communities external data.
##
# Params:
# data - csv of external data
# community - nodes of community
# pie_pieces - number of segment
# out - Path for output plot result
##
def plot_pie_external(data, dim, community, pie_pieces=10, out=None):
    """Pie chart of external attribute *dim* over the members of *community*.

    Params:
        data - csv of external data (must have a "username" column and
            a column named *dim*)
        dim - column of the csv to aggregate (e.g. "artist", "genre")
        community - nodes of community (matched against "username")
        pie_pieces - number of top categories shown individually; the
            remainder is folded into an "Other" slice
        out - output path for the figure; shown interactively when None
    """
    df = pd.read_csv(data)
    # Frequency of each attribute value among the community's members.
    counts = df[df["username"].isin(list(set(community)))][dim].value_counts()
    # Everything beyond the top `pie_pieces` is aggregated as "Other".
    other = pd.Series([abs(sum(counts[:pie_pieces]) - sum(counts[pie_pieces:]))], index=["Other"])
    counts = counts[:pie_pieces]
    counts = counts.append(other)
    labels = [i[0] for i in counts.iteritems()]
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral', 'violet', 'tomato', 'cyan', 'blueviolet', 'palevioletred', 'darkorange', 'grey']
    # Force the "Other" slice to grey regardless of its position.
    colors[pie_pieces] = "grey"
    plt.pie(counts, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90, center=(0, 0))
    if out == None:
        plt.show()
    else:
        plt.savefig(out + ".svg", bbox_inches="tight")
    plt.close()
##
# Plot graph with different color for every set of communities
##
# Params:
# graph - networkx graph to plot
# communities - list communities (each communities is a list of nodes)
# colors - colormap
# out - Path for output plot result
##
def plot_communities(graph, communities, colors, out=None):
    """Draw the subgraph induced by *communities*, one color per community.

    Nodes belonging to every community at once are drawn white; node size
    is proportional to degree.

    NOTE(review): `nx.degree(H)` is used as a dict (`d.values()`), which
    matches networkx 1.x; under networkx 2.x it returns a DegreeView —
    confirm the pinned networkx version.
    """
    # Unique set of all nodes appearing in any community.
    nodes = [y for x in communities for y in x]
    nodes = list(set(nodes))
    class_colors = {}
    for n in nodes:
        for c in communities:
            if n in c:
                # Last matching community wins for overlapping nodes...
                class_colors[n] = colors[communities.index(c)]
            if(all(n in c for c in communities)):
                # ...unless the node is in every community: then white.
                class_colors[n] = 'white'
    H = graph.subgraph(nodes)
    d = nx.degree(H)
    nx.draw(H, node_list=list(class_colors.keys()), node_color=list(class_colors.values()), node_size=[v * 5 for v in d.values()], width=0.2)
    if out == None:
        plt.show()
    else:
        plt.savefig(out + ".svg", bbox_inches="tight")
    plt.close()
##
# Jaccard similarity between two list.
# (Made for use with map())
##
# Params:
# pair - tuple of list
##
def jaccard_similarity(pair):
    """Jaccard similarity between the two lists packed in *pair*.

    Takes a single (list, list) tuple so it can be used directly with
    map() over an iterable of pairs.
    """
    first, second = map(set, pair)
    intersection_size = len(first & second)
    union_size = len(first | second)
    return intersection_size / float(union_size)
##
# Read edges list from file
##
def read_graph(filename):
    """Read an undirected graph from an edge-list file.

    Accepts spaces, commas, tabs or semicolons as field separators; only
    the first two fields of each line (source, target) are used.
    """
    g = nx.Graph()
    # `with` guarantees the handle is closed; the original leaked it.
    with open(filename) as f:
        for l in f:
            l = l.rstrip().replace(" ", ";").replace(",", ";").replace("\t", ";").split(";")
            g.add_edge(l[0], l[1])
    return g
def example_usage():
    """End-to-end demo of the pipeline: DEMON, k-clique and Louvain
    community discovery, the associated plots, and validation against
    external artist/genre data.

    NOTE: several comment lines in the original were bare prose
    (no leading #), which made this module unimportable; they are
    restored as comments below.
    """
    graph = "../data/network_cleaned16.csv"
    external_data = "../data/artists_genres.csv"
    log_demon = "../data/demon_log/"
    log_kclique = "../data/k-clique"
    log_louvain = "../data/louvain"
    eps_list = ["0.034", "0.234", "0.301", "0.368"]
    colors = ['yellowgreen', 'gold', 'lightskyblue', 'royalblue', 'magenta', 'r']

    # Loading graph from file
    G = read_graph(graph)

    # Make DEMON analysis on eps range and serialize results on file
    demon_analysis(graph, (0.001, 0.4), 3, 60, "/tmp/demon")

    # Reading communities serialized by DEMON
    list_communities = deserialize_demon_kclique(log_demon)

    # Reading eps_list results serialized by DEMON. For each eps keep
    # only the first 30 communities.
    list_communities = deserialize_demon_kclique(log_demon, eps_list, 30)

    # Plot histogram of communities made with different eps
    histogram_epsilon_frequencies(list_communities)

    # Legend for plot distribution
    legend = ["eps = " + eps for eps in eps_list]

    # Plot number of nodes/density and transitivity
    # for different eps results
    for d_type in ["Nodes", "Density", "Transitivity"]:
        plot_distribution(distribution_type=d_type, list_communities=list_communities, legend=legend, graph=G)

    # Jaccard heatmap for communities overlapping on one of eps results
    # communities. For example communities calculated with eps = 0.031
    plot_jaccard_heatmap(list_communities[3])

    # Validation with external data.
    # Select an eps result for analysis. For example eps = 0.031
    data = list_communities[2]
    # For some dimensions of external data...
    for dim in ["artist", "genre"]:
        # for some communities...
        for c in [7, 8, 9]:
            community = data[c]
            plot_pie_external(data=external_data, dim=dim, pie_pieces=8, community=community)

    # Plot graph with communities
    data = list_communities[2]
    data = [data[7], data[8], data[9]]
    plot_communities(G, data, colors)

    # K-Clique analysis
    k_list = list(range(2, 10))
    k_clique_analysis(G, [4], "../data/k-clique/")

    # Deserialize results made with k-clique. (The original called the
    # undefined name `deserialize_demon_results` with the arguments in
    # the wrong order.)
    k_clique_communities = deserialize_demon_kclique("../data/k-clique/", list(map(str, [4])), 10)
    legend = ["k = " + str(k) for k in [3, 4, 5, 6]]
    for d_type in ["Nodes", "Density", "Transitivity"]:
        plot_distribution(distribution_type=d_type, list_communities=k_clique_communities, legend=legend, graph=G)

    # Plot graph with communities
    for data in k_clique_communities:
        plot_communities(G, data[:5], colors, out=str(k_clique_communities.index(data) + 3) + "_clique_graph")

    # Validation with external data
    c_4 = (k_clique_communities[0])[:3]
    for community in c_4:
        plot_pie_external(external_data, "artist", community)
        plot_pie_external(external_data, "genre", community)

    # Louvain analysis
    louvain_analysis(G, log_louvain)
    louvain = [deserialize_louvain("../data/louvain_communities.dat")]
    for d_type in ["Nodes", "Density", "Transitivity"]:
        plot_distribution(distribution_type=d_type, list_communities=louvain, legend=["louvain"], graph=G)
    plot_communities(G, (louvain[0])[:5], colors)
    c_3 = (louvain[0])[:3]
    for community in c_3:
        plot_pie_external(external_data, "artist", community, out=str(c_3.index(community)) + "_artist")
        plot_pie_external(external_data, "genre", community, out=str(c_3.index(community)) + "_genre")


if __name__ == "__main__":
    example_usage()
| gpl-3.0 |
hips/autograd | examples/black_box_svi.py | 3 | 3136 | from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
from autograd import grad
from autograd.misc.optimizers import adam
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557

    Returns (objective, gradient, unpack_params) where `objective` is a
    stochastic estimate of the negative ELBO for a D-dimensional
    diagonal-Gaussian variational posterior, `gradient` is its autograd
    gradient, and `unpack_params` splits a 2D-long parameter vector
    into (mean, log_std).
    """

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian: first D entries are the
        # mean, last D the per-dimension log standard deviations.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        # Closed-form entropy of a diagonal Gaussian.
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    # Fixed seed makes the Monte-Carlo estimate deterministic across calls.
    rs = npr.RandomState(0)

    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        # Reparameterization: eps ~ N(0, I), then scale and shift.
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        # Negated so the caller can minimize.
        return -lower_bound

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params
if __name__ == '__main__':
    # Specify an inference problem by its unnormalized log-density.
    D = 2

    def log_density(x, t):
        # Columns of x are [mu, log_sigma]; the model places a Gaussian
        # prior on log_sigma and a Gaussian on mu whose scale depends on it.
        mu, log_sigma = x[:, 0], x[:, 1]
        sigma_density = norm.logpdf(log_sigma, 0, 1.35)
        mu_density = norm.logpdf(mu, 0, np.exp(log_sigma))
        return sigma_density + mu_density

    # Build variational objective.
    objective, gradient, unpack_params = \
        black_box_variational_inference(log_density, D, num_samples=2000)

    # Set up plotting code
    def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
        # Evaluate func on a regular grid and draw its contour lines.
        x = np.linspace(*xlimits, num=numticks)
        y = np.linspace(*ylimits, num=numticks)
        X, Y = np.meshgrid(x, y)
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        plt.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])

    # Set up figure.
    fig = plt.figure(figsize=(8, 8), facecolor='white')
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)

    def callback(params, t, g):
        # Report progress, then redraw target and variational contours.
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))
        plt.cla()
        target_distribution = lambda x: np.exp(log_density(x, t))
        plot_isocontours(ax, target_distribution)
        mean, log_std = unpack_params(params)
        variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2 * log_std)))
        plot_isocontours(ax, variational_contour)
        plt.draw()
        plt.pause(1.0 / 30.0)

    print("Optimizing variational parameters...")
    init_mean = -1 * np.ones(D)
    init_log_std = -5 * np.ones(D)
    init_var_params = np.concatenate([init_mean, init_log_std])
    variational_params = adam(gradient, init_var_params, step_size=0.1, num_iters=2000, callback=callback)
| mit |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/core/display.py | 2 | 43675 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
from binascii import b2a_hex
import json
import mimetypes
import os
import struct
import sys
import warnings
from IPython.utils.py3compat import cast_unicode
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'GeoJSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data', 'update_display', 'DisplayHandle', 'Video']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
    """internal implementation of all display_foo methods

    Parameters
    ----------
    mimetype : str
        The mimetype to be published (e.g. 'image/png')
    objs : tuple of objects
        The Python objects to display, or if raw=True raw text data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    if metadata:
        # Scope the metadata under its mimetype key.
        metadata = {mimetype: metadata}
    if raw:
        # Wrap each raw payload in a single-entry mimebundle,
        # e.g. pngdata -> {'image/png': pngdata}.
        objs = [{mimetype: obj} for obj in objs]
    display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
# use * to indicate transient is keyword-only
def publish_display_data(data, metadata=None, source=None, *, transient=None, **kwargs):
    """Publish data and metadata to all frontends.

    See the ``display_data`` message in the messaging documentation for
    more details about this message type.

    The following MIME types are currently implemented:

    * text/plain
    * text/html
    * text/markdown
    * text/latex
    * application/json
    * application/javascript
    * image/png
    * image/jpeg
    * image/svg+xml

    Parameters
    ----------
    data : dict
        A dictionary having keys that are valid MIME types (like
        'text/plain' or 'image/svg+xml') and values that are the data for
        that MIME type. The data itself must be a JSON'able data
        structure. Minimally all data should have the 'text/plain' data,
        which can be displayed by all frontends. If more than the plain
        text is given, it is up to the frontend to decide which
        representation to use.
    metadata : dict
        A dictionary for metadata related to the data. This can contain
        arbitrary key, value pairs that frontends can use to interpret
        the data. mime-type keys matching those in data can be used
        to specify metadata about particular representations.
    source : str, deprecated
        Unused.
    transient : dict, keyword-only
        A dictionary of transient data, such as display_id.
    """
    from IPython.core.interactiveshell import InteractiveShell

    # Route through the active shell's display publisher.
    display_pub = InteractiveShell.instance().display_pub

    # only pass transient if supplied,
    # to avoid errors with older ipykernel.
    # TODO: We could check for ipykernel version and provide a detailed upgrade message.
    if transient:
        kwargs['transient'] = transient

    display_pub.publish(
        data=data,
        metadata=metadata,
        **kwargs
    )
def _new_id():
"""Generate a new random text id with urandom"""
return b2a_hex(os.urandom(16)).decode('ascii')
def display(*objs, include=None, exclude=None, metadata=None, transient=None, display_id=None, **kwargs):
    """Display a Python object in all frontends.
    By default all representations will be computed and sent to the frontends.
    Frontends can decide which representation is used and how.
    In terminal IPython this will be similar to using :func:`print`, for use in richer
    frontends see Jupyter notebook examples with rich display logic.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display.
    raw : bool, optional
        Are the objects to be displayed already mimetype-keyed dicts of raw display data,
        or Python objects that need to be formatted before display? [default: False]
    include : list, tuple or set, optional
        A list of format type strings (MIME types) to include in the
        format data dict. If this is set *only* the format types included
        in this list will be computed.
    exclude : list, tuple or set, optional
        A list of format type strings (MIME types) to exclude in the format
        data dict. If this is set all format types will be computed,
        except for those included in this argument.
    metadata : dict, optional
        A dictionary of metadata to associate with the output.
        mime-type keys in this dictionary will be associated with the individual
        representation formats, if they exist.
    transient : dict, optional
        A dictionary of transient data to associate with the output.
        Data in this dict should not be persisted to files (e.g. notebooks).
    display_id : str, bool optional
        Set an id for the display.
        This id can be used for updating this display area later via update_display.
        If given as `True`, generate a new `display_id`
    kwargs: additional keyword-args, optional
        Additional keyword-arguments are passed through to the display publisher.
    Returns
    -------
    handle: DisplayHandle
        Returns a handle on updatable displays for use with :func:`update_display`,
        if `display_id` is given. Returns :any:`None` if no `display_id` is given
        (default).
    Examples
    --------
    >>> class Json(object):
    ...     def __init__(self, json):
    ...         self.json = json
    ...     def _repr_pretty_(self, pp, cycle):
    ...         import json
    ...         pp.text(json.dumps(self.json, indent=2))
    ...     def __repr__(self):
    ...         return str(self.json)
    ...
    >>> d = Json({1:2, 3: {4:5}})
    >>> print(d)
    {1: 2, 3: {4: 5}}
    >>> display(d)
    {
      "1": 2,
      "3": {
        "4": 5
      }
    }
    >>> def int_formatter(integer, pp, cycle):
    ...     pp.text('I'*integer)
    >>> plain = get_ipython().display_formatter.formatters['text/plain']
    >>> plain.for_type(int, int_formatter)
    <function _repr_pprint at 0x...>
    >>> display(7-5)
    II
    >>> del plain.type_printers[int]
    >>> display(7-5)
    2
    See Also
    --------
    :func:`update_display`
    Notes
    -----
    In Python, objects can declare their textual representation using the
    `__repr__` method. IPython expands on this idea and allows objects to declare
    other, rich representations including:
    - HTML
    - JSON
    - PNG
    - JPEG
    - SVG
    - LaTeX
    A single object can declare some or all of these representations; all are
    handled by IPython's display system.
    The main idea of the first approach is that you have to implement special
    display methods when you define your class, one for each representation you
    want to use. Here is a list of the names of the special methods and the
    values they must return:
    - `_repr_html_`: return raw HTML as a string
    - `_repr_json_`: return a JSONable dict
    - `_repr_jpeg_`: return raw JPEG data
    - `_repr_png_`: return raw PNG data
    - `_repr_svg_`: return raw SVG data as a string
    - `_repr_latex_`: return LaTeX commands in a string surrounded by "$".
    - `_repr_mimebundle_`: return a full mimebundle containing the mapping
      from all mimetypes to data
    When you are directly writing your own classes, you can adapt them for
    display in IPython by following the above approach. But in practice, you
    often need to work with existing classes that you can't easily modify.
    You can refer to the documentation on IPython display formatters in order to
    register custom formatters for already existing types.
    .. versionadded:: 5.4 display available without import
    .. versionadded:: 6.1 display available without import
    Since IPython 5.4 and 6.1 :func:`display` is automatically made available to
    the user without import. If you are using display in a document that might
    be used in a pure python context or with older version of IPython, use the
    following import at the top of your file::
        from IPython.display import display
    """
    # `raw` is popped from kwargs (not a named parameter) so that it is not
    # forwarded to the display publisher below.
    raw = kwargs.pop('raw', False)
    if transient is None:
        transient = {}
    if display_id:
        if display_id is True:
            # caller asked for "an" id without choosing one: mint a fresh one
            display_id = _new_id()
        transient['display_id'] = display_id
    if kwargs.get('update') and 'display_id' not in transient:
        raise TypeError('display_id required for update_display')
    if transient:
        # only forward transient when non-empty, to stay compatible with
        # older ipykernel versions that reject the kwarg
        kwargs['transient'] = transient
    from IPython.core.interactiveshell import InteractiveShell
    if not raw:
        format = InteractiveShell.instance().display_formatter.format
    for obj in objs:
        if raw:
            # objects are already mimetype-keyed dicts: publish directly
            publish_display_data(data=obj, metadata=metadata, **kwargs)
        else:
            format_dict, md_dict = format(obj, include=include, exclude=exclude)
            if not format_dict:
                # nothing to display (e.g. _ipython_display_ took over)
                continue
            if metadata:
                # kwarg-specified metadata gets precedence
                _merge(md_dict, metadata)
            publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
    if display_id:
        return DisplayHandle(display_id)
# use * for keyword-only display_id arg
def update_display(obj, *, display_id, **kwargs):
    """Update an existing display by id

    Parameters
    ----------
    obj:
        The object with which to update the display
    display_id: keyword-only
        The id of the display to update

    See Also
    --------
    :func:`display`
    """
    # Delegate to display() in update mode; `update=True` overrides any
    # caller-supplied value.
    merged = dict(kwargs, update=True)
    display(obj, display_id=display_id, **merged)
class DisplayHandle(object):
    """A handle on an updatable display

    Call `.update(obj)` to display a new object.
    Call `.display(obj)` to add a new instance of this display,
    and update existing instances.

    See Also
    --------
    :func:`display`, :func:`update_display`
    """

    def __init__(self, display_id=None):
        # Without an explicit id, mint a fresh random one.
        self.display_id = _new_id() if display_id is None else display_id

    def __repr__(self):
        return f"<{self.__class__.__name__} display_id={self.display_id}>"

    def display(self, obj, **kwargs):
        """Make a new display with my id, updating existing instances.

        Parameters
        ----------
        obj:
            object to display
        **kwargs:
            additional keyword arguments passed to display
        """
        display(obj, display_id=self.display_id, **kwargs)

    def update(self, obj, **kwargs):
        """Update existing displays with my id

        Parameters
        ----------
        obj:
            object to display
        **kwargs:
            additional keyword arguments passed to update_display
        """
        update_display(obj, display_id=self.display_id, **kwargs)
def display_pretty(*objs, **kwargs):
    """Display the plain-text (pretty, default) representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw text data when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'text/plain' output.
    """
    _display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
    """Display the HTML representation of the objects.

    Note: if ``raw=False`` and an object has no HTML representation,
    nothing will be shown for it.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw HTML strings when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'text/html' output.
    """
    _display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
    """Display the Markdown representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw markdown strings when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'text/markdown' output.
    """
    _display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
    """Display the SVG representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw SVG data when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'image/svg+xml' output.
    """
    _display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
    """Display the PNG representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw PNG data when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'image/png' output.
    """
    _display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
    """Display the JPEG representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw JPEG data when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'image/jpeg' output.
    """
    _display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
    """Display the LaTeX representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw latex strings when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'text/latex' output.
    """
    _display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
    """Display the JSON representation of the objects.

    Note that not many frontends support displaying JSON.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw JSON data when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'application/json' output.
    """
    _display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
    """Display the Javascript representation of the objects.

    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or raw javascript source when ``raw=True``.
    raw : bool
        Whether the objects are raw data rather than objects to be
        formatted before display. [default: False]
    metadata : dict, optional
        Metadata to attach to the 'application/javascript' output.
    """
    _display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
    """Display the PDF representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw PDF data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    _display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
    """An object that wraps data to be displayed."""
    # mode used when reading `filename`; subclasses holding binary
    # payloads (images, SVG) override this with 'rb'
    _read_flags = 'r'
    # when True, __repr__ falls back to the default repr with memory address
    _show_mem_addr = False
    def __init__(self, data=None, url=None, filename=None):
        """Create a display object given raw data.
        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. The MIME type of the data should match the
        subclasses used, so the Png subclass should be used for 'image/png'
        data. If the data is a URL, the data will first be downloaded
        and then displayed.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw data or a URL or file to load the data from
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        """
        # A string `data` may actually be a URL or a local path; reinterpret
        # it so callers can pass any of the three positionally.
        if data is not None and isinstance(data, str):
            if data.startswith('http') and url is None:
                url = data
                filename = None
                data = None
            elif _safe_exists(data) and filename is None:
                url = None
                filename = data
                data = None
        self.data = data
        self.url = url
        self.filename = filename
        self.reload()
        self._check_data()
    def __repr__(self):
        if not self._show_mem_addr:
            cls = self.__class__
            r = "<%s.%s object>" % (cls.__module__, cls.__name__)
        else:
            r = super(DisplayObject, self).__repr__()
        return r
    def _check_data(self):
        """Override in subclasses if there's something to check."""
        pass
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.filename is not None:
            with open(self.filename, self._read_flags) as f:
                self.data = f.read()
        elif self.url is not None:
            try:
                # Deferred import
                from urllib.request import urlopen
                response = urlopen(self.url)
                self.data = response.read()
                # extract encoding from header, if there is one:
                encoding = None
                for sub in response.headers['content-type'].split(';'):
                    sub = sub.strip()
                    if sub.startswith('charset'):
                        encoding = sub.split('=')[-1].strip()
                        break
                # decode data, if an encoding was specified
                if encoding:
                    self.data = self.data.decode(encoding, 'replace')
            except Exception:
                # Download/decoding failures leave data as None.  Narrowed
                # from a bare `except:` so KeyboardInterrupt/SystemExit are
                # no longer silently swallowed here.
                self.data = None
class TextDisplayObject(DisplayObject):
    """Validate that display data is text"""

    def _check_data(self):
        """Raise TypeError unless the wrapped data is text (or None)."""
        if self.data is None:
            return
        if not isinstance(self.data, str):
            raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
    """Wrap text to be published as the pretty (``text/plain``) repr."""
    def _repr_pretty_(self):
        # NOTE(review): defined without the (pp, cycle) arguments the
        # pretty-printer protocol normally passes -- confirm how callers
        # invoke this before relying on it.
        return self.data
class HTML(TextDisplayObject):
    """Wrap raw HTML text, published via ``_repr_html_``."""
    def _repr_html_(self):
        # data is guaranteed to be text (or None) by TextDisplayObject
        return self.data
    def __html__(self):
        """
        This method exists to inform other HTML-using modules (e.g. Markupsafe,
        htmltag, etc) that this object is HTML and does not need things like
        special characters (<>&) escaped.
        """
        return self._repr_html_()
class Markdown(TextDisplayObject):
    """Wrap raw Markdown text, published via ``_repr_markdown_``."""
    def _repr_markdown_(self):
        # published as-is; rendering is up to the frontend
        return self.data
class Math(TextDisplayObject):
    """Wrap a LaTeX math expression for display-math rendering."""

    def _repr_latex_(self):
        # Strip any user-supplied $ delimiters, then wrap the expression
        # in $$ ... $$ so it renders in display-math mode.
        return "$$%s$$" % self.data.strip('$')
class Latex(TextDisplayObject):
    """Wrap raw LaTeX, published as-is via ``_repr_latex_``."""
    def _repr_latex_(self):
        # unlike Math, no $$-wrapping: the caller supplies any delimiters
        return self.data
class SVG(DisplayObject):
    """Wrap SVG image data; only the <svg> element is kept and published."""
    # files are read as bytes; the setter below converts to text
    _read_flags = 'rb'
    # wrap data in a property, which extracts the <svg> tag, discarding
    # document headers
    _data = None
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, svg):
        if svg is None:
            self._data = None
            return
        # parse into dom object
        from xml.dom import minidom
        x = minidom.parseString(svg)
        # get svg tag (should be 1)
        found_svg = x.getElementsByTagName('svg')
        if found_svg:
            # keep only the first <svg> element, dropping XML prologue
            # and doctype from a full document
            svg = found_svg[0].toxml()
        else:
            # fallback on the input, trust the user
            # but this is probably an error.
            pass
        # minidom returns str, but raw input may have been bytes
        svg = cast_unicode(svg)
        self._data = svg
    def _repr_svg_(self):
        return self.data
class JSON(DisplayObject):
    """JSON expects a JSON-able dict or list
    not an already-serialized JSON string.
    Scalar types (None, number, string) are not allowed, only dict or list containers.
    """
    # wrap data in a property, which warns about passing already-serialized JSON
    _data = None
    def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None, **kwargs):
        """Create a JSON display object given raw data.
        Parameters
        ----------
        data : dict or list
            JSON data to display. Not an already-serialized JSON string.
            Scalar types (None, number, string) are not allowed, only dict
            or list containers.
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        expanded : boolean
            Metadata to control whether a JSON display component is expanded.
        metadata: dict
            Specify extra metadata to attach to the json display object.
        """
        # metadata must be assembled before super().__init__, which
        # triggers the data setter (and possibly a reload from file/URL)
        self.metadata = {'expanded': expanded}
        if metadata:
            self.metadata.update(metadata)
        if kwargs:
            self.metadata.update(kwargs)
        super(JSON, self).__init__(data=data, url=url, filename=filename)
    def _check_data(self):
        if self.data is not None and not isinstance(self.data, (dict, list)):
            raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, data):
        if isinstance(data, str):
            # getattr: during __init__ the filename attribute may not exist
            # yet; only warn when the string did not come from a file reload
            if getattr(self, 'filename', None) is None:
                warnings.warn("JSON expects JSONable dict or list, not JSON strings")
            data = json.loads(data)
        self._data = data
    def _data_and_metadata(self):
        return self.data, self.metadata
    def _repr_json_(self):
        return self._data_and_metadata()
# Template snippets used by Javascript._repr_javascript_:
# _css_t appends a <link rel="stylesheet"> for each CSS URL, while
# _lib_t1/_lib_t2 bracket the user source with $.getScript(...) callbacks
# so each library is loaded (asynchronously) before the code runs.
_css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
_lib_t1 = """$.getScript("%s", function () {
"""
_lib_t2 = """});
"""
class GeoJSON(JSON):
    """GeoJSON expects JSON-able dict
    not an already-serialized JSON string.
    Scalar types (None, number, string) are not allowed, only dict containers.
    """
    def __init__(self, *args, **kwargs):
        """Create a GeoJSON display object given raw data.
        Parameters
        ----------
        data : dict or list
            VegaLite data. Not an already-serialized JSON string.
            Scalar types (None, number, string) are not allowed, only dict
            or list containers.
        url_template : string
            Leaflet TileLayer URL template: http://leafletjs.com/reference.html#url-template
        layer_options : dict
            Leaflet TileLayer options: http://leafletjs.com/reference.html#tilelayer-options
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        metadata: dict
            Specify extra metadata to attach to the json display object.
        Examples
        --------
        The following will display an interactive map of Mars with a point of
        interest on frontend that do support GeoJSON display.
        >>> from IPython.display import GeoJSON
        >>> GeoJSON(data={
        ...     "type": "Feature",
        ...     "geometry": {
        ...         "type": "Point",
        ...         "coordinates": [-81.327, 296.038]
        ...     }
        ... },
        ... url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
        ... layer_options={
        ...     "basemap_id": "celestia_mars-shaded-16k_global",
        ...     "attribution" : "Celestia/praesepe",
        ...     "minZoom" : 0,
        ...     "maxZoom" : 18,
        ... })
        <IPython.core.display.GeoJSON object>
        In the terminal IPython, you will only see the text representation of
        the GeoJSON object.
        """
        # JSON.__init__ handles data/metadata; url_template and
        # layer_options travel via **kwargs into self.metadata
        super(GeoJSON, self).__init__(*args, **kwargs)
    def _ipython_display_(self):
        # Publish both the geo+json payload and a plain-text fallback for
        # frontends without GeoJSON support.
        bundle = {
            'application/geo+json': self.data,
            'text/plain': '<IPython.display.GeoJSON object>'
        }
        metadata = {
            'application/geo+json': self.metadata
        }
        display(bundle, metadata=metadata, raw=True)
class Javascript(TextDisplayObject):

    def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
        """Create a Javascript display object given raw data.

        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. If the data is a URL, the data will first be
        downloaded and then displayed.

        In the Notebook, the containing element will be available as `element`,
        and jQuery will be available. Content appended to `element` will be
        visible in the output area.

        Parameters
        ----------
        data : unicode, str or bytes
            The Javascript source code or a URL to download it from.
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        lib : list or str
            A sequence of full Javascript library URLs to load
            asynchronously before running the source code.  A single URL
            can be given as a plain string.
        css : list or str
            A sequence of full CSS file URLs to load before running the
            source code.  A single URL can be given as a plain string.
        """
        def _as_sequence(value):
            # Normalize None -> [], a single URL string -> [url], and
            # reject anything that is not a list/tuple.
            if isinstance(value, str):
                return [value]
            if value is None:
                return []
            if not isinstance(value, (list, tuple)):
                raise TypeError('expected sequence, got: %r' % value)
            return value

        self.lib = _as_sequence(lib)
        self.css = _as_sequence(css)
        super(Javascript, self).__init__(data=data, url=url, filename=filename)

    def _repr_javascript_(self):
        # CSS links first, then one nested $.getScript callback per
        # library around the user source, closed by one "});" each.
        parts = [_css_t % stylesheet for stylesheet in self.css]
        parts.extend(_lib_t1 % library for library in self.lib)
        parts.append(self.data)
        parts.append(_lib_t2 * len(self.lib))
        return ''.join(parts)
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'  # 8-byte PNG file signature
_JPEG = b'\xff\xd8'  # JPEG SOI (start-of-image) marker
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
    """Wrap PNG/JPEG image data, a URL, or a filename for rich display."""
    # image files are binary
    _read_flags = 'rb'
    _FMT_JPEG = u'jpeg'
    _FMT_PNG = u'png'
    # only these formats can be base64-embedded in notebook output
    _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
    def __init__(self, data=None, url=None, filename=None, format=None,
                 embed=None, width=None, height=None, retina=False,
                 unconfined=False, metadata=None):
        """Create a PNG/JPEG image object given raw data.
        When this object is returned by an input cell or passed to the
        display function, it will result in the image being displayed
        in the frontend.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw image data or a URL or filename to load the data from.
            This always results in embedded image data.
        url : unicode
            A URL to download the data from. If you specify `url=`,
            the image data will not be embedded unless you also specify `embed=True`.
        filename : unicode
            Path to a local file to load the data from.
            Images from a file are always embedded.
        format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is given
            for format will be inferred from the filename extension.
        embed : bool
            Should the image data be embedded using a data URI (True) or be
            loaded using an <img> tag. Set this to True if you want the image
            to be viewable later with no internet connection in the notebook.
            Default is `True`, unless the keyword argument `url` is set, then
            default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`
        width : int
            Width in pixels to which to constrain the image in html
        height : int
            Height in pixels to which to constrain the image in html
        retina : bool
            Automatically set the width and height to half of the measured
            width and height.
            This only works for embedded images because it reads the width/height
            from image data.
            For non-embedded images, you can just set the desired display width
            and height directly.
        unconfined: bool
            Set unconfined=True to disable max-width confinement of the image.
        metadata: dict
            Specify extra metadata to attach to the image.
        Examples
        --------
        # embedded image data, works in qtconsole and notebook
        # when passed positionally, the first arg can be any of raw image data,
        # a URL, or a filename from which to load image data.
        # The result is always embedding image data for inline images.
        Image('http://www.google.fr/images/srpr/logo3w.png')
        Image('/path/to/image.jpg')
        Image(b'RAW_PNG_DATA...')
        # Specifying Image(url=...) does not embed the image data,
        # it only generates `<img>` tag with a link to the source.
        # This will not work in the qtconsole or offline.
        Image(url='http://www.google.fr/images/srpr/logo3w.png')
        """
        # Try to determine a file extension, used below to infer `format`.
        if filename is not None:
            ext = self._find_ext(filename)
        elif url is not None:
            ext = self._find_ext(url)
        elif data is None:
            raise ValueError("No image data found. Expecting filename, url, or data.")
        elif isinstance(data, str) and (
            data.startswith('http') or _safe_exists(data)
        ):
            ext = self._find_ext(data)
        else:
            ext = None
        if format is None:
            if ext is not None:
                # NOTE(review): the `else` below pairs with the `png` test
                # only, so a jpg/jpeg extension falls through to
                # `format = ext.lower()` and is only normalized back to
                # 'jpeg' by the jpg->jpeg fixup further down.  Confirm an
                # `elif` was intended here.
                if ext == u'jpg' or ext == u'jpeg':
                    format = self._FMT_JPEG
                if ext == u'png':
                    format = self._FMT_PNG
                else:
                    format = ext.lower()
            elif isinstance(data, bytes):
                # infer image type from image data header,
                # only if format has not been specified.
                if data[:2] == _JPEG:
                    format = self._FMT_JPEG
        # failed to detect format, default png
        if format is None:
            format = 'png'
        if format.lower() == 'jpg':
            # jpg->jpeg
            format = self._FMT_JPEG
        self.format = format.lower()
        # default: embed unless only a URL was given
        self.embed = embed if embed is not None else (url is None)
        if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
            raise ValueError("Cannot embed the '%s' image format" % (self.format))
        self.width = width
        self.height = height
        self.retina = retina
        self.unconfined = unconfined
        self.metadata = metadata
        super(Image, self).__init__(data=data, url=url, filename=filename)
        # retina handling needs the loaded data, so it runs after super()
        if retina:
            self._retina_shape()
    def _retina_shape(self):
        """load pixel-doubled width and height from image data"""
        if not self.embed:
            return
        if self.format == 'png':
            w, h = _pngxy(self.data)
        elif self.format == 'jpeg':
            w, h = _jpegxy(self.data)
        else:
            # retina only supports png
            return
        # display at half the measured size -> crisp on retina screens
        self.width = w // 2
        self.height = h // 2
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.embed:
            super(Image,self).reload()
            if self.retina:
                self._retina_shape()
    def _repr_html_(self):
        # only non-embedded images render as an <img> tag linking the URL;
        # embedded ones go through _repr_png_/_repr_jpeg_ instead
        if not self.embed:
            width = height = klass = ''
            if self.width:
                width = ' width="%d"' % self.width
            if self.height:
                height = ' height="%d"' % self.height
            if self.unconfined:
                klass = ' class="unconfined"'
            return u'<img src="{url}"{width}{height}{klass}/>'.format(
                url=self.url,
                width=width,
                height=height,
                klass=klass,
            )
    def _data_and_metadata(self):
        """shortcut for returning metadata with shape information, if defined"""
        md = {}
        if self.width:
            md['width'] = self.width
        if self.height:
            md['height'] = self.height
        if self.unconfined:
            md['unconfined'] = self.unconfined
        if self.metadata:
            md.update(self.metadata)
        if md:
            return self.data, md
        else:
            return self.data
    def _repr_png_(self):
        if self.embed and self.format == u'png':
            return self._data_and_metadata()
    def _repr_jpeg_(self):
        if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
            return self._data_and_metadata()
    def _find_ext(self, s):
        # extension = text after the last dot, lowercased
        return s.split('.')[-1].lower()
class Video(DisplayObject):
    """Wrap a video (URL, local file, or raw data) for rich display."""
    def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
        """Create a video object given raw data or an URL.
        When this object is returned by an input cell or passed to the
        display function, it will result in the video being displayed
        in the frontend.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw video data or a URL or filename to load the data from.
            Raw data will require passing `embed=True`.
        url : unicode
            A URL for the video. If you specify `url=`,
            the image data will not be embedded.
        filename : unicode
            Path to a local file containing the video.
            Will be interpreted as a local URL unless `embed=True`.
        embed : bool
            Should the video be embedded using a data URI (True) or be
            loaded using a <video> tag (False).
            Since videos are large, embedding them should be avoided, if possible.
            You must confirm embedding as your intention by passing `embed=True`.
            Local files can be displayed with URLs without embedding the content, via::
                Video('./video.mp4')
        mimetype: unicode
            Specify the mimetype for embedded videos.
            Default will be guessed from file extension, if available.
        Examples
        --------
        Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
        Video('path/to/video.mp4')
        Video('path/to/video.mp4', embed=True)
        Video(b'raw-videodata', embed=True)
        """
        # A positional string `data` may really be a URL or a local path.
        if url is None and isinstance(data, str) and data.startswith(('http:', 'https:')):
            url = data
            data = None
        elif data is not None and os.path.exists(data):
            # The `data is not None` guard fixes a TypeError previously
            # raised by os.path.exists(None) when only url= or filename=
            # was passed.
            filename = data
            data = None
        if data and not embed:
            # embedding raw video must be an explicit opt-in
            msg = ''.join([
                "To embed videos, you must pass embed=True ",
                "(this may make your notebook files huge)\n",
                "Consider passing Video(url='...')",
            ])
            raise ValueError(msg)
        self.mimetype = mimetype
        self.embed = embed
        super(Video, self).__init__(data=data, url=url, filename=filename)
    def _repr_html_(self):
        # External URLs and potentially local files are not embedded into the
        # notebook output.
        if not self.embed:
            url = self.url if self.url is not None else self.filename
            output = """<video src="{0}" controls>
      Your browser does not support the <code>video</code> element.
    </video>""".format(url)
            return output
        # Embedded videos are base64-encoded.
        mimetype = self.mimetype
        if self.filename is not None:
            if not mimetype:
                mimetype, _ = mimetypes.guess_type(self.filename)
            with open(self.filename, 'rb') as f:
                video = f.read()
        else:
            video = self.data
        if isinstance(video, str):
            # unicode input is already b64-encoded
            b64_video = video
        else:
            b64_video = base64_encode(video).decode('ascii').rstrip()
        output = """<video controls>
 <source src="data:{0};base64,{1}" type="{0}">
 Your browser does not support the video tag.
 </video>""".format(mimetype, b64_video)
        return output
    def reload(self):
        # Videos are never re-downloaded; reload is a deliberate no-op.
        # TODO
        pass
    def _repr_png_(self):
        # TODO: thumbnail support
        pass
    def _repr_jpeg_(self):
        # TODO: thumbnail support
        pass
def clear_output(wait=False):
    """Clear the output of the current cell receiving output.
    Parameters
    ----------
    wait : bool [default: false]
        Wait to clear the output until new output is available to replace it."""
    from IPython.core.interactiveshell import InteractiveShell
    if InteractiveShell.initialized():
        InteractiveShell.instance().display_pub.clear_output(wait)
    else:
        # Terminal fallback: erase the current line with the ANSI
        # "erase line" sequence plus a carriage return, on each stream.
        # (Previously both escape sequences were printed to stdout while
        # stderr was merely flushed, so stderr was never actually cleared.)
        print('\033[2K\r', end='')
        sys.stdout.flush()
        print('\033[2K\r', end='', file=sys.stderr)
        sys.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
    """Select figure formats for the inline backend. Optionally pass quality for JPEG.
    For example, this enables PNG and JPEG output with a JPEG quality of 90%::
        In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
    To set this in your config files use the following::
        c.InlineBackend.figure_formats = {'png', 'jpeg'}
        c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
    Parameters
    ----------
    *formats : strs
        One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
    **kwargs :
        Keyword args will be relayed to ``figure.canvas.print_figure``.
    """
    from IPython.core.interactiveshell import InteractiveShell
    from IPython.core.pylabtools import select_figure_formats
    # build kwargs, starting with InlineBackend config
    kw = {}
    from ipykernel.pylab.config import InlineBackend
    cfg = InlineBackend.instance()
    kw.update(cfg.print_figure_kwargs)
    # explicit keyword args override the InlineBackend config values
    kw.update(**kwargs)
    shell = InteractiveShell.instance()
    select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
    """Set whether the inline backend closes all figures automatically or not.
    By default, the inline backend used in the IPython Notebook will close all
    matplotlib figures automatically after each cell is run. This means that
    plots in different cells won't interfere. Sometimes, you may want to make
    a plot in one cell and then refine it in later cells. This can be accomplished
    by::
        In [1]: set_matplotlib_close(False)
    To set this in your config files use the following::
        c.InlineBackend.close_figures = False
    Parameters
    ----------
    close : bool
        Should all matplotlib figures be automatically closed after each cell is
        run?
    """
    from ipykernel.pylab.config import InlineBackend
    # mutate the singleton InlineBackend config in place
    cfg = InlineBackend.instance()
    cfg.close_figures = close
| mit |
guptachetan1997/Episodes | tvshow/utils/dataset_builder.py | 1 | 2971 | import requests
from bs4 import BeautifulSoup
import random
from urllib.parse import quote
import time
import pandas as pd
# Browser User-Agent strings; one is picked at random per request to make
# the scraper look like an ordinary browser.
user_agents = [
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.142 Safari/535.19',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:8.0.1) Gecko/20100101 Firefox/8.0.1',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.151 Safari/535.19',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'
]

# Networks whose shows are scraped by the main loop below.
popular_networks = ['CBS', 'USA Network', 'AMC', 'ABC (US)', 'Netflix', 'HBO', 'FOX', 'NBC', 'FX', 'BBC']

# Metadata columns of the output table; 'indicator' is the computed
# popularity score (fan-art count weighted by rating).
cols = [
    'SeriesName',
    'tvdbID',
    'Network',
    'tvdbRating',
    'indicator']

# Genre vocabulary: each genre's list index is its column position in the
# weighted one-hot genre encoding built by get_shows_for_network().
genres = [
    'Action',
    'Adventure',
    'Animation',
    'Children',
    'Comedy',
    'Crime',
    'Documentary',
    'Drama',
    'Family',
    'Fantasy',
    'Food',
    'Game Show',
    'Home and Garden',
    'Horror',
    'Mini-Series',
    'Mystery',
    'News',
    'Reality',
    'Romance',
    'Science-Fiction',
    'Soap',
    'Special Interest',
    'Sport',
    'Suspense',
    'Talk Show',
    'Thriller',
    'Travel',
    'Western',
]

# Accumulator DataFrame; get_shows_for_network() appends one row per show.
tv_df = pd.DataFrame(columns=cols+genres)
def get_shows_for_network(network):
    """Scrape thetvdb.com's advanced search for up to 50 shows aired on
    ``network`` and append one row per show to the module-level ``tv_df``.

    Parameters
    ----------
    network : str
        Network name exactly as listed on thetvdb.com (e.g. 'HBO').

    Notes
    -----
    Parsing stays best-effort: rows with missing cells, non-numeric
    ratings or unknown genres are skipped. Only ``Exception`` subclasses
    are swallowed now (the previous bare ``except:`` also caught
    ``KeyboardInterrupt``/``SystemExit``, making the scrape un-interruptible).
    """
    headers = {'User-Agent': user_agents[random.randint(0, 8)]}
    url = 'http://thetvdb.com/?language=7&genre=' + '&network=' + quote(network) + '&order=fanartcount%20desc&searching=Search&tab=advancedsearch'
    r = requests.get(url, headers=headers)
    html = r.text.encode('utf8')
    soup = BeautifulSoup(html, "lxml")
    ex = soup.find('table', attrs={'id': "listtable"})
    shows = ex.findAll('tr')
    global tv_df
    for show in shows[1:51]:  # skip the header row; cap at 50 shows
        try:
            show_data = show.findAll('td')
            seriesName = show_data[1].text
            # The tvdb id is embedded in the series link: ...?tab=series&id=NNNN&...
            tvdbID = show_data[1].find('a')['href']
            tvdbID = tvdbID[tvdbID.find('id') + 3:tvdbID.rfind('&')]
            show_genre = show_data[2].text
            rating = show_data[6].text
            fanart = show_data[7].text
            # Popularity indicator: fan-art count weighted by rating.
            indicator = (float(fanart) * float(rating))
            # Genre cell looks like "|Drama|Crime|": strip the outer bars
            # and split on the inner ones.
            show_genre = show_genre[1:len(show_genre) - 1]
            show_genre = show_genre.split('|')
            # Weighted one-hot encoding over the 28 known genres so that
            # each row's genre weights sum to 1.
            show_genre_list = [0] * 28
            length = len(show_genre)
            for genre in show_genre:
                show_genre_list[genres.index(genre)] = 1.0 / length
            show_data = [seriesName, tvdbID, network, rating, indicator]
            # DataFrame.append() was removed in pandas 2.x; pd.concat is
            # the supported equivalent and keeps the same default indexing.
            tv_df = pd.concat(
                [tv_df,
                 pd.DataFrame([show_data + show_genre_list],
                              columns=cols + genres)])
        except Exception:
            # Malformed row -- skip it and keep scraping the rest.
            pass
# Scrape each network in turn, pausing between requests to be polite to
# the server, then persist the combined table.
for network in popular_networks:
    print(network)
    get_shows_for_network(network)
    time.sleep(2)
tv_df.to_csv('data.csv', index=False)

# Re-load the CSV, sort by the popularity indicator (descending) and
# overwrite the file with the sorted version.
df = pd.read_csv('data.csv')
df = pd.DataFrame(df)
df2 = df.sort_values('indicator', ascending=False)
print(df2.head())
df2.to_csv('data.csv', index=False)
| mit |
vortex-ape/scikit-learn | sklearn/setup.py | 14 | 3236 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``sklearn`` package.

    Registers all subpackages (adding the paired ``<pkg>/tests`` directory
    for those without their own setup.py), the ``_isotonic`` Cython
    extension, and a bundled cblas fallback when no usable optimized BLAS
    is found; ``linear_model`` and ``utils`` are added last because they
    depend on cblas.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, BlasNotFoundError
    import numpy

    # libm is required for the C extensions on POSIX platforms.
    libraries = ['m'] if os.name == 'posix' else []

    config = Configuration('sklearn', parent_package, top_path)

    # submodules with build utilities
    for name in ('__check_build', '_build_utils'):
        config.add_subpackage(name)

    # submodules which do not have their own setup.py:
    # we must manually add sub-submodules & tests
    for name in ('compose', 'covariance', 'cross_decomposition',
                 'feature_selection', 'gaussian_process', 'mixture',
                 'model_selection', 'neural_network', 'preprocessing',
                 'semi_supervised'):
        config.add_subpackage(name)
        config.add_subpackage(name + '/tests')

    # submodules which have their own setup.py
    # ("linear_model" and "utils" are added after cblas below)
    for name in ('cluster', 'datasets', 'decomposition', 'ensemble',
                 'externals', 'feature_extraction', 'manifold', 'metrics',
                 'neighbors', 'tree', 'svm'):
        config.add_subpackage(name)

    # cython extension module for isotonic regression
    config.add_extension('_isotonic',
                         sources=['_isotonic.pyx'],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries,
                         )

    # some libs need cblas; a fortran-compiled BLAS will not be sufficient
    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or (
            ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
        config.add_library('cblas',
                           sources=[join('src', 'cblas', '*.c')])
        warnings.warn(BlasNotFoundError.__doc__)

    # the following packages depend on cblas, so they have to be built
    # after the above
    config.add_subpackage('linear_model')
    config.add_subpackage('utils')

    # the test directory
    config.add_subpackage('tests')

    maybe_cythonize_extensions(top_path, config)

    return config
if __name__ == '__main__':
    # Standalone build entry point: delegate to numpy.distutils' setup().
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
mberent/tweets-storm | src/main/resources/splitsentence.py | 1 | 7233 | from sklearn.feature_extraction.text import CountVectorizer
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import traceback
from collections import deque
try:
import simplejson as json
except ImportError:
import json
# Thin JSON (de)serialization aliases; simplejson is preferred above when
# available because it is faster.
json_encode = lambda x: json.dumps(x)
json_decode = lambda x: json.loads(x)
# reads lines and reconstructs newlines appropriately
def readMsg():
    """Read one multi-line JSON message from stdin.

    Lines are accumulated until a lone "end" terminator line is seen;
    the accumulated text (minus its final newline) is then JSON-decoded.
    Raises if stdin hits EOF before the terminator arrives.
    """
    parts = []
    line = sys.stdin.readline()
    while line[0:-1] != "end":
        if not line:
            raise Exception('Read EOF from stdin')
        parts.append(line)
        line = sys.stdin.readline()
    return json_decode("".join(parts)[0:-1])
# Current component class (Bolt or Spout); set by the run() methods and
# used by __emit() to dispatch.
MODE = None
# Implicit anchor applied to all emits while BasicBolt processes a tuple.
ANCHOR_TUPLE = None

#queue up commands we read while trying to read taskids
pending_commands = deque()
def readTaskIds():
    """Return the next task-id list sent by the parent process.

    Command messages read while looking for a task-id list are queued in
    ``pending_commands`` so readCommand() can return them later.
    """
    if pending_taskids:
        return pending_taskids.popleft()
    else:
        msg = readMsg()
        while type(msg) is not list:
            pending_commands.append(msg)
            msg = readMsg()
        return msg

#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()
def readCommand():
    """Return the next command dict sent by the parent process.

    Task-id lists read while looking for a command are queued in
    ``pending_taskids`` so readTaskIds() can return them later.
    """
    if pending_commands:
        return pending_commands.popleft()
    else:
        msg = readMsg()
        while type(msg) is list:
            pending_taskids.append(msg)
            msg = readMsg()
        return msg
def readTuple():
    """Read the next command from the parent and wrap it in a Tuple."""
    cmd = readCommand()
    return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"])
def sendMsgToParent(msg):
    """Serialize ``msg`` as JSON and send it to the parent process on
    stdout using the multilang "end"-terminated framing.

    Written with ``sys.stdout.write`` instead of Python-2-only ``print``
    statements (which are a SyntaxError on Python 3); the bytes emitted
    are identical on both interpreters.
    """
    sys.stdout.write(json_encode(msg) + "\n")
    sys.stdout.write("end\n")
    sys.stdout.flush()
def sync():
    """Tell the parent we are ready for more work (heartbeat reply)."""
    sendMsgToParent({'command':'sync'})

def sendpid(heartbeatdir):
    """Report our pid to the parent and touch the per-pid heartbeat file
    that Storm uses to detect dead subprocesses."""
    pid = os.getpid()
    sendMsgToParent({'pid':pid})
    open(heartbeatdir + "/" + str(pid), "w").close()
def emit(*args, **kwargs):
    """Emit a tuple and return the ids of the tasks that received it."""
    __emit(*args, **kwargs)
    return readTaskIds()

def emitDirect(task, *args, **kwargs):
    """Emit a tuple directly to the given task id (no task ids are read back)."""
    kwargs["directTask"] = task
    __emit(*args, **kwargs)

def __emit(*args, **kwargs):
    # Dispatch on the component mode set by Bolt.run() / Spout.run().
    global MODE
    if MODE == Bolt:
        emitBolt(*args, **kwargs)
    elif MODE == Spout:
        emitSpout(*args, **kwargs)
def emitBolt(tup, stream=None, anchors = [], directTask=None):
    """Send an "emit" command for a bolt output tuple to the parent.

    ``anchors`` lists the input tuples this output is anchored to for
    ack/fail tracking; inside BasicBolt it is overridden by the implicit
    ANCHOR_TUPLE.  (The mutable default is safe: ``anchors`` is only ever
    rebound, never mutated.)
    """
    global ANCHOR_TUPLE
    if ANCHOR_TUPLE is not None:
        anchors = [ANCHOR_TUPLE]
    m = {"command": "emit"}
    if stream is not None:
        m["stream"] = stream
    # Use a list comprehension, not map(): on Python 3 map() returns a
    # lazy iterator that json.dumps cannot serialize.
    m["anchors"] = [a.id for a in anchors]
    if directTask is not None:
        m["task"] = directTask
    m["tuple"] = tup
    sendMsgToParent(m)
def emitSpout(tup, stream=None, id=None, directTask=None):
    """Send an "emit" command for a spout output tuple to the parent.

    Optional fields (message id, stream, direct task) are only included
    when set; the insertion order matches the original protocol framing.
    """
    msg = {"command": "emit"}
    for key, value in (("id", id), ("stream", stream), ("task", directTask)):
        if value is not None:
            msg[key] = value
    msg["tuple"] = tup
    sendMsgToParent(msg)
def ack(tup):
    """Acknowledge successful processing of ``tup``."""
    sendMsgToParent({"command": "ack", "id": tup.id})

def fail(tup):
    """Report failed processing of ``tup`` so Storm can replay it."""
    sendMsgToParent({"command": "fail", "id": tup.id})

def reportError(msg):
    """Forward an error string to the parent process."""
    sendMsgToParent({"command": "error", "msg": msg})

def log(msg, level=2):
    """Log ``msg`` via the parent; levels: 0=trace, 1=debug, 2=info,
    3=warn, 4=error."""
    sendMsgToParent({"command": "log", "msg": msg, "level":level})

def logTrace(msg):
    log(msg, 0)

def logDebug(msg):
    log(msg, 1)

def logInfo(msg):
    log(msg, 2)

def logWarn(msg):
    log(msg, 3)

def logError(msg):
    log(msg, 4)

def rpcMetrics(name, params):
    """Send a metrics datapoint to the parent process."""
    sendMsgToParent({"command": "metrics", "name": name, "params": params})
def initComponent():
    """Perform the multilang handshake: read the setup message, write the
    pid heartbeat file, and return [conf, context]."""
    setupInfo = readMsg()
    sendpid(setupInfo['pidDir'])
    return [setupInfo['conf'], setupInfo['context']]
class Tuple(object):
    """A single Storm tuple received from the parent process."""

    def __init__(self, id, component, stream, task, values):
        self.id = id
        self.component = component
        self.stream = stream
        self.task = task
        self.values = values

    def __repr__(self):
        # Attributes sorted by name for a stable, readable representation.
        attrs = sorted(self.__dict__.items())
        body = ''.join(' %s=%r' % pair for pair in attrs)
        return '<%s%s>' % (self.__class__.__name__, body)

    def is_heartbeat_tuple(self):
        """True for the synthetic heartbeat tuples Storm injects."""
        if self.task != -1:
            return False
        return self.stream == "__heartbeat"
class Bolt(object):
    """Base class for a non-acking multilang bolt.

    Subclasses override initialize() and process(); run() drives the
    read/process loop until the process is killed.
    """

    def initialize(self, stormconf, context):
        """Called once with the topology configuration and context."""
        pass

    def process(self, tuple):
        """Called for every input tuple."""
        pass

    def run(self):
        global MODE
        MODE = Bolt
        conf, context = initComponent()
        try:
            self.initialize(conf, context)
            while True:
                tup = readTuple()
                if tup.is_heartbeat_tuple():
                    sync()
                else:
                    self.process(tup)
        # "except Exception, e" is Python-2-only syntax (SyntaxError on 3.x);
        # also, traceback.format_exc() takes an optional line *limit*, not
        # an exception object, so passing e was a bug.
        except Exception:
            reportError(traceback.format_exc())
class BasicBolt(object):
    """Bolt base class with automatic anchoring and ack/fail.

    Each input tuple is installed as the implicit anchor for emits made
    during process(); the tuple is acked on success and failed when
    process() raises.
    """

    def initialize(self, stormconf, context):
        """Called once with the topology configuration and context."""
        pass

    def process(self, tuple):
        """Called for every input tuple."""
        pass

    def run(self):
        global MODE
        MODE = Bolt
        global ANCHOR_TUPLE
        conf, context = initComponent()
        try:
            self.initialize(conf, context)
            while True:
                tup = readTuple()
                if tup.is_heartbeat_tuple():
                    sync()
                else:
                    ANCHOR_TUPLE = tup
                    try:
                        self.process(tup)
                        ack(tup)
                    # Python-2-only "except Exception, e" replaced with the
                    # portable form; traceback.format_exc() takes a line
                    # limit, not an exception object.
                    except Exception:
                        reportError(traceback.format_exc())
                        fail(tup)
        except Exception:
            reportError(traceback.format_exc())
class Spout(object):
    """Base class for a multilang spout.

    Subclasses override nextTuple() (and optionally ack()/fail());
    run() dispatches the "next"/"ack"/"fail" commands from the parent.
    """

    def initialize(self, conf, context):
        """Called once with the topology configuration and context."""
        pass

    def ack(self, id):
        """Called when the tuple with message id ``id`` is fully processed."""
        pass

    def fail(self, id):
        """Called when the tuple with message id ``id`` failed downstream."""
        pass

    def nextTuple(self):
        """Called to emit the next tuple(s), if any."""
        pass

    def run(self):
        global MODE
        MODE = Spout
        conf, context = initComponent()
        try:
            self.initialize(conf, context)
            while True:
                msg = readCommand()
                if msg["command"] == "next":
                    self.nextTuple()
                if msg["command"] == "ack":
                    self.ack(msg["id"])
                if msg["command"] == "fail":
                    self.fail(msg["id"])
                sync()
        # Python-2-only "except Exception, e" replaced with the portable
        # form; traceback.format_exc() takes a line limit, not an exception.
        except Exception:
            reportError(traceback.format_exc())
class SplitSentenceBolt(BasicBolt):
    """Tokenize incoming sentences and emit one tuple per word.

    Uses scikit-learn's CountVectorizer analyzer (lowercased, unicode
    accents stripped, English stop words removed) as the tokenizer.
    """
    def __init__(self):
        self.analyzer = CountVectorizer(
            stop_words='english',
            strip_accents='unicode',
            lowercase=True).build_analyzer()
    def process(self, tup):
        # tup.values[0] is the raw sentence string.
        for word in self.analyzer(tup.values[0]):
            emit([word])
# Script entry point: start consuming tuples from the parent process.
SplitSentenceBolt().run()
| apache-2.0 |
petosegan/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
# Build a 300x300 matrix with 5 implanted biclusters, keeping the
# ground-truth row/column indicator arrays for scoring later.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows and columns; row_idx/col_idx record the permutation so the
# ground truth can be permuted the same way for scoring.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
# Consensus score in [0, 1]: 1.0 means the recovered biclusters exactly
# match the (permuted) implanted ones.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

print("consensus score: {:.3f}".format(score))

# Reorder rows and columns by cluster label so the found biclusters
# appear as contiguous blocks.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/linear_model/tests/test_logistic.py | 5 | 49337 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
# Tiny 3-sample fixture shared by the tests, in dense and sparse form,
# with a binary (Y1) and a 3-class (Y2) label set.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify it reproduces the labels exactly,
    with well-formed class probabilities."""
    expected_classes = np.unique(y)
    n_samples = len(y)
    n_classes = expected_classes.shape[0]

    # Hard predictions must match the training labels exactly.
    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert_equal(predicted.shape, (n_samples,))
    assert_array_equal(predicted, y)

    # Probabilities: right shape, rows summing to one, argmax agreeing
    # with the hard labels.
    probabilities = clf.predict_proba(X)
    assert_equal(probabilities.shape, (n_samples, n_classes))
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset
    # Make sure it predicts the correct result on simple datasets.
    check_predictions(LogisticRegression(random_state=0), X, Y1)
    check_predictions(LogisticRegression(random_state=0), X_sp, Y1)

    check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)

    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X_sp, Y1)


def test_error():
    # Test for appropriate exception on errors
    msg = "Penalty term must be positive"
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C=-1).fit, X, Y1)
    assert_raise_message(ValueError, msg,
                         LogisticRegression(C="test").fit, X, Y1)

    for LR in [LogisticRegression, LogisticRegressionCV]:
        msg = "Tolerance for stopping criteria must be positive"
        assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)

        msg = "Maximum number of iteration must be positive"
        assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
        assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)


def test_lr_liblinear_warning():
    # liblinear ignores n_jobs; fitting must emit a UserWarning saying so.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]

    lr = LogisticRegression(solver='liblinear', n_jobs=2)
    assert_warns_message(UserWarning,
                         "'n_jobs' > 1 does not have any effect when"
                         " 'solver' is set to 'liblinear'. Got 'n_jobs'"
                         " = 2.",
                         lr.fit, iris.data, target)


def test_predict_3_classes():
    # Same sanity check on a 3-class dataset, dense and sparse.
    check_predictions(LogisticRegression(C=10), X, Y2)
    check_predictions(LogisticRegression(C=10), X_sp, Y2)


def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape

    target = iris.target_names[iris.target]

    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
                                   multi_class='ovr', random_state=42),
                LogisticRegression(C=len(iris.data), solver='saga', tol=1e-2,
                                   multi_class='ovr', random_state=42)
                ]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)

        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)

        probabilities = clf.predict_proba(iris.data)
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))

        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)


def test_multinomial_validation():
    # C must be validated (positive) by every multinomial-capable solver.
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
        assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
    # Invalid solver / multi_class / penalty / dual combinations must
    # raise informative ValueErrors for both LR estimators.
    X, y = iris.data, iris.target
    for LR in [LogisticRegression, LogisticRegressionCV]:

        msg = ('Logistic Regression supports only liblinear, newton-cg, '
               'lbfgs, sag and saga solvers, got wrong_name')
        lr = LR(solver="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)

        msg = "multi_class should be either multinomial or ovr, got wrong_name"
        lr = LR(solver='newton-cg', multi_class="wrong_name")
        assert_raise_message(ValueError, msg, lr.fit, X, y)

        # only 'liblinear' solver
        msg = "Solver liblinear does not support a multinomial backend."
        lr = LR(solver='liblinear', multi_class='multinomial')
        assert_raise_message(ValueError, msg, lr.fit, X, y)

        # all solvers except 'liblinear'
        for solver in ['newton-cg', 'lbfgs', 'sag']:
            msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
                   solver)
            lr = LR(solver=solver, penalty='l1')
            assert_raise_message(ValueError, msg, lr.fit, X, y)
        for solver in ['newton-cg', 'lbfgs', 'sag', 'saga']:
            msg = ("Solver %s supports only dual=False, got dual=True" %
                   solver)
            lr = LR(solver=solver, dual=True)
            assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, max_iter=2000)
        clf.fit(iris.data, target)

        # Binary multinomial collapses to a single coefficient row.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 random_state=42, fit_intercept=False)
        mlr.fit(iris.data, target)
        # NOTE(review): 'pred' is computed from clf, not mlr, so mlr's fit
        # looks unused here -- confirm intent upstream.
        pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)


def test_sparsify():
    # Test sparsify and densify members.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)

    pred_d_d = clf.decision_function(iris.data)

    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)

    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)

    clf.densify()
    pred_d_s = clf.decision_function(sp_data)

    # All four dense/sparse coef-vs-data combinations must agree.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)


def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0

    clf = LogisticRegression(random_state=0)

    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    assert_raises(ValueError, clf.fit, X, y_wrong)

    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))


def test_write_parameters():
    # Test that we can write to coef_ and intercept_
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    # Zeroed parameters must give an identically-zero decision function.
    assert_array_almost_equal(clf.decision_function(X), 0)


@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)

    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ['sag', 'saga']:
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
            max_iter=1000,
            random_state=0)
        # Each point of the path must match an independently fitted model.
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
                                    solver=solver,
                                    random_state=0)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
                                      err_msg="with solver = %s" % solver)

    # test for fit_intercept=True
    for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'):
        Cs = [1e3]
        coefs, Cs, _ = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
            intercept_scaling=10000., random_state=0)
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000., random_state=0)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
                                  err_msg="with solver = %s" % solver)


def test_liblinear_dual_random_state():
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20, random_state=0)
    lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr1.fit(X, y)
    lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
    lr2.fit(X, y)
    lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
    lr3.fit(X, y)

    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    assert_raise_message(AssertionError, msg,
                         assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
    # The analytic gradient must agree with a finite-difference
    # approximation, with and without an intercept, dense and sparse.
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)


def test_logistic_grad_hess():
    # The Hessian-vector product must agree with a least-squares slope
    # estimate of the gradient along that direction.
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)

        # First check that _logistic_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)

        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)

        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])

        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()

        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
        grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(loss_interp, loss_interp_2)
        assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    # With a single candidate C, CV must reproduce the plain estimator.
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)

    # Shapes: (n_classes, n_folds, n_Cs, n_features) and friends.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1,))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))


def test_multinomial_logistic_regression_string_inputs():
    # Test with string labels for LogisticRegression(CV)
    n_samples, n_features, n_classes = 50, 5, 3
    X_ref, y = make_classification(n_samples=n_samples, n_features=n_features,
                                   n_classes=n_classes, n_informative=3,
                                   random_state=0)
    y_str = LabelEncoder().fit(['bar', 'baz', 'foo']).inverse_transform(y)
    # For numerical labels, let y values be taken from set (-1, 0, 1)
    y = np.array(y) - 1
    # Test for string labels
    lr = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    lr_cv = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')
    lr_str = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    lr_cv_str = LogisticRegressionCV(solver='lbfgs', multi_class='multinomial')

    lr.fit(X_ref, y)
    lr_cv.fit(X_ref, y)
    lr_str.fit(X_ref, y_str)
    lr_cv_str.fit(X_ref, y_str)

    # Coefficients must not depend on whether labels are ints or strings.
    assert_array_almost_equal(lr.coef_, lr_str.coef_)
    assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
    assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
    assert_equal(sorted(lr_str.classes_), ['bar', 'baz', 'foo'])
    assert_equal(sorted(lr_cv_str.classes_), ['bar', 'baz', 'foo'])

    # The predictions should be in original labels
    assert_equal(sorted(np.unique(lr_str.predict(X_ref))),
                 ['bar', 'baz', 'foo'])
    assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))),
                 ['bar', 'baz', 'foo'])

    # Make sure class weights can be given with string labels
    lr_cv_str = LogisticRegression(
        solver='lbfgs', class_weight={'bar': 1, 'baz': 2, 'foo': 0},
        multi_class='multinomial').fit(X_ref, y_str)
    assert_equal(sorted(np.unique(lr_cv_str.predict(X_ref))), ['bar', 'baz'])


def test_logistic_cv_sparse():
    # Dense and sparse input must give identical CV results.
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    X[X < 1.0] = 0.0
    csr = sp.csr_matrix(X)

    clf = LogisticRegressionCV(fit_intercept=True)
    clf.fit(X, y)
    clfs = LogisticRegressionCV(fit_intercept=True)
    clfs.fit(csr, y)
    assert_array_almost_equal(clfs.coef_, clf.coef_)
    assert_array_almost_equal(clfs.intercept_, clf.intercept_)
    assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
    # fit_intercept must be equivalent to appending a column of ones with
    # a penalized extra weight; check loss, gradient and Hessian agree.
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
    loss_interp = _logistic_loss(w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    grad, hess = _logistic_grad_hess(w, X_, y, alpha)
    loss = _logistic_loss(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])


def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # The cv indices from stratified kfold (where stratification is done based
    # on the fine-grained iris classes, i.e, before the classes 0 and 1 are
    # conflated) is used for both clf and clf1
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(train, target))

    # Train clf on the original dataset where classes 0 and 1 are separated
    clf = LogisticRegressionCV(cv=precomputed_folds)
    clf.fit(train, target)

    # Conflate classes 0 and 1 and train clf1 on this modified dataset
    clf1 = LogisticRegressionCV(cv=precomputed_folds)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    # Ensure that what OvR learns for class2 is same regardless of whether
    # classes 0 and 1 are separated or not
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10,))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, n_cv, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        max_iter = 2000 if solver in ['sag', 'saga'] else 15
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=max_iter,
            random_state=42, tol=1e-5 if solver in ['sag', 'saga'] else 1e-2,
            cv=2)
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, n_cv, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10,))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, n_cv, 10))
def test_logistic_regression_solvers():
    # Without an intercept (so liblinear's intercept penalization cannot
    # matter), every solver should converge to nearly the same coefficients
    # on a small binary problem.
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False),
        LogisticRegression(solver='lbfgs', fit_intercept=False),
        LogisticRegression(fit_intercept=False),  # default: liblinear
        LogisticRegression(solver='sag', fit_intercept=False,
                           random_state=42),
        LogisticRegression(solver='saga', fit_intercept=False,
                           random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Compare every pair of fitted models (same 10 comparisons as before).
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
    # Same solver-agreement check as the binary test, but on a three-class
    # problem so the one-vs-rest path is exercised.
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    tol = 1e-7
    models = [
        LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol),
        LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol),
        LogisticRegression(fit_intercept=False, tol=tol),  # liblinear
        # The stochastic solvers need a generous iteration budget to reach
        # the same optimum as the exact ones.
        LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
                           max_iter=1000, random_state=42),
        LogisticRegression(solver='saga', fit_intercept=False, tol=tol,
                           max_iter=10000, random_state=42),
    ]
    for model in models:
        model.fit(X, y)
    # Every pair of solvers must produce nearly identical coefficients.
    for i, first in enumerate(models):
        for second in models[i + 1:]:
            assert_array_almost_equal(first.coef_, second.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
    # All LogisticRegressionCV solvers should agree on the learned
    # coefficients when class weights are given either as an explicit
    # dict or as 'balanced', for binary and three-class problems.
    for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
        n_classes = len(weight)
        for class_weight in (weight, 'balanced'):
            X, y = make_classification(n_samples=30, n_features=3,
                                       n_repeated=0,
                                       n_informative=3, n_redundant=0,
                                       n_classes=n_classes, random_state=0)
            clf_lbf = LogisticRegressionCV(solver='lbfgs', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            clf_ncg = LogisticRegressionCV(solver='newton-cg', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            clf_lib = LogisticRegressionCV(solver='liblinear', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight)
            # sag/saga need a tight tolerance and many iterations to reach
            # the same optimum as the exact solvers on this tiny dataset.
            clf_sag = LogisticRegressionCV(solver='sag', Cs=1,
                                           fit_intercept=False,
                                           class_weight=class_weight,
                                           tol=1e-5, max_iter=10000,
                                           random_state=0)
            clf_saga = LogisticRegressionCV(solver='saga', Cs=1,
                                            fit_intercept=False,
                                            class_weight=class_weight,
                                            tol=1e-5, max_iter=10000,
                                            random_state=0)
            clf_lbf.fit(X, y)
            clf_ncg.fit(X, y)
            clf_lib.fit(X, y)
            clf_sag.fit(X, y)
            clf_saga.fit(X, y)
            # lbfgs acts as the reference; the others must match it.
            assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
            assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
    # Sample-weight semantics must be consistent across estimators and
    # solvers: unit weights behave like no weights, all solvers agree on
    # weighted fits, and integer class weights are equivalent to
    # duplicating samples via sample_weight.
    X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
                               n_classes=2, random_state=0)
    # weight 1 for class 0 samples, weight 2 for class 1 samples
    sample_weight = y + 1
    for LR in [LogisticRegression, LogisticRegressionCV]:
        # Test that passing sample_weight as ones is the same as
        # not passing them at all (default None)
        for solver in ['lbfgs', 'liblinear']:
            clf_sw_none = LR(solver=solver, fit_intercept=False,
                             random_state=42)
            clf_sw_none.fit(X, y)
            clf_sw_ones = LR(solver=solver, fit_intercept=False,
                             random_state=42)
            clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
            assert_array_almost_equal(
                clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
        # Test that sample weights work the same with the lbfgs,
        # newton-cg, and 'sag' solvers
        clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False, random_state=42)
        clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
        clf_sw_n = LR(solver='newton-cg', fit_intercept=False, random_state=42)
        clf_sw_n.fit(X, y, sample_weight=sample_weight)
        clf_sw_sag = LR(solver='sag', fit_intercept=False, tol=1e-10,
                        random_state=42)
        # ignore convergence warning due to small dataset
        with ignore_warnings():
            clf_sw_sag.fit(X, y, sample_weight=sample_weight)
        clf_sw_liblinear = LR(solver='liblinear', fit_intercept=False,
                              random_state=42)
        clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
        assert_array_almost_equal(
            clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
        # Test that passing class_weight as [1,2] is the same as
        # passing class weight = [1,1] but adjusting sample weights
        # to be 2 for all instances of class 2
        for solver in ['lbfgs', 'liblinear']:
            clf_cw_12 = LR(solver=solver, fit_intercept=False,
                           class_weight={0: 1, 1: 2}, random_state=42)
            clf_cw_12.fit(X, y)
            clf_sw_12 = LR(solver=solver, fit_intercept=False, random_state=42)
            clf_sw_12.fit(X, y, sample_weight=sample_weight)
            assert_array_almost_equal(
                clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
    # Test the above for l1 penalty and l2 penalty with dual=True.
    # since the patched liblinear code is different.
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l1", tol=1e-5, random_state=42)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l1", tol=1e-5,
        random_state=42)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
    clf_cw = LogisticRegression(
        solver="liblinear", fit_intercept=False, class_weight={0: 1, 1: 2},
        penalty="l2", dual=True, random_state=42)
    clf_cw.fit(X, y)
    clf_sw = LogisticRegression(
        solver="liblinear", fit_intercept=False, penalty="l2", dual=True,
        random_state=42)
    clf_sw.fit(X, y, sample_weight)
    assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
    """Return the 'balanced' class weights of *y* as a {label: weight} dict.

    compute_class_weight returns two parallel arrays; some estimators
    want the mapping form instead.
    """
    classes = np.unique(y)
    weights = compute_class_weight("balanced", classes, y)
    return {label: weight for label, weight in zip(classes, weights)}
def test_logistic_regression_class_weights():
    # class_weight='balanced' must produce the same model as passing the
    # equivalent explicit weight dictionary, in both the multinomial and
    # the binary (OvR) settings, on a deliberately imbalanced iris subset.
    # Multinomial case: remove 90% of class 0
    X = iris.data[45:, :]
    y = iris.target[45:]
    solvers = ("lbfgs", "newton-cg")
    class_weight_dict = _compute_class_weight_dictionary(y)
    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="multinomial",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
    # Binary case: remove 90% of class 0 and 100% of class 2
    X = iris.data[45:100, :]
    y = iris.target[45:100]
    solvers = ("lbfgs", "newton-cg", "liblinear")
    class_weight_dict = _compute_class_weight_dictionary(y)
    for solver in solvers:
        clf1 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight="balanced")
        clf2 = LogisticRegression(solver=solver, multi_class="ovr",
                                  class_weight=class_weight_dict)
        clf1.fit(X, y)
        clf2.fit(X, y)
        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_convergence_warnings():
    # A liblinear fit capped at two iterations cannot converge on this
    # problem, so fitting must raise ConvergenceWarning and n_iter_ must
    # report exactly the cap.
    features, targets = make_classification(n_samples=20, n_features=20,
                                            random_state=0)
    capped = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, capped.fit, features, targets)
    assert_equal(capped.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    # 'lbfgs' is used as the reference solver; the others are compared
    # against it, both with and without an intercept.
    solver = 'lbfgs'
    ref_i = LogisticRegression(solver=solver, multi_class='multinomial')
    ref_w = LogisticRegression(solver=solver, multi_class='multinomial',
                               fit_intercept=False)
    ref_i.fit(X, y)
    ref_w.fit(X, y)
    assert_array_equal(ref_i.coef_.shape, (n_classes, n_features))
    assert_array_equal(ref_w.coef_.shape, (n_classes, n_features))
    for solver in ['sag', 'saga', 'newton-cg']:
        clf_i = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   )
        clf_w = LogisticRegression(solver=solver, multi_class='multinomial',
                                   random_state=42, max_iter=2000, tol=1e-7,
                                   fit_intercept=False)
        clf_i.fit(X, y)
        clf_w.fit(X, y)
        assert_array_equal(clf_i.coef_.shape, (n_classes, n_features))
        assert_array_equal(clf_w.coef_.shape, (n_classes, n_features))
        # Compare solutions between lbfgs and the other solvers
        assert_almost_equal(ref_i.coef_, clf_i.coef_, decimal=3)
        assert_almost_equal(ref_w.coef_, clf_w.coef_, decimal=3)
        assert_almost_equal(ref_i.intercept_, clf_i.intercept_, decimal=3)
    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg', 'sag', 'saga']:
        clf_path = LogisticRegressionCV(solver=solver, max_iter=2000, tol=1e-6,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, ref_i.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, ref_i.intercept_, decimal=3)
def test_multinomial_grad_hess():
    # The analytic Hessian-vector product of the multinomial loss must
    # match a finite-difference estimate built from the gradient.
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot labels chosen as the argmax of the linear scores so the
    # problem is consistent with the weights.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
                                         sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)
    # Estimate hessian using least squares as done in
    # test_logistic_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
                               sample_weight=sample_weights)[0]
        for t in d_x
    ])
    # Centering removes the constant term; the slope of grad w.r.t. the
    # perturbation along `vec` approximates the Hessian column.
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Tied (all-zero) decision values must map to the negative class.

    Raw liblinear breaks the tie toward the positive class; scikit-learn
    must not. See issue
    https://github.com/scikit-learn/scikit-learn/issues/3600 and PR
    https://github.com/scikit-learn/scikit-learn/pull/3623.
    """
    features, labels = make_classification(n_samples=5, n_features=5,
                                           random_state=0)
    model = LogisticRegression(fit_intercept=False)
    model.fit(features, labels)
    # With no intercept, an all-zero input makes every decision value 0.
    zero_inputs = np.zeros((5, 5))
    assert_array_equal(model.predict(zero_inputs), np.zeros(5))
def test_liblinear_logregcv_sparse():
    # LogisticRegressionCV with solver='liblinear' must accept CSR input
    # without raising.
    data, labels = make_classification(n_samples=10, n_features=5,
                                       random_state=0)
    model = LogisticRegressionCV(solver='liblinear')
    model.fit(sparse.csr_matrix(data), labels)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
    # (comment previously said 'liblinear' — copy-paste from the test above).
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    clf = LogisticRegressionCV(solver='saga')
    clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    # Non-positive intercept_scaling values must be rejected at fit time
    # with an explanatory ValueError.
    for bad_scaling in (-1, 0):
        clf = LogisticRegression(intercept_scaling=bad_scaling)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    # When fit_intercept=False the intercept stays pinned at zero, so
    # intercept_scaling (whatever its value) has no effect.
    model = LogisticRegression(fit_intercept=False)
    model.fit(X, Y1)
    assert_equal(model.intercept_, 0.)
def test_logreg_l1():
    # liblinear and saga must agree on the l1-regularized solution, and
    # the l1 penalty must zero out uninformative features.
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    # Append 3 pure-noise features and 2 constant features; l1 should
    # drive all five of their coefficients to zero.
    X_noise = rng.normal(size=(n_samples, 3))
    X_constant = np.ones(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False,
                                      tol=1e-10)
    lr_liblinear.fit(X, y)
    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False,
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
    # Same l1 agreement check as test_logreg_l1, but on CSR input, plus a
    # sparse-vs-dense consistency check for saga.
    # Because liblinear penalizes the intercept and saga does not, we do not
    # fit the intercept to make it possible to compare the coefficients of
    # the two models at convergence.
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20,
                               random_state=0)
    X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
    X_constant = np.zeros(shape=(n_samples, 2))
    X = np.concatenate((X, X_noise, X_constant), axis=1)
    # Zero out small entries so the matrix is genuinely sparse.
    X[X < 1] = 0
    X = sparse.csr_matrix(X)
    lr_liblinear = LogisticRegression(penalty="l1", C=1.0, solver='liblinear',
                                      fit_intercept=False,
                                      tol=1e-10)
    lr_liblinear.fit(X, y)
    lr_saga = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                 fit_intercept=False,
                                 max_iter=1000, tol=1e-10)
    lr_saga.fit(X, y)
    assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
    # Noise and constant features should be regularized to zero by the l1
    # penalty
    assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
    assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
    # Check that solving on the sparse and dense data yield the same results
    lr_saga_dense = LogisticRegression(penalty="l1", C=1.0, solver='saga',
                                       fit_intercept=False,
                                       max_iter=1000, tol=1e-10)
    lr_saga_dense.fit(X.toarray(), y)
    assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
def test_logreg_cv_penalty():
    # The penalty configured on LogisticRegressionCV must be forwarded to
    # the final refit; identical sparsity with a plain l1 fit at the same
    # C is the evidence.
    data, labels = make_classification(n_samples=50, n_features=20,
                                       random_state=0)
    cv_model = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
    cv_model.fit(data, labels)
    plain_model = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
    plain_model.fit(data, labels)
    assert_equal(np.count_nonzero(cv_model.coef_),
                 np.count_nonzero(plain_model.coef_))
def test_logreg_predict_proba_multinomial():
    data, labels = make_classification(n_samples=10, n_features=20,
                                       random_state=0, n_classes=3,
                                       n_informative=10)
    # The multinomial model optimizes the true cross-entropy, so its
    # predicted probabilities should incur a smaller log-loss than the
    # one-vs-rest model's.
    clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
    clf_multi.fit(data, labels)
    loss_multi = log_loss(labels, clf_multi.predict_proba(data))
    clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
    clf_ovr.fit(data, labels)
    loss_ovr = log_loss(labels, clf_ovr.predict_proba(data))
    assert_greater(loss_ovr, loss_multi)
    # For the same coefficients, softmax probabilities should beat the
    # per-class sigmoid (OvR-style) probabilities.
    loss_softmax = log_loss(labels, clf_multi.predict_proba(data))
    loss_sigmoid = log_loss(labels, clf_multi._predict_proba_lr(data))
    assert_greater(loss_sigmoid, loss_softmax)
@ignore_warnings
def test_max_iter():
    # With an unreachably tight tolerance, every solver must stop exactly
    # at max_iter and report it in n_iter_.
    X, y_bin = iris.data, iris.target.copy()
    y_bin[y_bin == 2] = 0
    solvers = ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']
    for max_iter in range(1, 5):
        for solver in solvers:
            for multi_class in ['ovr', 'multinomial']:
                # liblinear has no multinomial objective.
                if solver == 'liblinear' and multi_class == 'multinomial':
                    continue
                lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
                                        multi_class=multi_class,
                                        random_state=0, solver=solver)
                lr.fit(X, y_bin)
                assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
    # Check that self.n_iter_ has the correct format for every solver:
    # (n_classes,) for LogisticRegression and (n_classes, n_folds, n_Cs)
    # for LogisticRegressionCV, collapsing to a single entry for
    # liblinear OvR, binary problems, and the multinomial objective.
    X, y = iris.data, iris.target
    y_bin = y.copy()
    y_bin[y_bin == 2] = 0
    n_Cs = 4
    n_cv_fold = 2
    for solver in ['newton-cg', 'liblinear', 'sag', 'saga', 'lbfgs']:
        # OvR case
        n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
        clf = LogisticRegression(tol=1e-2, multi_class='ovr',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))
        n_classes = np.unique(y).shape[0]
        clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
        # multinomial case
        n_classes = 1
        if solver in ('liblinear', 'sag', 'saga'):
            # Bug fix: this used `break`, which aborted the whole solver
            # loop at 'liblinear' and silently skipped every check (even
            # the OvR ones above) for the remaining solvers. `continue`
            # only skips the multinomial checks for the excluded solvers.
            continue
        clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
                                 solver=solver, C=1.,
                                 random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes,))
        clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
                                   solver=solver, Cs=n_Cs, cv=n_cv_fold,
                                   random_state=42, max_iter=100)
        clf.fit(X, y)
        assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
        clf.fit(X, y_bin)
        assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
def test_warm_start():
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    # Warm starting does not work with liblinear solver.
    X, y = iris.data, iris.target
    solvers = ['newton-cg', 'sag', 'saga', 'lbfgs']
    for warm_start in [True, False]:
        for fit_intercept in [True, False]:
            for solver in solvers:
                for multi_class in ['ovr', 'multinomial']:
                    clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
                                             warm_start=warm_start,
                                             solver=solver,
                                             random_state=42, max_iter=100,
                                             fit_intercept=fit_intercept)
                    with ignore_warnings(category=ConvergenceWarning):
                        clf.fit(X, y)
                        coef_1 = clf.coef_
                        # Refit for a single iteration: warm-started models
                        # resume from coef_1, cold ones restart from scratch.
                        clf.max_iter = 1
                        clf.fit(X, y)
                    cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
                    msg = ("Warm starting issue with %s solver in %s mode "
                           "with fit_intercept=%s and warm_start=%s"
                           % (solver, multi_class, str(fit_intercept),
                              str(warm_start)))
                    # 2.0 is an empirical threshold separating "barely
                    # moved" (warm) from "moved a lot" (cold).
                    if warm_start:
                        assert_greater(2.0, cum_diff, msg)
                    else:
                        assert_greater(cum_diff, 2.0, msg)
def test_saga_vs_liblinear():
    # saga and liblinear should converge to the same solution for both
    # penalties, on dense binary iris data and on sparse synthetic data,
    # across several regularization strengths.
    iris = load_iris()
    X, y = iris.data, iris.target
    # Replicate the data 10x so the stochastic solver has enough samples.
    X = np.concatenate([X] * 10)
    y = np.concatenate([y] * 10)
    # Binary subproblem with labels in {-1, +1}.
    X_bin = X[y <= 1]
    y_bin = y[y <= 1] * 2 - 1
    X_sparse, y_sparse = make_classification(n_samples=50, n_features=20,
                                             random_state=0)
    X_sparse = sparse.csr_matrix(X_sparse)
    for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
        for penalty in ['l1', 'l2']:
            n_samples = X.shape[0]
            # alpha=1e-3 is time consuming
            for alpha in np.logspace(-1, 1, 3):
                # C = 1 / (n_samples * alpha) converts the per-sample
                # regularization alpha into scikit-learn's C parameter.
                saga = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='saga',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)
                liblinear = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='liblinear',
                    multi_class='ovr',
                    max_iter=200,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)
                saga.fit(X, y)
                liblinear.fit(X, y)
                # Convergence for alpha=1e-3 is very slow
                assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
def test_dtype_match():
    # Test that np.float32 input data is not cast to np.float64 when possible
    X_32 = np.array(X).astype(np.float32)
    y_32 = np.array(Y1).astype(np.float32)
    X_64 = np.array(X).astype(np.float64)
    y_64 = np.array(Y1).astype(np.float64)
    X_sparse_32 = sp.csr_matrix(X, dtype=np.float32)
    # NOTE(review): only 'newton-cg' is checked here — presumably the
    # only solver supporting float32 preservation at the time; confirm
    # before extending the list.
    for solver in ['newton-cg']:
        for multi_class in ['ovr', 'multinomial']:
            # Check type consistency
            lr_32 = LogisticRegression(solver=solver, multi_class=multi_class)
            lr_32.fit(X_32, y_32)
            assert_equal(lr_32.coef_.dtype, X_32.dtype)
            # check consistency with sparsity
            lr_32_sparse = LogisticRegression(solver=solver,
                                              multi_class=multi_class)
            lr_32_sparse.fit(X_sparse_32, y_32)
            assert_equal(lr_32_sparse.coef_.dtype, X_sparse_32.dtype)
            # Check accuracy consistency
            lr_64 = LogisticRegression(solver=solver, multi_class=multi_class)
            lr_64.fit(X_64, y_64)
            assert_equal(lr_64.coef_.dtype, X_64.dtype)
            # The float32 and float64 fits should agree up to float32
            # precision.
            assert_almost_equal(lr_32.coef_, lr_64.coef_.astype(np.float32))
| mit |
mlyundin/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
subodhchhabra/pandashells | pandashells/bin/p_rand.py | 3 | 5729 | #! /usr/bin/env python
# standard library imports
import argparse
import textwrap
import sys # NOQA importing sys so I can mock sys.argv in tests
from pandashells.lib import module_checker_lib, arg_lib
module_checker_lib.check_for_modules(['pandas'])
from pandashells.lib import io_lib
import pandas as pd
import numpy as np
# want different default mu values for normal and poisson distributions
def fill_default_mu(args):
    """Fill in a distribution-appropriate default for --mu when unset.

    The normal distribution defaults to a mean of 0 while the poisson
    distribution defaults to a rate of 1; any other distribution (or an
    explicitly supplied --mu) is left untouched.
    """
    if args.mu is None:
        dist_name = args.type[0]
        if dist_name == 'normal':
            args.mu = [0.]
        elif dist_name == 'poisson':
            args.mu = [1.]
    return args
def get_samples(args):
    """Draw random samples from the distribution selected in ``args``.

    Returns a pandas DataFrame with ``args.num_samples[0]`` rows and
    ``args.columns[0]`` columns named c0, c1, ...
    """
    # Output shape shared by every distribution.
    size = (args.num_samples[0], args.columns[0])
    # Map each supported distribution name to its numpy sampling function
    # and the keyword arguments built from the parsed CLI options.
    # NOTE: every entry's kwargs are evaluated eagerly, so `args` must
    # carry the options for all distributions (argparse guarantees this).
    samplers = {
        'uniform': (np.random.uniform,
                    {'low': args.min[0], 'high': args.max[0], 'size': size}),
        'normal': (np.random.normal,
                   {'loc': args.mu[0] if args.mu else None,
                    'scale': args.sigma[0], 'size': size}),
        'poisson': (np.random.poisson,
                    {'lam': args.mu[0] if args.mu else None, 'size': size}),
        'beta': (np.random.beta,
                 {'a': args.alpha[0], 'b': args.beta[0], 'size': size}),
        'gamma': (np.random.gamma,
                  {'shape': args.alpha[0],
                   'scale': 1. / args.beta[0], 'size': size}),
        'binomial': (np.random.binomial,
                     {'n': args.N[0], 'p': args.p[0], 'size': size}),
    }
    # Draw the samples with the selected generator.
    sampler, kwargs = samplers[args.type[0]]
    values = sampler(**kwargs)
    # Frame the result with generated column names.
    names = ['c{}'.format(col) for col in range(args.columns[0])]
    return pd.DataFrame(values, columns=names)
def main():
    # CLI entry point: build the argument parser, normalize defaults,
    # draw the samples and write them to the configured output.
    msg = textwrap.dedent(
        """
        Return random samples from common probability distrubtions.
        -----------------------------------------------------------------------
        Examples:
            uniform:  p.rand -n 1000 -t uniform --min=0 --max=1   | p.hist
            normal:   p.rand -n 1000 -t normal  --mu=0 --sigma=1  | p.hist
            poisson:  p.rand -n 1000 -t poisson --mu=1            | p.hist
            beta:     p.rand -n 1000 -t beta    --alpha=2 --beta=6 | p.hist
            gamma:    p.rand -n 1000 -t gamma   --alpha=1 --beta=1 | p.hist
            binomial: p.rand -n 1000 -t binomial --N=10 --p=0.4   | p.hist
        -----------------------------------------------------------------------
        """
    )
    # read command line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
    parser.add_argument(
        '-t', '--type', nargs=1, type=str, default=['uniform'],
        choices=['uniform', 'normal', 'beta', 'gamma', 'binomial', 'poisson'],
        help='type of distribution (default=\'uniform\')')
    parser.add_argument(
        '-n', '--num_samples', nargs=1, default=[10], type=int,
        help='The number of rows to generate (default=10)')
    parser.add_argument(
        '-c', '--columns', nargs=1, default=[1], type=int,
        help='The number of columns to generate per row (default=1)')
    parser.add_argument(
        '--N', nargs=1, default=[10], type=int,
        help=(
            '(Binomial Dist) Largest possible value for random variable. '
            '(default=10)'
        )
    )
    parser.add_argument(
        '--p', nargs=1, default=[.5], type=float,
        help=(
            '(Binomial Dist) Bernoulli probability for each trial'
            '(default=.5)'
        )
    )
    # --mu deliberately has no default here; fill_default_mu() supplies a
    # distribution-specific one after parsing.
    parser.add_argument(
        '--mu', nargs=1, type=float,
        help='(Normal, Poisson) Mean (defaults: normal:0, poisson:1')
    parser.add_argument(
        '--sigma', nargs=1, default=[1.], type=float,
        help='(Normal) standard deviation, (default: 1)')
    parser.add_argument(
        '--min', nargs=1, default=[0.], type=float,
        help='(Uniform) Minimum value of range, (default: 0)')
    parser.add_argument(
        '--max', nargs=1, default=[1.], type=float,
        help='(Uniform) Maximum value of range, (default: 1)')
    parser.add_argument(
        '--alpha', nargs=1, default=[2.], type=float,
        help='(Beta, Gamma) (default: 2)')
    parser.add_argument(
        '--beta', nargs=1, default=[2.], type=float,
        help='(Beta, Gamma) (default: 2)')
    # Attach the shared pandashells output options (e.g. output format).
    arg_lib.add_args(parser, 'io_out')
    # parse arguments
    args = parser.parse_args()
    # set some defaults
    args = fill_default_mu(args)
    # get the samples
    df = get_samples(args)
    # write dataframe to output
    io_lib.df_to_output(args, df)
# Run the CLI only when executed as a script (excluded from coverage).
if __name__ == '__main__':  # pragma: no cover
    main()
| bsd-2-clause |
dandanvidi/capacity-usage | scripts/thermal_stability.py | 3 | 1877 | import pandas as pd
from capacity_usage import CAPACITY_USAGE
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, spearmanr
import seaborn as sns
from cobra.manipulation.modify import revert_to_reversible
from itertools import product
# Load measured fluxes and protein abundances and wrap them in the
# project's capacity-usage analysis object.
flux = pd.DataFrame.from_csv("../data/mmol_gCDW_h.csv")
copies_fL = pd.read_csv("../data/abundance[copies_fl].csv")
# Keep only the gene id and the glucose-batch abundance column.
copies_fL = copies_fL[['bnumber', 'GLC_BATCH_mu=0.58_S']]
abundance = pd.DataFrame.from_csv("../data/g_gCDW.csv")
cu = CAPACITY_USAGE(flux, abundance)
# Build a uniprot -> bnumber mapping from the fixed-width gene list
# (columns 48:54 hold the uniprot id, 0:5 the bnumber — format-specific).
uni_to_b = {row[48:54]:row[0:5].split(';')[0].strip()
            for row in open("../data/all_ecoli_genes.txt", 'r')}
id_mapper = pd.DataFrame.from_dict(uni_to_b.items())
id_mapper.columns = ["uniprot", "bnumber"]
# Thermal stability measurements, joined to bnumbers via uniprot ids.
TS = pd.read_csv("../data/thermoal_stability_ecoli.csv")
df = TS.merge(id_mapper, on=["uniprot"])
#%%
# Work on a copy of the metabolic model with reactions made reversible.
model = cu.model.copy()
revert_to_reversible(model)
def one2one_mapping():
    """Build a (gene, reaction) pair table from the model held in ``cu``.

    Each row pairs one gene (bnumber) with one reaction it participates
    in; a third column counts how many genes that reaction involves.
    """
    pairs = []
    for gene in cu.model.genes:
        for reaction in gene.reactions:
            pairs.append((gene.id, reaction))
    mapping = pd.DataFrame(pairs)
    mapping.columns = ['bnumber', 'reaction']
    mapping.loc[:, '# genes in reaction'] = [len(rxn.genes)
                                             for rxn in mapping['reaction']]
    return mapping
# Join the stability table with the gene->reaction mapping and the
# abundance measurements.
b2r = one2one_mapping()
df = df.merge(b2r, on="bnumber", how='outer')
df = df.merge(copies_fL, on="bnumber", how='outer')
# Reactions are objects; stringify them so they can act as merge keys.
df['reaction'] = df['reaction'].map(str, )
# Collect per-reaction catalytic rates: observed kapp on glucose, the
# maximal observed rate (kmax) and the literature kcat.
kmax = cu.kmax
kmax.name='kmax'
kcat = cu.load_kcats_umolmgmin()
kcat.name='kcat'
specific_activity = cu.SA.join(kmax)
specific_activity = specific_activity.join(kcat)
specific_activity = specific_activity[['glucose', 'kmax', 'kcat']].dropna(how='all')
specific_activity.columns = ['kapp_glucose', 'kmax', 'kcat']
specific_activity.loc[:, 'kcat/kmax'] = specific_activity['kcat'] / specific_activity['kmax']
# Attach the rates to each row via the stringified reaction id and cache.
df = df.merge(specific_activity, left_on='reaction', right_index=True, how='outer')
df.to_csv("../cache/thermal_stability_metadata.csv")
| mit |
ngoix/OCRF | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatically correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
# Deliberately unbalanced: 1000 samples in class 0, 100 in class 1.
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
          0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
# Solve w[0]*x + w[1]*y + b = 0 for y to draw the boundary line.
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
# (class 1 up-weighted 10x to compensate for the imbalance)
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tools/rplot.py | 2 | 30022 | import random
import warnings
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
# Emitted once at import time: the whole rplot module is deprecated in
# favour of external packages such as seaborn.
warnings.warn("\n"
              "The rplot trellis plotting interface is deprecated and will be "
              "removed in a future version. We refer to external packages "
              "like seaborn for similar but more refined functionality. \n\n"
              "See our docs http://pandas.pydata.org/pandas-docs/stable"
              "/visualization.html#rplot "
              "for some example how to convert your existing code to these "
              "packages.", FutureWarning, stacklevel=2)
class Scale:
    """
    Abstract parent of all scales.

    A scale maps one row of a DataFrame to the value of a single
    graphical attribute (colour, size, marker shape, ...).
    """
    pass
class ScaleGradient(Scale):
    """
    Map a numeric DataFrame column onto a point in colour space lying on
    the straight line between two RGB colours.
    """
    def __init__(self, column, colour1, colour2):
        """Initialize ScaleGradient instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        colour1: tuple of three floats, RGB colour at the column minimum
        colour2: tuple of three floats, RGB colour at the column maximum
        """
        self.column = column
        self.colour1 = colour1
        self.colour2 = colour2
        # continuous scale: no legend entry per distinct value
        self.categorical = False

    def __call__(self, data, index):
        """Return the interpolated RGB triple for row *index*.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        A three element tuple between colour1 and colour2.
        """
        series = data[self.column]
        # position of this value within the column's [min, max] range
        lo, hi = min(series), max(series)
        t = (series.iget(index) - lo) / (hi - lo)
        return tuple(start + (end - start) * t
                     for start, end in zip(self.colour1, self.colour2))
class ScaleGradient2(Scale):
    """
    Map a numeric DataFrame column onto a point in colour space lying on
    the piecewise-linear path through three specified RGB colours.
    """
    def __init__(self, column, colour1, colour2, colour3):
        """Initialize ScaleGradient2 instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        colour1: tuple of three floats, RGB colour at the column minimum
        colour2: tuple of three floats, RGB colour at the column midpoint
        colour3: tuple of three floats, RGB colour at the column maximum
        """
        self.column = column
        self.colour1 = colour1
        self.colour2 = colour2
        self.colour3 = colour3
        # continuous scale: no legend entry per distinct value
        self.categorical = False

    def __call__(self, data, index):
        """Return the interpolated RGB triple for row *index*.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        A three element tuple on the colour1 -> colour2 -> colour3 path.
        """
        series = data[self.column]
        lo, hi = min(series), max(series)
        t = (series.iget(index) - lo) / (hi - lo)
        # below the midpoint interpolate colour1->colour2, above it
        # colour2->colour3; rescale t to [0, 1] within each half
        if t < 0.5:
            start, end, t = self.colour1, self.colour2, t * 2.0
        else:
            start, end, t = self.colour2, self.colour3, (t - 0.5) * 2.0
        return tuple(s + (e - s) * t for s, e in zip(start, end))
class ScaleSize(Scale):
    """
    Map a numeric DataFrame column onto a matplotlib scatter marker size
    between *min_size* and *max_size*, optionally post-processed by
    *transform*.
    """
    def __init__(self, column, min_size=5.0, max_size=100.0,
                 transform=lambda x: x):
        """Initialize ScaleSize instance.

        Parameters:
        -----------
        column: string, a column name
        min_size: float, marker size at the column minimum
        max_size: float, marker size at the column maximum
        transform: function
            one-argument float -> float function applied to the
            interpolated size (e.g. lambda x: log(x))
        """
        self.column = column
        self.min_size = min_size
        self.max_size = max_size
        self.transform = transform
        # continuous scale: no legend entry per distinct value
        self.categorical = False

    def __call__(self, data, index):
        """Return the marker size for row *index*.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index
        """
        series = data[self.column]
        lo = float(min(series))
        hi = float(max(series))
        fraction = (series.iget(index) - lo) / (hi - lo)
        span = self.max_size - self.min_size
        return self.transform(self.min_size + fraction * span)
class ScaleShape(Scale):
    """
    Map the distinct values of a categorical DataFrame column onto
    matplotlib marker shapes.
    """
    def __init__(self, column):
        """Initialize ScaleShape instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        """
        self.column = column
        # fixed palette of markers; at most this many categories supported
        self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
        self.legends = set([])
        self.categorical = True

    def __call__(self, data, index):
        """Return the marker identifier for row *index*.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index

        Returns:
        --------
        a matplotlib marker identifier

        Raises:
        -------
        ValueError if the column has more categories than markers.
        """
        # categories get markers in sorted order, so the mapping is
        # deterministic across rows
        categories = sorted(set(data[self.column]))
        if len(categories) > len(self.shapes):
            raise ValueError("Too many different values of the categorical "
                             "attribute for ScaleShape")
        value = data[self.column].iget(index)
        return self.shapes[categories.index(value)]
class ScaleRandomColour(Scale):
    """
    Map each value of a categorical DataFrame column to a pseudo-random
    RGB colour; equal values always get the same colour because the
    value itself seeds the RNG.
    """
    def __init__(self, column):
        """Initialize ScaleRandomColour instance.

        Parameters:
        -----------
        column: string, pandas DataFrame column name
        """
        self.column = column
        self.categorical = True

    def __call__(self, data, index):
        """Return an RGB triple (list of three floats) for row *index*.

        Parameters:
        -----------
        data: pandas DataFrame
        index: pandas DataFrame row index
        """
        # seeding with the cell value makes the colour a pure function
        # of the value (NOTE: this reseeds the global `random` state)
        random.seed(data[self.column].iget(index))
        return [random.random(), random.random(), random.random()]
class ScaleConstant(Scale):
    """
    Degenerate scale that ignores the data and always yields the same
    value; used to fill in unmapped aesthetics automatically.
    """
    def __init__(self, value):
        """Initialize ScaleConstant instance.

        Parameters:
        -----------
        value: any Python value to be returned when called
        """
        self.value = value
        self.categorical = False

    def __call__(self, data, index):
        """Return the constant, ignoring *data* and *index*.

        Parameters:
        -----------
        data: pandas DataFrame (unused)
        index: pandas DataFrame row index (unused)
        """
        return self.value
def default_aes(x=None, y=None):
    """Build the default aesthetics dictionary.

    Every non-positional aesthetic is bound to a ScaleConstant so a
    layer can always be drawn without explicit mappings.

    Parameters:
    -----------
    x: string, DataFrame column name
    y: string, DataFrame column name

    Returns:
    --------
    a dictionary with aesthetics bindings
    """
    aes = {'x': x, 'y': y}
    aes['size'] = ScaleConstant(40.0)
    aes['colour'] = ScaleConstant('grey')
    aes['shape'] = ScaleConstant('o')
    aes['alpha'] = ScaleConstant(1.0)
    return aes
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
    """Build an aesthetics dictionary, validating each binding.

    Plain (non-callable) values are wrapped in ScaleConstant; each
    aesthetic is then checked against the scale types it accepts.

    Parameters:
    -----------
    x: string, DataFrame column name
    y: string, DataFrame column name
    size: function, binding for size attribute of Geoms
    colour: function, binding for colour attribute of Geoms
    shape: function, binding for shape attribute of Geoms
    alpha: function, binding for alpha attribute of Geoms

    Returns:
    --------
    a dictionary with aesthetics bindings

    Raises:
    -------
    ValueError when a binding uses an unsupported scale type.
    """
    # promote literal values (e.g. size=20.0) to constant scales
    if size is not None and not callable(size):
        size = ScaleConstant(size)
    if colour is not None and not callable(colour):
        colour = ScaleConstant(colour)
    if shape is not None and not callable(shape):
        shape = ScaleConstant(shape)
    if alpha is not None and not callable(alpha):
        alpha = ScaleConstant(alpha)
    # each aesthetic only accepts specific scale types (None means
    # "inherit from the previous layer" and is always allowed)
    if size is not None and not isinstance(size, (ScaleConstant, ScaleSize)):
        raise ValueError(
            'size mapping should be done through ScaleConstant or ScaleSize')
    if colour is not None and not isinstance(
            colour, (ScaleConstant, ScaleGradient,
                     ScaleGradient2, ScaleRandomColour)):
        raise ValueError('colour mapping should be done through '
                         'ScaleConstant, ScaleRandomColour, ScaleGradient '
                         'or ScaleGradient2')
    if shape is not None and not isinstance(shape, (ScaleConstant,
                                                    ScaleShape)):
        raise ValueError('shape mapping should be done through ScaleConstant '
                         'or ScaleShape')
    if alpha is not None and not isinstance(alpha, ScaleConstant):
        raise ValueError('alpha mapping should be done through ScaleConstant')
    return {
        'x': x,
        'y': y,
        'size': size,
        'colour': colour,
        'shape': shape,
        'alpha': alpha,
    }
class Layer:
    """
    A single plot layer: a DataFrame plus its aesthetics bindings and
    the legend patches accumulated while drawing.
    """
    def __init__(self, data=None, **kwds):
        """Initialize a layer.

        Parameters:
        -----------
        data: pandas DataFrame instance (may be None and inherited later)
        kwds: aesthetics keyword arguments, forwarded to make_aes
        """
        self.data = data
        self.aes = make_aes(**kwds)
        # maps legend-key tuples to matplotlib patches; filled by work()
        self.legend = {}

    def work(self, fig=None, ax=None):
        """Do the drawing work; the base class draws nothing.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis object

        Returns:
        --------
        a tuple with the same figure and axis instances
        """
        return fig, ax
class GeomPoint(Layer):
    # Scatter layer that evaluates every aesthetic scale per row; slow for
    # large frames (one ax.scatter call per row) but supports per-point
    # size/colour/shape — see GeomScatter for the fast uniform variant.
    def work(self, fig=None, ax=None):
        """Render the layer on a matplotlib axis.
        You can specify either a figure or an axis to draw on.
        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on
        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                # nothing to draw on; return unchanged
                return fig, ax
            else:
                ax = fig.gca()
        for index in range(len(self.data)):
            row = self.data.iloc[index]
            x = row[self.aes['x']]
            y = row[self.aes['y']]
            # each aesthetic is a scale object called with (data, index)
            size_scaler = self.aes['size']
            colour_scaler = self.aes['colour']
            shape_scaler = self.aes['shape']
            alpha = self.aes['alpha']
            size_value = size_scaler(self.data, index)
            colour_value = colour_scaler(self.data, index)
            marker_value = shape_scaler(self.data, index)
            alpha_value = alpha(self.data, index)
            patch = ax.scatter(x, y,
                               s=size_value,
                               c=colour_value,
                               marker=marker_value,
                               alpha=alpha_value)
            # legend key is a flat tuple of (column, value) pairs for every
            # categorical scale in play; later rows with the same key
            # overwrite earlier patches, which is fine for legend purposes
            label = []
            if colour_scaler.categorical:
                label += [colour_scaler.column, row[colour_scaler.column]]
            if shape_scaler.categorical:
                label += [shape_scaler.column, row[shape_scaler.column]]
            self.legend[tuple(label)] = patch
        ax.set_xlabel(self.aes['x'])
        ax.set_ylabel(self.aes['y'])
        return fig, ax
class GeomPolyFit(Layer):
    """
    Layer that overlays a least-squares polynomial fit of a given degree
    on top of the bound x/y columns.
    """
    def __init__(self, degree, lw=2.0, colour='grey'):
        """Initialize GeomPolyFit object.

        Parameters:
        -----------
        degree: an integer, polynomial degree
        lw: line width
        colour: matplotlib colour
        """
        self.degree = degree
        self.lw = lw
        self.colour = colour
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw the polynomial fit on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        from numpy.polynomial.polynomial import polyfit
        from numpy.polynomial.polynomial import polyval
        xs = self.data[self.aes['x']]
        ys = self.data[self.aes['y']]
        coeffs = polyfit(xs, ys, self.degree)
        # evaluate the fit on an evenly spaced grid covering the x range
        grid = np.linspace(min(xs), max(xs), len(xs))
        ax.plot(grid, polyval(grid, coeffs), lw=self.lw, c=self.colour)
        return fig, ax
class GeomScatter(Layer):
    """
    Fast scatter layer drawn with a single matplotlib call; prefer this
    over GeomPoint when per-point scales are not needed.
    """
    def __init__(self, marker='o', colour='lightblue', alpha=1.0):
        """Initialize GeomScatter instance.

        Parameters:
        -----------
        marker: matplotlib marker string
        colour: matplotlib colour
        alpha: matplotlib alpha
        """
        self.marker = marker
        self.colour = colour
        self.alpha = alpha
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw a scatter plot on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        xs = self.data[self.aes['x']]
        ys = self.data[self.aes['y']]
        ax.scatter(xs, ys, marker=self.marker, c=self.colour,
                   alpha=self.alpha)
        return fig, ax
class GeomHistogram(Layer):
    """
    Fast histogram layer; use this instead of GeomBar for speed.
    """
    def __init__(self, bins=10, colour='lightblue'):
        """Initialize GeomHistogram instance.

        Parameters:
        -----------
        bins: integer, number of histogram bins
        colour: matplotlib colour
        """
        self.bins = bins
        self.colour = colour
        Layer.__init__(self)

    def work(self, fig=None, ax=None):
        """Draw a histogram on a matplotlib figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        column = self.aes['x']
        ax.hist(_values_from_object(self.data[column]), self.bins,
                facecolor=self.colour)
        ax.set_xlabel(column)
        return fig, ax
class GeomDensity(Layer):
    """
    One dimensional Gaussian kernel density estimation layer.
    """
    def work(self, fig=None, ax=None):
        """Draw a 1-D kernel density plot on a figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on

        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        from scipy.stats import gaussian_kde
        series = self.data[self.aes['x']]
        kde = gaussian_kde(series)
        # evaluate the density on a 200-point grid over the data range
        grid = np.linspace(series.min(), series.max(), 200)
        ax.plot(grid, kde.evaluate(grid))
        return fig, ax
class GeomDensity2D(Layer):
    """
    Two dimensional Gaussian kernel density contour layer.
    """
    def work(self, fig=None, ax=None):
        """Draw a 2-D kernel density contour plot on a figure or axis.

        Parameters:
        -----------
        fig: matplotlib figure object
        ax: matplotlib axis object to draw on

        Returns:
        --------
        fig, ax: matplotlib figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            ax = fig.gca()
        import scipy.stats as stats
        xs = self.data[self.aes['x']]
        ys = self.data[self.aes['y']]
        x_min, x_max = xs.min(), xs.max()
        y_min, y_max = ys.min(), ys.max()
        # 200x200 evaluation grid spanning the data bounding box
        X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
        positions = np.vstack([X.ravel(), Y.ravel()])
        kernel = stats.gaussian_kde(np.vstack([xs, ys]))
        Z = np.reshape(kernel(positions).T, X.shape)
        ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
        return fig, ax
class TrellisGrid(Layer):
    # Pseudo-layer that, instead of drawing, splits every real layer into a
    # rows x cols grid of clones, one per groupby cell.
    def __init__(self, by):
        """Initialize TrellisGrid instance.
        Parameters:
        -----------
        by: list of two column names to group by; '.' means "do not group
            along this axis" (at most one of the two may be '.')
        """
        if len(by) != 2:
            raise ValueError("You must give a list of length 2 to group by")
        elif by[0] == '.' and by[1] == '.':
            raise ValueError(
                "At least one of grouping attributes must be not a dot")
        self.by = by
    def trellis(self, layers):
        """
        Create a trellis structure for a list of layers. Each layer will be
        cloned with different data in to a two dimensional grid.
        Parameters:
        -----------
        layers: a list of Layer objects
        Returns:
        --------
        trellised_layers: Clones of each layer in the list arranged in a
        trellised latice
        """
        trellised_layers = []
        for layer in layers:
            data = layer.data
            # '.' on either side collapses the grid to a single row/column
            if self.by[0] == '.':
                grouped = data.groupby(self.by[1])
            elif self.by[1] == '.':
                grouped = data.groupby(self.by[0])
            else:
                grouped = data.groupby(self.by)
            groups = list(grouped.groups.keys())
            # shingle1/shingle2 are the distinct values along each grid axis
            if self.by[0] == '.' or self.by[1] == '.':
                shingle1 = set([g for g in groups])
            else:
                shingle1 = set([g[0] for g in groups])
                shingle2 = set([g[1] for g in groups])
            # note: rows/cols and group_grid are (re)assigned per layer; all
            # layers share the same grouping so the last assignment is valid
            if self.by[0] == '.':
                self.rows = 1
                self.cols = len(shingle1)
            elif self.by[1] == '.':
                self.rows = len(shingle1)
                self.cols = 1
            else:
                self.rows = len(shingle1)
                self.cols = len(shingle2)
            trellised = [[None for _ in range(self.cols)]
                         for _ in range(self.rows)]
            self.group_grid = [[None for _ in range(
                self.cols)] for _ in range(self.rows)]
            row = 0
            col = 0
            # fill the grid row-major, in groupby iteration order (sorted
            # group keys), cloning the layer with each group's sub-frame
            for group, data in grouped:
                new_layer = deepcopy(layer)
                new_layer.data = data
                trellised[row][col] = new_layer
                self.group_grid[row][col] = group
                col += 1
                if col >= self.cols:
                    col = 0
                    row += 1
            trellised_layers.append(trellised)
        return trellised_layers
def dictionary_union(dict1, dict2):
    """Return the union of two dictionaries.

    On a key collision the value from *dict2* wins; callers assume
    values under shared keys are identical anyway.

    Parameters:
    -----------
    dict1: Python dictionary
    dict2: Python dictionary

    Returns:
    --------
    A new dictionary containing every binding from both inputs.
    """
    merged = {}
    merged.update(dict1)
    merged.update(dict2)
    return merged
def merge_aes(layer1, layer2):
    """Fill in unset aesthetics of *layer2* from *layer1*, in place.

    Used by sequence_layers: order matters, the earlier layer supplies
    defaults for the later one.

    Parameters:
    -----------
    layer1: Layer object (source of defaults)
    layer2: Layer object (modified in place)
    """
    for name in layer2.aes:
        if layer2.aes[name] is None:
            layer2.aes[name] = layer1.aes[name]
def sequence_layers(layers):
    """
    Propagate missing information forwards through a list of layers:

    * a layer with data None inherits the previous layer's data;
    * each aesthetic set to None is inherited from the previous layer.

    The list is modified in place and also returned.

    Parameters:
    -----------
    layers: a list of Layer objects
    """
    for previous, current in zip(layers[:-1], layers[1:]):
        if current.data is None:
            current.data = previous.data
        merge_aes(previous, current)
    return layers
def sequence_grids(layer_grids):
    """
    Grid analogue of sequence_layers: propagate data and aesthetics
    through a sequence of trellised layer grids, cell by cell.

    Parameters:
    -----------
    layer_grids: a list of two dimensional layer grids
    """
    for prev_grid, next_grid in zip(layer_grids[:-1], layer_grids[1:]):
        for prev_row, next_row in zip(prev_grid, next_grid):
            for prev_layer, next_layer in zip(prev_row, next_row):
                if next_layer.data is None:
                    next_layer.data = prev_layer.data
                merge_aes(prev_layer, next_layer)
    return layer_grids
def work_grid(grid, fig):
    """
    Add one subplot per grid cell to *fig* and let each layer draw on
    its subplot.

    Parameters:
    -----------
    grid: a two dimensional grid of layers
    fig: matplotlib figure to draw on

    Returns:
    --------
    axes: a two dimensional list of matplotlib axes
    """
    nrows = len(grid)
    ncols = len(grid[0])
    axes = [[None] * ncols for _ in range(nrows)]
    for r in range(nrows):
        for c in range(ncols):
            # subplot indices are 1-based and row-major
            subplot = fig.add_subplot(nrows, ncols, ncols * r + c + 1)
            axes[r][c] = subplot
            grid[r][c].work(ax=subplot)
    return axes
def adjust_subplots(fig, axes, trellis, layers):
    """Adjust the subplots on a matplotlib figure with the
    fact that we have a trellis plot in mind.

    Unifies all axis limits, strips tick labels from interior subplots,
    adds a group label header to each cell and builds the shared legend.

    Parameters:
    -----------
    fig: matplotlib figure
    axes: a two dimensional grid of matplotlib axes
    trellis: TrellisGrid object
    layers: last grid of layers in the plot
    """
    # Flatten the axes grid
    axes = [ax for row in axes for ax in row]
    # give every subplot the same x/y limits so cells are comparable
    min_x = min([ax.get_xlim()[0] for ax in axes])
    max_x = max([ax.get_xlim()[1] for ax in axes])
    min_y = min([ax.get_ylim()[0] for ax in axes])
    max_y = max([ax.get_ylim()[1] for ax in axes])
    [ax.set_xlim(min_x, max_x) for ax in axes]
    [ax.set_ylim(min_y, max_y) for ax in axes]
    for index, axis in enumerate(axes):
        # keep y ticks only on the first column
        if index % trellis.cols == 0:
            pass
        else:
            axis.get_yaxis().set_ticks([])
            axis.set_ylabel('')
        # keep x ticks only on the bottom row.
        # BUG FIX: this used `index / trellis.cols`, which under Python 3's
        # true division yields a float and compares unequal for most
        # bottom-row subplots, wrongly stripping their x axis; the sibling
        # lookups below already use floor division.
        if index // trellis.cols == trellis.rows - 1:
            pass
        else:
            axis.get_xaxis().set_ticks([])
            axis.set_xlabel('')
        # header cell(s) naming the group this subplot displays
        if trellis.by[0] == '.':
            label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[
                index // trellis.cols][index % trellis.cols])
            label2 = None
        elif trellis.by[1] == '.':
            label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[
                index // trellis.cols][index % trellis.cols])
            label2 = None
        else:
            label1 = "%s = %s" % (
                trellis.by[0],
                trellis.group_grid[index // trellis.cols]
                [index % trellis.cols][0])
            label2 = "%s = %s" % (
                trellis.by[1],
                trellis.group_grid[index // trellis.cols]
                [index % trellis.cols][1])
        if label2 is not None:
            axis.table(cellText=[[label1], [label2]],
                       loc='top', cellLoc='center',
                       cellColours=[['lightgrey'], ['lightgrey']])
        else:
            axis.table(cellText=[[label1]], loc='top',
                       cellLoc='center', cellColours=[['lightgrey']])
    # Flatten the layer grid
    layers = [layer for row in layers for layer in row]
    # merge the per-layer legends; keys are tuples of (column, value) pairs
    legend = {}
    for layer in layers:
        legend = dictionary_union(legend, layer.legend)
    patches = []
    labels = []
    # sort legend entries by their value components for stable ordering
    if len(list(legend.keys())) == 0:
        key_function = lambda tup: tup
    elif len(list(legend.keys())[0]) == 2:
        key_function = lambda tup: (tup[1])
    else:
        key_function = lambda tup: (tup[1], tup[3])
    for key in sorted(list(legend.keys()), key=key_function):
        value = legend[key]
        patches.append(value)
        if len(key) == 2:
            col, val = key
            labels.append("%s" % str(val))
        elif len(key) == 4:
            col1, val1, col2, val2 = key
            labels.append("%s, %s" % (str(val1), str(val2)))
        else:
            # BUG FIX: corrected "lengend" typo in the error message
            raise ValueError(
                "Maximum 2 categorical attributes to display a legend of")
    if len(legend):
        fig.legend(patches, labels, loc='upper right')
    fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
    """
    The main plot object. Add layers to an instance of this object to create a
    plot.
    """
    def __init__(self, data, x=None, y=None):
        """Initialize RPlot instance.
        Parameters:
        -----------
        data: pandas DataFrame instance
        x: string, DataFrame column name
        y: string, DataFrame column name
        """
        # base layer carries the data and the default aesthetic bindings;
        # later layers inherit from it via sequence_layers
        self.layers = [Layer(data, **default_aes(x=x, y=y))]
    def add(self, layer):
        """Add a layer to RPlot instance.
        Parameters:
        -----------
        layer: Layer instance
        Raises:
        -------
        TypeError if *layer* is not a Layer.
        """
        if not isinstance(layer, Layer):
            raise TypeError(
                "The operand on the right side of + must be a Layer instance")
        self.layers.append(layer)
    def render(self, fig=None):
        """Render all the layers on a matplotlib figure.
        Parameters:
        -----------
        fig: matplotlib figure; defaults to the current pyplot figure
        Returns:
        --------
        fig: the figure drawn on
        """
        import matplotlib.pyplot as plt
        if fig is None:
            fig = plt.gcf()
        # Look for the last TrellisGrid instance in the layer list
        last_trellis = None
        for layer in self.layers:
            if isinstance(layer, TrellisGrid):
                last_trellis = layer
        if last_trellis is None:
            # We have a simple, non-trellised plot
            new_layers = sequence_layers(self.layers)
            for layer in new_layers:
                layer.work(fig=fig)
            # merge per-layer legends; keys are tuples of
            # (column, value) pairs produced by categorical scales
            legend = {}
            for layer in new_layers:
                legend = dictionary_union(legend, layer.legend)
            patches = []
            labels = []
            # sort legend entries by their value components
            if len(list(legend.keys())) == 0:
                key_function = lambda tup: tup
            elif len(list(legend.keys())[0]) == 2:
                key_function = lambda tup: (tup[1])
            else:
                key_function = lambda tup: (tup[1], tup[3])
            for key in sorted(list(legend.keys()), key=key_function):
                value = legend[key]
                patches.append(value)
                if len(key) == 2:
                    col, val = key
                    labels.append("%s" % str(val))
                elif len(key) == 4:
                    col1, val1, col2, val2 = key
                    labels.append("%s, %s" % (str(val1), str(val2)))
                else:
                    raise ValueError("Maximum 2 categorical attributes to "
                                     "display a lengend of")
            if len(legend):
                fig.legend(patches, labels, loc='upper right')
        else:
            # We have a trellised plot. First let's remove all other
            # TrellisGrid instances from the layer list, including this one.
            new_layers = []
            for layer in self.layers:
                if not isinstance(layer, TrellisGrid):
                    new_layers.append(layer)
            new_layers = sequence_layers(new_layers)
            # Now replace the old layers by their trellised versions
            new_layers = last_trellis.trellis(new_layers)
            # Prepare the subplots and draw on them
            new_layers = sequence_grids(new_layers)
            axes_grids = [work_grid(grid, fig) for grid in new_layers]
            # legend/labels are derived from the final grid only
            axes_grid = axes_grids[-1]
            adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
        # And we're done
        return fig
| mit |
massmutual/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output (one indicator column per class)
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=random_state)
# Run classifier; decision_function gives continuous scores to threshold
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve, one curve per class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
    precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
                                                        y_score[:, i])
    average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute the micro-averaged precision-recall curve and its average
# precision by treating every element of the label indicator matrix as
# one binary prediction
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
    y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
                                                     average="micro")
# Plot Precision-Recall curve (class 0 only)
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
         label='micro-average Precision-recall curve (area = {0:0.2f})'
               ''.format(average_precision["micro"]))
for i in range(n_classes):
    plt.plot(recall[i], precision[i],
             label='Precision-recall curve of class {0} (area = {1:0.2f})'
                   ''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
                    n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
                    random_state=None, sample_fraction=.75, **params):
    """Average the feature-selection masks returned by *estimator_func*
    over *n_resampling* randomized runs (stability selection).

    Each run subsamples the rows (``sample_fraction``) and randomly
    rescales a subset of the features (``scaling``); the returned array
    is the per-feature selection frequency in [0, 1].
    """
    random_state = check_random_state(random_state)
    # We are generating 1 - weights, and not weights
    n_samples, n_features = X.shape
    if not (0 < scaling < 1):
        raise ValueError(
            "'scaling' should be between 0 and 1. Got %r instead." % scaling)
    scaling = 1. - scaling
    scores_ = 0.0
    for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
                               pre_dispatch=pre_dispatch)(
            delayed(estimator_func)(
                X, y,
                # BUG FIX: random_integers(0, 1) is deprecated since
                # NumPy 1.11; randint(0, 2) is its documented equivalent
                # and draws the identical stream.
                weights=scaling * random_state.randint(
                    0, 2, size=(n_features,)),
                mask=(random_state.rand(n_samples) < sample_fraction),
                verbose=max(0, verbose - 1),
                **params)
            for _ in range(n_resampling)):
        scores_ += active_set
    scores_ /= n_resampling
    return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
                                                   TransformerMixin)):
    """Base class to implement randomized linear models for feature selection

    This implements the strategy by Meinshausen and Buhlman:
    stability selection with randomized sampling, and random re-weighting of
    the penalty.
    """
    @abstractmethod
    def __init__(self):
        pass

    _center_data = staticmethod(center_data)

    def fit(self, X, y):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, sparse matrix shape = [n_samples, n_features]
            Training data.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
        X = as_float_array(X, copy=False)
        n_samples, n_features = X.shape
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize)
        estimator_func, params = self._make_estimator_and_params(X, y)
        # optionally memoize the expensive resampling loop on disk
        memory = self.memory
        if isinstance(memory, six.string_types):
            memory = Memory(cachedir=memory)
        scores_ = memory.cache(
            _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
        )(
            estimator_func, X, y,
            scaling=self.scaling, n_resampling=self.n_resampling,
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=self.pre_dispatch, random_state=self.random_state,
            sample_fraction=self.sample_fraction, **params)
        if scores_.ndim == 1:
            scores_ = scores_[:, np.newaxis]
        self.all_scores_ = scores_
        # per-feature score is the best score over all regularizations
        self.scores_ = np.max(self.all_scores_, axis=1)
        return self

    def _make_estimator_and_params(self, X, y):
        """Return the parameters passed to the estimator"""
        raise NotImplementedError

    def get_support(self, indices=False):
        """Return a mask, or list, of the features/indices selected."""
        check_is_fitted(self, 'scores_')
        mask = self.scores_ > self.selection_threshold
        return mask if not indices else np.where(mask)[0]

    # XXX: the two function below are copy/pasted from feature_selection,
    # Should we add an intermediate base class?
    def transform(self, X):
        """Transform a new matrix using the selected features"""
        mask = self.get_support()
        X = check_array(X)
        if len(mask) != X.shape[1]:
            raise ValueError("X has a different shape than during fitting.")
        # BUG FIX: the original called check_array(X) a second time here,
        # re-validating (and possibly re-copying) the already-validated
        # array; reuse the checked X instead.
        return X[:, safe_mask(X, mask)]

    def inverse_transform(self, X):
        """Transform a new matrix using the selected features"""
        support = self.get_support()
        if X.ndim == 1:
            X = X[None, :]
        # scatter the kept columns back into a zero-filled full-width array
        Xt = np.zeros((X.shape[0], support.size))
        Xt[:, support] = X
        return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
                      precompute=False, eps=np.finfo(np.float).eps,
                      max_iter=500):
    """Inner loop of RandomizedLasso.

    Fits a lasso path on the subsample selected by ``mask``, with features
    rescaled by ``1 - weights``, and returns a boolean array marking which
    features have a nonzero coefficient at each requested ``alpha``.
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]
    # Center X and y so that no intercept needs to be fit.
    X -= X.mean(axis=0)
    y -= y.mean()
    alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
    # Random down-weighting of the features (the "randomized" part).
    X = (1 - weights) * X
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas_, _, coef_ = lars_path(X, y,
                                      Gram=precompute, copy_X=False,
                                      copy_Gram=False, alpha_min=np.min(alpha),
                                      method='lasso', verbose=verbose,
                                      max_iter=max_iter, eps=eps)
    if len(alpha) > 1:
        if len(alphas_) > 1:  # np.min(alpha) < alpha_min
            # Interpolate the path to read off coefficient activity at
            # every requested alpha value.
            interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
                                    bounds_error=False, fill_value=0.)
            scores = (interpolator(alpha) != 0.0)
        else:
            # Degenerate path: no feature ever becomes active.
            scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
    else:
        scores = coef_[:, -1] != 0.0
    return scores
class RandomizedLasso(BaseRandomizedLinearModel):
    """Randomized Lasso.

    Randomized Lasso works by resampling the train data and computing
    a Lasso on each resampling. In short, the features selected more
    often are good features. It is also known as stability selection.

    Read more in the :ref:`User Guide <randomized_l1>`.

    Parameters
    ----------
    alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha parameter in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article which is scaling.

    scaling : float, optional
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.

    sample_fraction : float, optional
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.

    n_resampling : int, optional
        Number of randomized models.

    selection_threshold : float, optional
        The score above which features should be selected.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    precompute : True | False | 'auto'
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to 'auto' let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform in the Lars algorithm.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the 'tol' parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.

    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.

    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max of \
        ``all_scores_``.

    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLasso
    >>> randomized_lasso = RandomizedLasso()

    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.

    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x

    See also
    --------
    RandomizedLogisticRegression, LogisticRegression
    """
    def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
                 n_resampling=200, selection_threshold=.25,
                 fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto',
                 max_iter=500,
                 eps=np.finfo(np.float).eps, random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        self.alpha = alpha
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.eps = eps
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Return the lasso inner-loop function and its keyword arguments.

        If ``alpha`` is 'aic' or 'bic', the regularization is first chosen
        by information criterion on the full data (stored in ``alpha_``).
        """
        # Validate with a real exception instead of ``assert``: assert
        # statements are stripped when Python runs with -O, which would
        # silently skip this check.
        if self.precompute not in (True, False, None, 'auto'):
            raise ValueError("'precompute' should be one of True, False, "
                             "None or 'auto'. Got %r instead."
                             % self.precompute)
        alpha = self.alpha
        if alpha in ('aic', 'bic'):
            model = LassoLarsIC(precompute=self.precompute,
                                criterion=self.alpha,
                                max_iter=self.max_iter,
                                eps=self.eps)
            model.fit(X, y)
            self.alpha_ = alpha = model.alpha_
        return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
                                       eps=self.eps,
                                       precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
                         fit_intercept=True, tol=1e-3):
    """Inner loop of RandomizedLogisticRegression.

    Fits an L1-penalized logistic regression on the subsample selected by
    ``mask``, with features rescaled by ``1 - weights``, and returns a
    boolean array of shape [n_features, len(C)] marking the features with a
    nonzero coefficient for each value of ``C``.
    """
    X = X[safe_mask(X, mask)]
    y = y[mask]
    if issparse(X):
        # Sparse matrices cannot be scaled column-wise in place; multiply
        # by a diagonal weight matrix instead.
        size = len(weights)
        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
        X = X * weight_dia
    else:
        X *= (1 - weights)
    C = np.atleast_1d(np.asarray(C, dtype=np.float))
    scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
    for this_C, this_scores in zip(C, scores.T):
        # XXX : would be great to do it with a warm_start ...
        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
                                 fit_intercept=fit_intercept)
        clf.fit(X, y)
        # A feature counts as selected when any class has a (numerically)
        # nonzero weight on it.
        this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
    return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
    """Randomized Logistic Regression

    Randomized Regression works by resampling the train data and computing
    a LogisticRegression on each resampling. In short, the features selected
    more often are good features. It is also known as stability selection.

    Read more in the :ref:`User Guide <randomized_l1>`.

    Parameters
    ----------
    C : float, optional, default=1
        The regularization parameter C in the LogisticRegression.

    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.

    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.

    n_resampling : int, optional, default=200
        Number of randomized models.

    selection_threshold : float, optional, default=0.25
        The score above which features should be selected.

    fit_intercept : boolean, optional, default=True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default=True
        If True, the regressors X will be normalized before regression.

    tol : float, optional, default=1e-3
        tolerance for stopping criteria of LogisticRegression

    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    memory : Instance of joblib.Memory or string
        Used for internal caching. By default, no caching is done.
        If a string is given, it is the path to the caching directory.

    Attributes
    ----------
    scores_ : array, shape = [n_features]
        Feature scores between 0 and 1.

    all_scores_ : array, shape = [n_features, n_reg_parameter]
        Feature scores between 0 and 1 for all values of the regularization \
        parameter. The reference article suggests ``scores_`` is the max \
        of ``all_scores_``.

    Examples
    --------
    >>> from sklearn.linear_model import RandomizedLogisticRegression
    >>> randomized_logistic = RandomizedLogisticRegression()

    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.

    References
    ----------
    Stability selection
    Nicolai Meinshausen, Peter Buhlmann
    Journal of the Royal Statistical Society: Series B
    Volume 72, Issue 4, pages 417-473, September 2010
    DOI: 10.1111/j.1467-9868.2010.00740.x

    See also
    --------
    RandomizedLasso, Lasso, ElasticNet
    """
    def __init__(self, C=1, scaling=.5, sample_fraction=.75,
                 n_resampling=200,
                 selection_threshold=.25, tol=1e-3,
                 fit_intercept=True, verbose=False,
                 normalize=True,
                 random_state=None,
                 n_jobs=1, pre_dispatch='3*n_jobs',
                 memory=Memory(cachedir=None, verbose=0)):
        self.C = C
        self.scaling = scaling
        self.sample_fraction = sample_fraction
        self.n_resampling = n_resampling
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.selection_threshold = selection_threshold
        self.pre_dispatch = pre_dispatch
        self.memory = memory
    def _make_estimator_and_params(self, X, y):
        """Return the logistic inner-loop function and its keyword args."""
        params = dict(C=self.C, tol=self.tol,
                      fit_intercept=self.fit_intercept)
        return _randomized_logistic, params
    def _center_data(self, X, y, fit_intercept, normalize=False):
        """Center the data in X but not in y.

        Overrides the base-class hook: classification labels must be left
        untouched, so the centered/normalized y from ``center_data`` is
        discarded and the original y is returned.
        """
        X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
                                            normalize=normalize)
        return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
    """Inner loop of lasso_stability_path.

    Computes one lasso path on the subsample selected by ``mask`` with
    features rescaled by ``weights``, and returns ``(alphas, coefs)`` with
    alphas normalized by alpha_max and sorted in ascending order.
    """
    X = X * weights[np.newaxis, :]
    X = X[safe_mask(X, mask), :]
    y = y[mask]
    # alpha_max is the smallest penalty for which all coefficients are zero.
    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    alpha_min = eps * alpha_max  # set for early stopping in path
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ConvergenceWarning)
        alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
                                     alpha_min=alpha_min)
    # Scale alpha by alpha_max
    alphas /= alphas[0]
    # Sort alphas in ascending order
    alphas = alphas[::-1]
    coefs = coefs[:, ::-1]
    # Get rid of the alphas that are too small
    mask = alphas >= eps
    # We also want to keep the first one: it should be close to the OLS
    # solution
    mask[0] = True
    alphas = alphas[mask]
    coefs = coefs[:, mask]
    return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
                         n_resampling=200, n_grid=100,
                         sample_fraction=0.75,
                         eps=4 * np.finfo(np.float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates.

    Read more in the :ref:`User Guide <randomized_l1>`.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        training data.

    y : array-like, shape = [n_samples]
        target values.

    scaling : float, optional, default=0.5
        The alpha parameter in the stability selection article used to
        randomly scale the features. Should be between 0 and 1.

    random_state : integer or numpy.random.RandomState, optional
        The generator used to randomize the design.

    n_resampling : int, optional, default=200
        Number of randomized models.

    n_grid : int, optional, default=100
        Number of grid points. The path is linearly reinterpolated
        on a grid between 0 and 1 before computing the scores.

    sample_fraction : float, optional, default=0.75
        The fraction of samples to be used in each randomized design.
        Should be between 0 and 1. If 1, all samples are used.

    eps : float, optional
        Smallest value of alpha / alpha_max considered

    n_jobs : integer, optional
        Number of CPUs to use during the resampling. If '-1', use
        all the CPUs

    verbose : boolean or integer, optional
        Sets the verbosity amount

    Returns
    -------
    alphas_grid : array, shape ~ [n_grid]
        The grid points between 0 and 1: alpha/alpha_max

    scores_path : array, shape = [n_features, n_grid]
        The scores for each feature along the path.

    Notes
    -----
    See examples/linear_model/plot_sparse_recovery.py for an example.
    """
    rng = check_random_state(random_state)
    if not (0 < scaling < 1):
        raise ValueError("Parameter 'scaling' should be between 0 and 1."
                         " Got %r instead." % scaling)
    n_samples, n_features = X.shape
    # One lasso path per randomized (subsampled, reweighted) design.
    paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_lasso_stability_path)(
            X, y, mask=rng.rand(n_samples) < sample_fraction,
            weights=1. - scaling * rng.random_integers(0, 1,
                                                       size=(n_features,)),
            eps=eps)
        for k in range(n_resampling))
    # Union of all alpha knots across the resampled paths, ascending.
    all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
    # Take approximately n_grid values
    stride = int(max(1, int(len(all_alphas) / float(n_grid))))
    all_alphas = all_alphas[::stride]
    if not all_alphas[-1] == 1:
        all_alphas.append(1.)
    all_alphas = np.array(all_alphas)
    scores_path = np.zeros((n_features, len(all_alphas)))
    for alphas, coefs in paths:
        if alphas[0] != 0:
            # Extend the path down to alpha=0, where every feature counts
            # as active.
            alphas = np.r_[0, alphas]
            coefs = np.c_[np.ones((n_features, 1)), coefs]
        if alphas[-1] != all_alphas[-1]:
            # Extend the path up to the largest grid alpha with zeros.
            alphas = np.r_[alphas, all_alphas[-1]]
            coefs = np.c_[coefs, np.zeros((n_features, 1))]
        # Nearest-neighbour interpolation of coefficient activity onto the
        # common alpha grid; accumulate selection counts.
        scores_path += (interp1d(alphas, coefs,
                                 kind='nearest', bounds_error=False,
                                 fill_value=0, axis=-1)(all_alphas) != 0)
    scores_path /= n_resampling
    return all_alphas, scores_path
| bsd-3-clause |
Averroes/statsmodels | statsmodels/graphics/tests/test_boxplots.py | 28 | 1257 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
# Matplotlib is an optional dependency for these tests; record whether it is
# importable so the tests below can be skipped when it is not.  Catch
# Exception rather than a bare ``except:`` so that KeyboardInterrupt and
# SystemExit are not swallowed during import.
try:
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
    # Test violinplot and beanplot with the same dataset.
    # This is a smoke test: each plot is drawn onto a fresh figure and the
    # figure is closed; only exceptions would make it fail.
    data = anes96.load_pandas()
    party_ID = np.arange(7)
    labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
              "Independent-Independent", "Independent-Republican",
              "Weak Republican", "Strong Republican"]
    # Respondent ages grouped by party-identification bucket.
    # NOTE(review): the loop variable ``id`` shadows the builtin of the
    # same name.
    age = [data.exog['age'][data.endog == id] for id in party_ID]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    violinplot(age, ax=ax, labels=labels,
               plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
                          'label_fontsize':'small',
                          'label_rotation':30})
    plt.close(fig)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    beanplot(age, ax=ax, labels=labels,
             plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
                        'label_fontsize':'small',
                        'label_rotation':30})
    plt.close(fig)
| bsd-3-clause |
voxlol/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    # Tests the Silhouette Coefficient.
    # Checks that precomputed distances and raw features give the same
    # score, with and without sampling, and that sparse input works.
    dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
    # Test without calculating D
    silhouette_metric = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(silhouette, silhouette_metric)
    # Test with sampling: the same random_state must give the same score
    # for both input forms.
    silhouette = silhouette_score(D, y, metric='precomputed',
                                  sample_size=int(X.shape[0] / 2),
                                  random_state=0)
    silhouette_metric = silhouette_score(X, y, metric='euclidean',
                                         sample_size=int(X.shape[0] / 2),
                                         random_state=0)
    assert(silhouette > 0)
    assert(silhouette_metric > 0)
    assert_almost_equal(silhouette_metric, silhouette)
    # Test with sparse X
    X_sparse = csr_matrix(X)
    D = pairwise_distances(X_sparse, metric='euclidean')
    silhouette = silhouette_score(D, y, metric='precomputed')
    assert(silhouette > 0)
def test_no_nan():
    # Regression test for issue #960: silhouette_score used to return nan
    # whenever some cluster contained exactly one sample (cluster 0 here
    # has a single member).
    cluster_labels = np.array([1, 0, 1, 1, 1])
    n = len(cluster_labels)
    # Any deterministic distance matrix will do for this check.
    distances = np.random.RandomState(0).rand(n, n)
    score = silhouette_score(distances, cluster_labels, metric='precomputed')
    assert_false(np.isnan(score))
def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    # silhouette_score is undefined when every sample is its own cluster or
    # when there is a single cluster; both must raise ValueError.
    dataset = datasets.load_iris()
    X = dataset.data
    # n_labels = n_samples
    y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         'Number of labels is %d\. Valid values are 2 '
                         'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         'Number of labels is %d\. Valid values are 2 '
                         'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
                         silhouette_score, X, y)
| bsd-3-clause |
xgds/xgds_instrument | xgds_instrument/views.py | 1 | 6157 | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import datetime
import json
import pandas as pd
import pytz
import httplib
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from xgds_instrument.forms import ImportInstrumentDataForm
from requests.api import request
from django.core.urlresolvers import reverse
from geocamUtil.loader import LazyGetModelByName
def lookupImportFunctionByName(moduleName, functionName):
    """Resolve ``functionName`` inside the dotted module path ``moduleName``.

    ``__import__`` on a dotted path returns the top-level package, so the
    leaf module is fetched from it via the last path component before the
    function attribute is looked up.
    """
    package = __import__(moduleName)
    leaf_module = getattr(package, moduleName.split(".")[-1])
    return getattr(leaf_module, functionName)
def cleanValue(s):
    """Coerce ``s`` to a float, returning None for empty or unparseable input.

    Falsy inputs (None, '', ...) are treated as "no value supplied" and map
    to None, as do strings that float() cannot parse.
    """
    if s:
        try:
            return float(s)
        except ValueError:
            pass
    return None
def isNumber(s):
    """Return True when ``s`` is truthy and parseable as a float."""
    if not s:
        return False
    try:
        float(s)
    except ValueError:
        return False
    return True
def editInstrumentDataPosition(dataProduct, newLatitude, newLongitude, newAltitude):
    ''' create or update the user position for an instrument data reading '''
    # Values arrive as form strings; non-numeric strings become None.
    cleanLatitude = cleanValue(newLatitude)
    cleanLongitude = cleanValue(newLongitude)
    cleanAltitude = cleanValue(newAltitude)
    # Bail out when either coordinate is missing.
    # NOTE(review): this guard tests the raw inputs, so a non-numeric string
    # such as "abc" passes it while cleanLatitude is None -- confirm intended.
    if not newLatitude or not newLongitude:
        return
    # Only touch the database when the cleaned values actually differ from
    # what is stored on the data product.
    if cleanLatitude != dataProduct.lat or cleanLongitude != dataProduct.lon or cleanAltitude != dataProduct.alt:
        if dataProduct.user_position is None:
            # First manual correction: create a new past-position record
            # stamped with the product's acquisition time.
            LOCATION_MODEL = LazyGetModelByName(settings.GEOCAM_TRACK_PAST_POSITION_MODEL)
            dataProduct.user_position = LOCATION_MODEL.get().objects.create(serverTimestamp = datetime.datetime.now(pytz.utc),
                                                                            timestamp = dataProduct.acquisition_time,
                                                                            latitude = cleanLatitude,
                                                                            longitude = cleanLongitude,
                                                                            altitude = cleanAltitude)
        else:
            # Subsequent corrections update the existing record in place.
            dataProduct.user_position.latitude = cleanLatitude
            dataProduct.user_position.longitude = cleanLongitude
            dataProduct.user_position.altitude = cleanAltitude
            dataProduct.user_position.save()
        dataProduct.save()
def editInstrumentData(request, instrument_name, pk):
    """Render the instrument-data edit page with a blank import form.

    NOTE(review): ``instrument_name`` and ``pk`` are currently unused and
    the form is never bound to POST data -- looks unfinished; confirm.
    """
    form = ImportInstrumentDataForm()
    errors = form.errors
    return render(
        request,
        'xgds_instrument/editInstrumentData.html',
        {
            'form': form,
            'errorstring': errors,
        },
    )
def instrumentDataImport(request):
    """Handle the instrument-data import page.

    GET renders an empty import form.  POST validates the form and, on
    success, delegates to the instrument-specific import function resolved
    by name from the configured module; on validation failure the form is
    re-rendered with its errors and a 406 status.
    """
    errors = None
    status = httplib.OK
    if request.method == 'POST':
        form = ImportInstrumentDataForm(request.POST, request.FILES)
        if form.is_valid():
            instrument = form.cleaned_data["instrument"]
            # Each instrument names its own import routine; look it up in
            # the configured import module.
            importFxn = lookupImportFunctionByName(
                settings.XGDS_INSTRUMENT_IMPORT_MODULE_PATH,
                instrument.dataImportFunctionName)
            object_id = None
            if 'object_id' in form.cleaned_data:
                object_id = int(form.cleaned_data['object_id'])
            return importFxn(instrument=instrument,
                             portableDataFile=request.FILES["portableDataFile"],
                             manufacturerDataFile=request.FILES["manufacturerDataFile"],
                             utcStamp=form.cleaned_data["dataCollectionTime"],
                             timezone=form.getTimezone(),
                             vehicle=form.getVehicle(),
                             user=request.user,
                             latitude=form.cleaned_data['lat'],
                             longitude=form.cleaned_data['lon'],
                             altitude=form.cleaned_data['alt'],
                             collector=form.cleaned_data["collector"],
                             object_id=object_id)
        else:
            errors = form.errors
            # Fixed: was ``status = status=httplib.NOT_ACCEPTABLE``, a
            # redundant chained assignment.
            status = httplib.NOT_ACCEPTABLE
    else:
        form = ImportInstrumentDataForm()
    return render(
        request,
        'xgds_instrument/importInstrumentData.html',
        {
            'form': form,
            'errorstring': errors,
            'instrumentDataImportUrl': reverse('instrument_data_import'),
            'instrumentType': 'Science Instruments',
            'title': settings.XGDS_CORE_FLIGHT_MONIKER,
            'help_content_path': 'xgds_instrument/help/import.rst'
        },
        status=status
    )
def getInstrumentDataJson(request, productModel, productPk):
    """Serve the samples of one instrument data product as JSON."""
    model_class = LazyGetModelByName(productModel).get()
    data_product = get_object_or_404(model_class, pk=productPk)
    payload = json.dumps(data_product.samples)
    return HttpResponse(payload, content_type='application/json')
def getInstrumentDataCsvResponse(request, productModel, productPk):
    """Serve one instrument data product as a CSV file attachment."""
    model_class = LazyGetModelByName(productModel).get()
    data_product = get_object_or_404(model_class, pk=productPk)
    filename, dataframe = data_product.getInstrumentDataCsv()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=' + filename
    # Write the CSV straight into the response body.
    dataframe.to_csv(response, index=False)
    return response
| apache-2.0 |
datacommonsorg/data | scripts/us_census/geojsons_low_res/plotter.py | 1 | 2623 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plots and compares two Geojson files: before and after running simplify.py.
Typical usage:
python3 plotter.py --original_path original-data/geoId-01.geojson
--simplified_path simplified-data/geoId-01-simple.geojson
"""
from absl import flags
from absl import app
import geopandas as gpd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
# Shared figure: one pair of axes reused by successive
# compare_plots(show=False) calls so several comparisons can accumulate.
_, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
def compare_plots(geojson1, geojson2, show=True):
    """Compares geopandas objects by plotting them side by side.

    Args:
        geojson1: A geopandas GeoDataFrame of the first object to compare.
        geojson2: A geopandas GeoDataFrame of the second object to compare.
        show: If True, it shows the plot after it is created. If False, it does
            not automatically show, waiting for the user to explicitly call
            plt.show(). This allows multiple GeoDataFrames to be compared at
            once.
    """
    if show:
        # One-off comparison: draw on a fresh pair of axes and show now.
        _, (new_ax1, new_ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
        f1 = geojson1.plot(ax=new_ax1)
        f2 = geojson2.plot(ax=new_ax2)
        f1.set_title('Original.')
        f2.set_title('Simplified.')
        plt.show()
    else:
        # Accumulate onto the shared module-level axes (ax1/ax2); the
        # caller is responsible for the eventual plt.show().
        f1 = geojson1.plot(ax=ax1)
        f2 = geojson2.plot(ax=ax2)
        f1.set_title('Original.')
        f2.set_title('Simplified.')
def main(_):
    """Load the two geojson files named by the flags and plot side by side.

    Relies on ``FLAGS`` being bound in the ``__main__`` guard below.
    """
    original = gpd.read_file(FLAGS.original_path)
    simple = gpd.read_file(FLAGS.simplified_path)
    compare_plots(original, simple)
FLAGS = flags.FLAGS
flags.DEFINE_string('original_path',
default=None,
help='Path to original geojson to be compared.')
flags.DEFINE_string('simplified_path',
default=None,
help='Path to simplified geojson to be compared.')
flags.mark_flag_as_required('original_path')
flags.mark_flag_as_required('simplified_path')
app.run(main)
| apache-2.0 |
gkulkarni/JetMorphology | jet_unqualmasses.py | 1 | 3657 | """
File: jet_unequalmasses.py
Jet morphology from a BH binary that is in the GW-dominated phase of
its inspiral (Figures 4). BH masses are not assumed to be equal.
"""
import matplotlib as mpl
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'cm'
mpl.rcParams['font.size'] = '22'
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
# --- Model parameters (Python 2 script) ---
i = 40.0 # degrees; inclination of the jet axis to the line of sight
beta = 0.90 # jet speed in units of c
d = 100.0 # Mpc; Distance between jet and observer
psi0 = 2.0*np.pi/180.0 # radians; intrinsic half-opening angle at separation a0
m1 = 0.5e10 #msun
m2 = m1 #msun; NOTE(review): despite the filename, masses are set equal here
mu = m1*m2/(m1+m2) # reduced mass
zeta = 4.0*mu/(m1+m2) # See Loeb 2010 near eqn.1
print 'zeta=', zeta
M = 1.0e10 # Msun; total mass of the equal-mass binary
Mdot = 1.0 # Eddington units
a0 = 8.3e-3*(M*1.0e-8)**(3./4.)*(Mdot**(-0.25)) # pc; initial binary separation
pcto_10to16cm = 0.003241 # 1e16 cm expressed in pc
a0 /= pcto_10to16cm # 1.0e16 cm
coeff = -2.56e5/(M*1.0e-8)**3 # GW decay coefficient for a(t)^4
coeff /= zeta # for unequal-mass binary; see Loeb 2010 eqn. 4
d *= 1.0e3 # kpc
output_filename = 'jet_i%2d_beta%3.2f_mdot%3.2f' % (i, beta, Mdot)
save_pdf = False
i *= np.pi/180.0 # radians
c = 3.0e5 # km/s; speed of light
yrbys = 3.154e7 # seconds per year
kpcbykm = 3.086e16 # km per kpc
def binary_separation_gw(t):
    """Binary separation (units of 1e16 cm) at time t (yr).

    Inverts the GW-driven a(t)^4 decay law anchored at (t0, a0) with
    module-level decay coefficient ``coeff``.
    """
    quartic = (4.0 / coeff) * (t - t0 + coeff * a0**4 / 4.0)
    return quartic**0.25
def binary_orbital_period(a_16):
    """Orbital period in years for binary separation a_16 (1e16 cm).

    Uses the module-level total mass M, scaled to units of 1e8 Msun.
    """
    return 1.72 * a_16**1.5 / np.sqrt(M * 1.0e-8)
def half_opening_angle_intrinsic(a_16):
    """Intrinsic half-opening angle (radians) at separation a_16 (1e16 cm).

    sin(psi) scales inversely with separation, anchored at psi0 for a0.
    """
    return np.arcsin(np.sin(psi0) * a0 / a_16)
# Select the time grid (in yr) and an output-filename suffix from the
# command line: 0 = full evolution, 1 = zoom on late times, 2 = linear zoom.
# NOTE(review): any other value leaves ``t`` undefined and crashes below.
case = int(sys.argv[1])
if case == 0:
    t = np.logspace(-2.0,2.0,10000000)
    output_filename += '_full'
elif case == 1:
    #t = np.linspace(10.0,14.0,10000)
    t = np.logspace(-2.0,2.0,10000000)
    t = t[6000000:9500000]
    output_filename += '_zoom1'
elif case == 2:
    t = np.linspace(1.0,2.0,10000)
    output_filename += '_zoom2'
t0 = t[0]
def t_binary(time):
    """Time remaining before merger (yr) at the given time on the grid.

    t_merge follows from the a(t)^4 decay law with the module-level
    coefficient ``coeff`` and initial separation ``a0``.
    """
    t_merge=abs(coeff)*a0**4/4.0
    return np.abs(time-t_merge) # yr
    #return np.abs(time-64001.477505390176) # yr
def Omega(time):
    """Jet precession phase (radians) at ``time`` (yr)."""
    separation = binary_separation_gw(t_binary(time))
    orbital_period = binary_orbital_period(separation)  # yr
    # Angular velocity (2*pi / period) multiplied by the elapsed time.
    return 2.0 * np.pi * time / orbital_period
def vel(time):
    """Velocity components (km/s) of jet material launched at ``time``.

    The module-level global ``sign`` selects the forward (+1) or backward
    (-1) jet; x is along the line of sight.
    """
    # We are following geometry from Gower et al. Figure 1.
    psi = half_opening_angle_intrinsic(binary_separation_gw(t_binary(time)))
    vx = beta*c*(np.sin(psi)*np.sin(i)*np.cos(Omega(time)) + np.cos(psi)*np.cos(i))
    vy = beta*c*np.sin(psi)*np.sin(Omega(time))
    vz = beta*c*(np.cos(psi)*np.sin(i)-np.sin(psi)*np.cos(i)*np.cos(Omega(time)))
    return sign*vx, sign*vy, sign*vz # km/s
# --- Compute apparent sky tracks for both jets and plot them ---
sign = 1 # forward jet
velx, vely, velz = vel(t)
# Transverse positions of material launched at each t.
y = vely*t*yrbys/kpcbykm # kpc
z = velz*t*yrbys/kpcbykm # kpc
# Apparent positions: divide by (1 - v_x/c) light-travel-time factor.
y_obs = y/(1.0-velx/c)
z_obs = z/(1.0-velx/c)
phi_y_obs = y_obs/d * 180.0/np.pi * 3600.0 # arcsec
phi_z_obs = z_obs/d * 180.0/np.pi * 3600.0 # arcsec
sign = -1 # backward jet
velx, vely, velz = vel(t)
yb = vely*t*yrbys/kpcbykm # kpc
zb = velz*t*yrbys/kpcbykm # kpc
y_obsb = yb/(1.0-velx/c)
z_obsb = zb/(1.0-velx/c)
phi_y_obsb = y_obsb/d * 180.0/np.pi * 3600.0 # arcsec
phi_z_obsb = z_obsb/d * 180.0/np.pi * 3600.0 # arcsec
# Convert all angular offsets from arcsec to milliarcsec.
phi_y_obsb *= 1.0e3 # mas
phi_z_obsb *= 1.0e3 # mas
phi_y_obs *= 1.0e3 # mas
phi_z_obs *= 1.0e3 # mas
fig = plt.figure(figsize=(7, 7), dpi=100)
ax = fig.add_subplot(1, 1, 1)
if case==1:
    ax.set_ylim(-50.0,50.0)
ax.plot(phi_z_obs,phi_y_obs,c='k',lw=1,rasterized=True)
# The zoom-1 view shows only the forward jet.
if case!=1:
    ax.plot(phi_z_obsb,phi_y_obsb,c='k',lw=1,rasterized=True)
ax.set_xlabel('mas',labelpad=15)
ax.set_ylabel('mas')
if save_pdf:
    plt.savefig(output_filename+'.pdf',bbox_inches='tight')
plt.show()
| mit |
pkruskal/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Two obvious blocks in the affinity matrix must be recovered for every
    # combination of eigen solver, label-assignment strategy, and
    # dense/sparse input; a pickled model must round-trip its attributes.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary; normalize before comparing.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # pyamg is an optional dependency: without it, requesting the
        # "amg" solver must raise instead of silently falling back.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # Test that SpectralClustering fails with an unknown mode set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    # Labels are irrelevant here: only the affinity matrix is used, so the
    # previously unused `true_labels` local is discarded.
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # Test that SpectralClustering fails with an unknown assign_labels set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    # Labels are irrelevant here: only the affinity matrix is used, so the
    # previously unused `true_labels` local is discarded.
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # Two tight, well-separated blobs: clustering a sparsified RBF affinity
    # must recover the generating labels exactly (ARI == 1).
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    affinity = rbf_kernel(X, gamma=1)
    # Zero out near-zero similarities so the matrix is genuinely sparse.
    shifted = affinity - 1e-4
    affinity = np.where(shifted > 0, shifted, 0)
    affinity = sparse.coo_matrix(affinity)

    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed').fit(affinity)
    assert_equal(adjusted_rand_score(y, model.labels_), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # The kNN graph on 20 points is expected to be disconnected here.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)

    # Default (rbf) affinity with a non-default gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    X = check_random_state(10).rand(10, 5) * 10

    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            # Only smoke-check the output shape for arbitrary kernels.
            assert_equal((X.shape[0],), labels.shape)

    # Constant affinity given as a callable.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})    # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # Random class labels in [0, n_class] inclusive.
            # `RandomState.random_integers` and the `np.float` alias were
            # removed from modern NumPy; `randint` with an exclusive upper
            # bound draws from the identical distribution (random_integers
            # delegated to randint(low, high + 1)).
            y_true = random_state.randint(0, n_class + 1, n_samples)
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            # Recovery from mild noise should be nearly perfect.
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
eyadsibai/rep | tests/test_factory_regression.py | 4 | 3005 | from __future__ import division, print_function, absolute_import
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.metrics.metrics import mean_squared_error
import numpy
from rep.data import LabeledDataStorage
from rep.metaml import RegressorsFactory
from six.moves import cPickle
from rep.report import RegressionReport
from rep.test.test_estimators import generate_classification_data
__author__ = 'Tatiana Likhomanenko'
# TODO testing of right-classification of estimators
def test_factory():
    """End-to-end smoke test of RegressorsFactory: fit, predict,
    staged_predict, pickling round-trip, and report plotting."""
    factory = RegressorsFactory()
    try:
        # TMVA is optional; include it only when the binding is installed.
        from rep.estimators.tmva import TMVARegressor
        factory.add_regressor('tmva', TMVARegressor())
    except ImportError:
        pass
    factory.add_regressor('rf', RandomForestRegressor(n_estimators=10))
    factory.add_regressor('ada', AdaBoostRegressor(n_estimators=20))

    X, y, sample_weight = generate_classification_data()
    # fit() is expected to return the factory itself (fluent API).
    assert factory == factory.fit(X, y, sample_weight=sample_weight, features=list(X.columns))
    values = factory.predict(X)

    for cl in factory.values():
        assert list(cl.features) == list(X.columns)

    for key, val in values.items():
        score = mean_squared_error(y, val)
        print(score)
        assert score < 0.2

    for key, iterator in factory.staged_predict(X).items():
        assert key != 'tmva', 'tmva does not support staged pp'
        for p in iterator:
            assert p.shape == (len(X), )

        # checking that last iteration coincides with previous
        assert numpy.all(p == values[key])

    # testing picklability
    dump_string = cPickle.dumps(factory)
    clf_loaded = cPickle.loads(dump_string)

    assert type(factory) == type(clf_loaded)

    # The unpickled factory must reproduce the original predictions exactly.
    probs1 = factory.predict(X)
    probs2 = clf_loaded.predict(X)
    for key, val in probs1.items():
        assert numpy.all(val == probs2[key]), 'something strange was loaded'

    # Report plotting smoke tests (no assertions; just must not raise).
    report = RegressionReport({'rf': factory['rf']}, LabeledDataStorage(X, y, sample_weight))
    report.feature_importance_shuffling(mean_squared_mod).plot(new_plot=True, figsize=(18, 3))
    report = factory.test_on_lds(LabeledDataStorage(X, y, sample_weight))
    report = factory.test_on(X, y, sample_weight=sample_weight)
    report.feature_importance()
    report.features_correlation_matrix()
    report.predictions_scatter()

    # Exercise all three mask flavors: string query, callable, and None.
    val = numpy.mean(X['column0'])
    report_mask(report, "column0 > %f" % val, X)
    report_mask(report, lambda x: numpy.array(x['column0']) < val, X)
    report_mask(report, None, X)
def mean_squared_mod(y_true, values, sample_weight=None):
    """MSE with the (y_true, predictions, sample_weight) signature required by
    feature_importance_shuffling; thin wrapper over sklearn's mean_squared_error."""
    return mean_squared_error(y_true, values, sample_weight=sample_weight)
def report_mask(report, mask, X):
    """
    Exercise every report plot type with the given row mask.

    :param report: report object to plot from
    :param mask: row selector — a string query, a callable, or None for all rows
    :param X: feature DataFrame used to pick columns for the scatter plots
    """
    report.features_correlation_matrix(mask=mask).plot()
    report.feature_importance().plot()
    report.scatter([(X.columns[0], X.columns[2])], mask=mask).plot()
    report.predictions_scatter([X.columns[0], X.columns[2]], mask=mask).plot()
    report.learning_curve(mean_squared_error, mask=mask).plot()
| apache-2.0 |
binghongcha08/pyQMD | GWP/2D/1.0.9/c.py | 28 | 1767 | ##!/usr/bin/python
"""Plot trajectory positions and basis-coefficient magnitudes versus time.

Reads `q.dat` (time in column 0, one trajectory per remaining column) and
`c.dat` (time in column 0, coefficient magnitudes per column) from the
working directory, draws two stacked panels sharing the time axis, and
writes the figure to `traj.pdf` before displaying it.

The large blocks of commented-out alternative plotting code from the
original script have been removed; the executable statements are unchanged.
"""
import numpy as np
import pylab as plt
import seaborn as sns

sns.set_context('poster')

# Two vertically stacked panels sharing the x (time) axis.
f, (ax1, ax2) = plt.subplots(2, sharex=True)

# Top panel: trajectory positions, one curve per column after the time column.
ax1.set_ylim(0, 4)
data = np.genfromtxt(fname='q.dat')
for x in range(1, data.shape[1]):
    ax1.plot(data[:, 0], data[:, x], linewidth=1)
ax1.set_ylabel('position [bohr]')

# Bottom panel: magnitudes of the first 15 coefficients.
data = np.genfromtxt(fname='c.dat')
for x in range(1, 16):
    ax2.plot(data[:, 0], data[:, x], linewidth=1)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('$|c_i|$')

plt.savefig('traj.pdf')
plt.show()
| gpl-3.0 |
justinfinkle/pydiffexp | pydiffexp/pipeline.py | 1 | 21056 | import multiprocessing as mp
import os
import sys
import warnings
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from pydiffexp import DEAnalysis, DEPlot, pairwise_corr
from pydiffexp.gnw import GnwNetResults, GnwSimResults, draw_results, get_graph
from scipy import stats
from sklearn.metrics import mean_squared_error as mse
class DynamicDifferentialExpression(object):
"""
Coordinate training and testing of DDE models
"""
def __init__(self, directory, p=0.05):
self.dir = None
self.project = None
self.p = p
self.training = {} # type: dict
self.test = []
self.estimators = None # type: pd.core.groupby.DataFrameGroupBy
self.dea = None # type: DEAnalysis
self.sim_stats = None # type: pd.DataFrame
self.corr = None # type: pd.DataFrame
self.sim_scores = None # type: pd.DataFrame
self.match = None # type: pd.DataFrame
self.ddegs = None # type: pd.DataFrame
self.ddeg_contrast = None # type: pd.DataFrame
self.times = None # type: list
self.sim_dea = None # type: DEAnalysis
# There is a lot of intermediary data that can be saved to make rerunning the analysis easier
self.set_save_directory(directory)
self.dea_path = None
self.scores_path = None
self.corr_path = None
def set_save_directory(self, path):
    """
    Set the default path for saving intermediate files.

    :param path: path-like; must point to an existing directory
    :raises NotADirectoryError: if ``path`` is not an existing directory
    :return: None
    """
    # The original `os.path.isdir(path)` discarded its result, so an
    # invalid path slipped through silently; enforce the check instead.
    if not os.path.isdir(path):
        raise NotADirectoryError("Not a directory: {}".format(path))
    self.dir = os.path.abspath(path)
    return
def set_training_conditions(self, exp, ctrl):
    """Record the experimental and control condition labels used for training."""
    self.training = {'experimental': exp, 'control': ctrl}
    return
def set_test_conditions(self, t):
    """Append a condition label to the list of test conditions."""
    self.test.append(t)
    return
@staticmethod
def calc_error(pred_lfc, true_lfc, save_path, f):
    """
    Build a genes-by-models DataFrame of prediction error, cached on disk.

    For every true gene trajectory, the error function ``f`` is evaluated
    against every model's predicted trajectory; the result is pickled to
    ``save_path`` so subsequent calls are a cheap file read.

    :param pred_lfc: DataFrame of model predictions (one row per model)
    :param true_lfc: DataFrame of measured values (one row per gene)
    :param save_path: pickle path used as the cache location
    :param f: error function taking (predicted, true) row values
    :return: DataFrame indexed by gene with one column per model
    """
    try:
        # Cache hit: return the previously computed error matrix.
        return pd.read_pickle(save_path)
    except FileNotFoundError:
        print('Calculating model error distributions...', end='', flush=True)
        # One row per gene: the error of every model's prediction against
        # that gene's true trajectory (the null distribution for randomly
        # chosen models).
        rows = {}
        for gene, true_wlfc in true_lfc.iterrows():
            rows[gene] = [f(pred_wlfc, true_wlfc) for pred_wlfc in pred_lfc.values]
        errors = pd.DataFrame.from_dict(rows, orient='index')
        # Columns correspond to the models, in pred_lfc order.
        errors.columns = pred_lfc.index
        errors.to_pickle(save_path)
        print('DONE')
        return errors
def score(self, exp, ctrl, f=None, reduced_set=True) -> pd.DataFrame:
# Set the error function
if f is None:
f = mse
contrast = "{}-{}".format(exp, ctrl)
train_contrast = "{}-{}".format(self.training['experimental'],
self.training['control'])
true_der = self.dea.results[contrast]
true_lfc = true_der.top_table().iloc[:, :len(self.times)]
pred = self.predict(exp)
pred_der = self.sim_dea.results[contrast]
pred_lfc = pred_der.top_table().iloc[:, :len(self.times)]
pred_lfc.index = pred_lfc.index.astype(int)
train_lfc = self.sim_dea.results['ko-wt'].coefficients.abs().mean(axis=1)
train_lfc.index = train_lfc.index.astype(int)
# Reduce the number of genes to score against, for speed purposes
if reduced_set:
true_lfc = true_lfc.loc[list(set(self.match['train_gene']))]
fname = "{}_{}_pred_{}_gtme.pkl".format(self.project, train_contrast, contrast)
gtme_path = os.path.join(self.dir, fname)
gtme = self.calc_error(pred_lfc, true_lfc, gtme_path, f)
# Calculate null model
null_stats = self.estimators.apply(self.sample_stats, gtme, true_lfc,
pred_lfc, true_der, train_lfc)
# Combine the stats together
train_cluster = self.dea.results[train_contrast].cluster_scores.loc[self.ddegs]
test_stats = pd.concat([train_cluster, null_stats], axis=1).dropna()
# Add known cluster info in
test_stats['{}_cluster'.format(exp)] = true_der.score_clustering().loc[test_stats.index, 'Cluster']
# Add the LFC data in as a predictor
train_der = self.dea.results[train_contrast]
test_stats['mean_abs_lfc'] = train_der.coefficients.loc[test_stats.index].abs().mean(axis=1)
test_stats['percent'] = test_stats.grouped_diff / test_stats.random_grouped_e * 100
return test_stats
@staticmethod
def moderate_lfc(df):
    """
    Moderate a median log-fold-change by one-sample t-test confidence.

    With more than two rows, the columnwise median is scaled by
    (1 - p_value) of a t-test against zero; with two or fewer rows the
    raw median is returned unchanged.

    :param df: DataFrame of log fold changes (rows are observations)
    :return: per-column moderated values
    """
    median_lfc = df.median()
    if len(df) <= 2:
        return median_lfc
    confidence = 1 - stats.ttest_1samp(df, 0).pvalue
    return median_lfc * confidence
def sample_stats(self, df, dist_dict, true_lfc, pred_lfc, true_der, trainlfc,
resamples=100, err=mse):
# Test filter
gene = df.name
# preddev = pred_lfc.abs().mean(axis=1)
# df['preddev'] = [preddev.loc[ii] for ii in df['index']]
# df = df[df.preddev < 1].copy()
# For readability
models = df['net'].values.astype(int)
n = len(df)
# if n < 1:
# return
# Get the true log fold change for this dataframe
test = true_lfc.loc[gene]
# t = test*(1-true_der.p_value.loc[gene])
# Get the distribution of errors for all models to this gene
e_dist = dist_dict.loc[gene]
# Calculate prediction error
elfc = pd.concat([e_dist.loc[models], trainlfc.loc[models]], keys=['error', 'sumlfc'], axis=1)
# Group the models log fold change predictions together for each time point
# then calculate the error of the 'averaged' model
grouped_prediction = pred_lfc.loc[models].median()
# p = self.moderate_lfc(pred_lfc.loc[models])
grouped_error = err(test, grouped_prediction)
group_dev = grouped_prediction.abs().mean()
# Calculate a predicted cluster
# n = self.moderate_lfc(pred_lfc)
n = pred_lfc.median()
nonzero = [stats.ttest_1samp(pred_lfc.loc[models, t], 0).pvalue < 0.05 for t in pred_lfc.columns]
grouped_cluster = (np.sign(grouped_prediction)*nonzero).astype(int)
grouped_cluster = str(tuple(grouped_cluster.values.tolist()))
# Average error of each model to the true values
avg_error = e_dist.loc[models].median()
# The dimensions must be consistent
assert dist_dict.shape[1] == pred_lfc.shape[0]
# Get random sample indices
# rs = np.random.randint(0, len(pred_lfc), (resamples, len(df)))
# Calculate null models
# rs_avg_error = [np.median(e_dist.iloc[r]) for r in rs]
# rg_lfc_error = [err(t, self.moderate_lfc(pred_lfc.loc[r])) for r in rs]
# Calculate the average across all the random samples
# The average of the medians should be close to the true median
rs_median = e_dist.median()
rg_median = err(test, n)
# rg_median = np.median(rg_lfc_error)
# Error if all log fold change values are assumed to be zero
all_zeros = err(test, np.zeros((len(test))))
magnitude = err(grouped_prediction, np.zeros(len(grouped_prediction)))
# Return a series of statistics
s_labels = ['n', 'grouped_mag','grouped_e', 'random_grouped_e', 'grouped_diff',
'avg_e', 'random_avg_e', 'avg_diff', 'all_zeros', 'abs_dev', 'group_dev',
'group_cluster']
s_values = [len(df), magnitude, grouped_error, rg_median, rg_median-grouped_error,
avg_error, rs_median, rs_median-avg_error, all_zeros, test.abs().mean(),
group_dev, grouped_cluster]
s = pd.Series(s_values, index=s_labels)
return s
def random_sample(self, df: pd.DataFrame):
    """
    Replace a group's estimator index with uniformly sampled estimators.

    Used to build a null model: keeps the group size but draws the same
    number of index tuples at random from all simulation stats.
    NOTE(review): mutates ``df``'s index in place and returns the same
    object; nondeterministic — no random seed is set here.

    :param df: group DataFrame whose MultiIndex is replaced
    :return: df with a randomly resampled MultiIndex
    """
    n_estimators = len(df)
    possible_estimators = self.sim_stats.index.values
    random_estimators = np.random.choice(possible_estimators, n_estimators)
    df.index = pd.MultiIndex.from_tuples(random_estimators,
                                         names=df.index.names)
    return df
def predict(self, test, estimators=None, ctrl=None, baseline=False):
"""
Calculate predictions from trained estimators
:param test:
:param estimators:
:param ctrl:
:return:
"""
# todo: it would be ideal if this could rapidly query the estimators with flexible time sampling
self.set_test_conditions(test)
if ctrl is None:
ctrl = self.training['control']
contrast = '{}-{}'.format(test, ctrl)
# Set default.
if estimators is None:
estimators = self.estimators
# Calculate estimator predictions
prediction = estimators.apply(self._e_predict, contrast=contrast,
with_base=baseline, ctrl=ctrl)
return prediction
def _e_predict(self, df, contrast, with_base=False, ctrl=None):
"""
Calculate the prediction for a new condition with a set of trained
estimators. Meant to work with groubpy.apply()
:param df:
:param contrast
:param ctrl: the control condition
:param with_base: Add in the baseline values
:return:
"""
# Calculate the average log fold change prediction in the group
sim_predictors = df['net'].astype(str)
est_lfc = self.sim_dea.results[contrast].coefficients.loc[sim_predictors]
est_lfc.columns = self.times
pred = est_lfc
if with_base:
# Calculate the baseline
baseline = self.dea.data.loc[df.name, ctrl].groupby('time').mean()
pred = baseline + est_lfc
pred.index.name = 'net'
return pred
def set_paths(self, prefix, contrast):
    """
    Set paths for intermediate data files.

    :param prefix: project name used to prefix intermediate filenames
    :param contrast: contrast string (e.g. 'ko-wt') included in filenames
    :return: None
    """
    prepend = '{}_{}'.format(prefix, contrast)
    # Correlation pickle lives alongside other intermediates in self.dir.
    self.corr_path = os.path.join(self.dir, '{}_data_to_sim_corr.pkl'.format(prepend))
    return
def fit_data(self, data, **kwargs):
    """
    Fit the measured expression data and record its time points.

    :param data: expression data passed through to fit_dea
    :param kwargs: forwarded to fit_dea; counts/log2 defaults set here
    """
    # Default expects count data and no additional log2
    kwargs.setdefault('counts', True)
    kwargs.setdefault('log2', False)
    self.dea = self.fit_dea(self.dea, data, **kwargs)
    self.times = self.dea.times
def fit_sim(self, data, **kwargs):
    """
    Fit the simulation data (treated as non-count data, log2-transformed).

    :param data: simulation data passed through to fit_dea
    :param kwargs: forwarded to fit_dea; counts/log2 defaults set here
    """
    kwargs.setdefault('counts', False)
    kwargs.setdefault('log2', True)
    self.sim_dea = self.fit_dea(self.sim_dea, data, **kwargs)
@staticmethod
def fit_dea(dea, data=None, override=False, **kwargs):
"""
:param data:
:param default_contrast:
:param kwargs:
:return:
"""
if dea is None or override:
# Set defaults
kwargs.setdefault('reference_labels', ['condition', 'time'])
kwargs.setdefault('index_names', ['condition', 'replicate', 'time'])
# Make the dea object and fit it
new_dea = DEAnalysis(data, **kwargs)
new_dea.fit_contrasts(new_dea.default_contrasts, status=True)
else:
new_dea = dea
return new_dea
@staticmethod
def match_to_gene(x, y, correlation, unique_net=True):
# Match the dtype
y.index = y.index.astype(correlation.columns.dtype)
match = pd.DataFrame()
# For each row in the actual data, match all networks in y with the same
# cluster value
for gene, row in x.iterrows():
candidate_nets = y.loc[y.Cluster == row.Cluster]
cur_corr = correlation.loc[gene, candidate_nets.index.values]
cur_corr.name = 'pearson_r'
ranking = pd.concat([candidate_nets, cur_corr], axis=1)
ranking['mean'] = (ranking['cscore'] + ranking['pearson_r']) / 2
# Remove same network ids that are just different perturbations
if unique_net:
sorted_ranking = ranking.sort_values('mean', ascending=False)
ranking = sorted_ranking[~sorted_ranking.index.get_level_values('id').duplicated(keep='first')].copy()
# Add the gene name that is matched
ranking['train_gene'] = gene
# Add it the dataframe
ranking.index.name = 'net'
match = pd.concat([match, ranking.reset_index()], ignore_index=True)
match.sort_values('mean', ascending=False, inplace=True)
match = match[(match['cscore'] > 0) & (match['pearson_r'] > 0)]
return match
def train(self, project, data, sim_data, override=False, exp='ko', ctrl='wt',
data_kwargs=None, sim_kwargs=None):
"""
Train DDE estimators
:param project:
:param data:
:param sim_data:
:param override:
:param exp:
:param ctrl:
:param data_kwargs:
:param sim_kwargs:
:return:
"""
# Set conditions
contrast = "{}-{}".format(exp, ctrl)
self.set_training_conditions(exp, ctrl)
self.project = project
self.ddeg_contrast = contrast
# Define paths to save or read pickles from
self.set_paths(project, contrast)
# Fit the expression data
if data_kwargs is None:
data_kwargs = {}
self.fit_data(data, override=override, **data_kwargs)
# Fit the simulation data
if sim_kwargs is None:
sim_kwargs = {}
self.fit_sim(sim_data, override=override, **sim_kwargs)
# Get dDEGs
ddegs = self.dea.results[contrast].get_dDegs()
# Also filter out genes that don't pass the basic pairwise test (not DEG)
degs = self.dea.results[contrast].top_table(p=self.p)
ddegs = ddegs.loc[set(ddegs.index).intersection(degs.index)].copy()
# Get the data needed for the correlation
filtered_data = self.dea.data.loc[:, contrast.split('-')]
filtered_sim = self.sim_dea.data.loc[:, contrast.split('-')]
# Correlate the mean trajectories
corr = self.correlate(filtered_data, filtered_sim, override=override)
# Match the genes to simulation networks
sim_scores = self.sim_dea.results[contrast].cluster_scores
match = self.match_to_gene(ddegs, sim_scores, corr, unique_net=False)
self.estimators = match.groupby('train_gene')
self.match = match
self.ddegs = set(ddegs.index)
return match
def correlate(self, exp: pd.DataFrame, sim: pd.DataFrame, sim_node=None,
override=False):
try:
if override:
raise ValueError('Override to retrain')
corr = pd.read_pickle(self.corr_path)
except (FileNotFoundError, ValueError):
# Get group means and zscore
gene_mean = exp.groupby(level=['condition', 'time'], axis=1).mean()
mean_z = gene_mean.groupby(level='condition', axis=1).transform(stats.zscore, ddof=1).fillna(0)
# Correlate zscored means for each gene with each node in every simulation
if sim_node is not None:
sim = sim[sim.index.get_level_values('gene') == sim_node]
sim_mean = sim.groupby(level=['condition', 'time'], axis=1).mean()
sim_mean_z = sim_mean.groupby(level='condition', axis=1).transform(stats.zscore, ddof=1).fillna(0)
all_corr = []
for c in self.training.values():
print('Computing pairwise for {}...'.format(c), end=' ', flush=True)
pcorr, p = pairwise_corr(sim_mean_z.loc[:, c], mean_z.loc[:, c], axis=1)
all_corr.append(pcorr)
print('DONE')
corr = (all_corr[0] + all_corr[1]) / 2
return corr
def to_pickle(self, path=None, force_save=False):
# Note, this is taken directly from pandas generic.py which defines the method in class NDFrame
"""
Pickle (serialize) object to input file path
Parameters
----------
path : string
File path
"""
should_pickle = True
# Set the save directory
if path is None:
fname = "{}_{}_dde.pkl".format(self.project, self.ddeg_contrast)
path = os.path.join(self.dir, fname)
if not os.path.exists(os.path.dirname(path)):
sys.exit('The directory entered to save the pickle to, "%s", does not exist' % os.path.dirname(path))
# If the pickle path exists, ask if the user wants to save over it
if os.path.isfile(path) and not force_save:
print("Pickle file to save: ", path)
answer = input('The proposed pickle file already exists. Would you like to replace it [y/n]?')
if answer != 'y':
should_pickle = False
if answer != 'n':
warnings.warn('Invalid answer')
if should_pickle:
print("Pickling object to %s" % os.path.abspath(path))
pd.to_pickle(self, path)
else:
sys.exit("Object not pickled."
"\nTo save object please rerun with a different file path or choose to rewrite")
return
def compile_sim(sim_dir, times, save_path=None, pp=True, **kwargs):
    """
    Compile GNW network simulation results into a single DataFrame.

    :param sim_dir: directory containing the simulation output
    :param times: time points passed as censor_times when compiling
    :param save_path: if given, pickle the compiled results to this path
    :param pp: whether to compile with parallel processing
    :param kwargs: forwarded to GnwNetResults
    :return: compiled results DataFrame
    """
    # Initialize the results object
    gnr = GnwNetResults(sim_dir, **kwargs)

    print("Compiling simulation results. This could take a while")
    sim_results = gnr.compile_results(censor_times=times, save_intermediates=False,
                                      pp=pp)
    if save_path is not None:
        sim_results.to_pickle(save_path)
    return sim_results
def get_net_data(network, stim, directory, conditions):
    """
    Load and combine GNW timeseries for each condition of one network.

    :param network: network identifier (subdirectory name)
    :param stim: stimulus subdirectory (e.g. 'activating')
    :param directory: base directory of the simulations
    :param conditions: iterable of condition labels to load
    :return: DataFrame of all conditions, transposed and sorted on both axes
    """
    data_dir = '{}/{}/{}/'.format(directory, network, stim)
    frames = []
    for condition in conditions:
        gsr = GnwSimResults(data_dir, network, condition,
                            sim_suffix='dream4_timeseries.tsv',
                            perturb_suffix="dream4_timeseries_perturbations.tsv")
        frames.append(gsr.data)
    combined = pd.concat(frames).T
    return combined.sort_index(axis=0).sort_index(axis=1)
def display_sim(network, stim, perturbation, times, directory, exp_condition='ko', ctrl_condition='wt', node=None):
    """
    Plot simulation results for one network, either a single node's
    trajectory (if ``node`` is given) or the full network drawing.

    :param network: network identifier (subdirectory name)
    :param stim: stimulus subdirectory
    :param perturbation: perturbation level to select from the results
    :param times: time points to display
    :param directory: base directory of the simulations
    :param exp_condition: experimental condition label
    :param ctrl_condition: control condition label
    :param node: if given, only this node's timeseries is plotted
    """
    data_dir = '{}/{}/{}/'.format(directory, network, stim)
    network_structure = "{}/{}/{}_goldstandard_signed.tsv".format(directory, network, network)

    ctrl_gsr = GnwSimResults(data_dir, network, ctrl_condition, sim_suffix='dream4_timeseries.tsv',
                             perturb_suffix="dream4_timeseries_perturbations.tsv")
    exp_gsr = GnwSimResults(data_dir, network, exp_condition, sim_suffix='dream4_timeseries.tsv',
                            perturb_suffix="dream4_timeseries_perturbations.tsv")
    data = pd.concat([ctrl_gsr.data, exp_gsr.data]).T.sort_index(axis=0).sort_index(axis=1)
    if node:
        # Single-node view: timeseries plot only, then bail out.
        dep = DEPlot()
        idx = pd.IndexSlice
        dep.tsplot(data.loc[node, idx[:, :, perturbation, times]], subgroup='Time', no_fill_legend=True)
        plt.tight_layout()
        return
    # Whole-network view: draw log2-scaled results on the gold-standard graph.
    dg = get_graph(network_structure)
    titles = ["x", "y", "PI3K"]
    mapping = {'G': "PI3k"}
    dg = nx.relabel_nodes(dg, mapping)
    draw_results(np.log2(data+1), perturbation, titles, times=times, g=dg)
    plt.tight_layout()
def compile_match_sim_data(matching, base_dir, condition='ki', times=None):
    """
    Collect simulation timeseries for every matched network, in parallel.

    :param matching: DataFrame whose unique index entries are the
        (net, perturbation, node) tuples accepted by get_sim_data
    :param base_dir: base directory containing the GNW simulation output
    :param condition: simulation condition to read for each match
    :param times: optional time points to select; all times if None
    :return: DataFrame with one column per matched simulation
    """
    compile_args = [(ii, base_dir, condition, times) for ii in matching.index.unique()]
    # Context manager guarantees the worker pool is torn down even if a
    # task raises; the original close()/join() pair leaked workers on error.
    with mp.Pool() as pool:
        info = pool.starmap(get_sim_data, compile_args)
    sim_data = pd.concat(info, axis=1)      # type: pd.DataFrame
    return sim_data
def get_sim_data(sim_tuple, directory, condition='ki', times=None):
    """
    Load the timeseries of one node from one matched simulation.

    :param sim_tuple: (net_id, signed_perturbation, node) tuple; the sign of
        the perturbation selects the stimulus mode directory
    :param directory: base directory of the GNW simulations
    :param condition: simulation condition to read
    :param times: optional time points to select; all times if None
    :return: pd.Series named by ``str(sim_tuple)`` with the perturbation
        index level dropped
    """
    net = sim_tuple[0]
    print(net)
    perturb = abs(sim_tuple[1])
    # Sign of the perturbation encodes the stimulus direction.
    mode = 'activating' if sim_tuple[1] >= 0 else 'deactivating'
    node = sim_tuple[2]
    data_dir = "{}/{}/{}/".format(directory, net, mode)
    gsr = GnwSimResults(data_dir, net, condition, sim_suffix='dream4_timeseries.tsv',
                        perturb_suffix="dream4_timeseries_perturbations.tsv")
    idx = pd.IndexSlice
    if times is not None:
        series = gsr.annotated_data.loc[idx[:, :, perturb, times], node]
    else:
        series = gsr.annotated_data.loc[idx[:, :, perturb, :], node]

    # Add a name to make concationation easier
    series.name = str(sim_tuple)

    # Drop the perturbation so NaNs aren't made in the final DataFrame
    series.index = series.index.droplevel('perturbation')

    return series
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/array/percentile.py | 3 | 6288 | from __future__ import absolute_import, division, print_function
from itertools import count
from functools import wraps
from collections import Iterator
import numpy as np
from toolz import merge, merge_sorted
from .core import Array
from ..base import tokenize
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
if not len(a):
return None
if isinstance(q, Iterator):
q = list(q)
if str(a.dtype) == 'category':
result = np.percentile(a.codes, q, interpolation=interpolation)
import pandas as pd
return pd.Categorical.from_codes(result, a.categories, a.ordered)
if np.issubdtype(a.dtype, np.datetime64):
a2 = a.astype('i8')
result = np.percentile(a2, q, interpolation=interpolation)
return result.astype(a.dtype)
if not np.issubdtype(a.dtype, np.number):
interpolation = 'nearest'
return np.percentile(a, q, interpolation=interpolation)
names = ('percentile-%d' % i for i in count(1))
def percentile(a, q, interpolation='linear'):
    """ Approximate percentile of 1-D array

    See numpy.percentile for more information
    """
    if not a.ndim == 1:
        raise NotImplementedError(
            "Percentiles only implemented for 1-d arrays")
    q = np.array(q)
    token = tokenize(a, list(q), interpolation)
    name = 'percentile_chunk-' + token
    # Layer 1: percentiles computed independently on each chunk of `a`.
    dsk = dict(((name, i), (_percentile, (key), q, interpolation))
               for i, key in enumerate(a._keys()))

    name2 = 'percentile-' + token
    # Layer 2: single task merging the chunkwise percentiles, weighted by
    # each chunk's length (a.chunks[0]).
    dsk2 = {(name2, 0): (merge_percentiles, q, [q] * len(a.chunks[0]),
                         sorted(dsk), a.chunks[0], interpolation)}

    dtype = a.dtype
    if np.issubdtype(dtype, np.integer):
        # Integer inputs promote to float, mirroring true division.
        dtype = (np.array([], dtype=dtype) / 0.5).dtype

    return Array(merge(a.dask, dsk, dsk2), name2, chunks=((len(q),),),
                 dtype=dtype)
def merge_percentiles(finalq, qs, vals, Ns, interpolation='lower'):
    """ Combine several percentile calculations of different data.

    Parameters
    ----------

    finalq : numpy.array
        Percentiles to compute (must use same scale as ``qs``).

    qs : sequence of numpy.arrays
        Percentiles calculated on different sets of data.

    vals : sequence of numpy.arrays
        Resulting values associated with percentiles ``qs``.

    Ns : sequence of integers
        The number of data elements associated with each data set.

    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Specify the type of interpolation to use to calculate final
        percentiles.  For more information, see numpy.percentile.

    Examples
    --------

    >>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
    >>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
    >>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
    >>> Ns = [100, 100]  # Both original arrays had 100 elements

    >>> merge_percentiles(finalq, qs, vals, Ns)
    array([ 1,  2,  3,  4, 10, 11, 12, 13])
    """
    if isinstance(finalq, Iterator):
        finalq = list(finalq)
    finalq = np.array(finalq)
    qs = list(map(list, qs))
    vals = list(vals)
    Ns = list(Ns)

    # Drop empty chunks (N == 0): they contribute no observations.
    L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
    if not L:
        raise ValueError("No non-trivial arrays found")
    qs, vals, Ns = L

    # TODO: Perform this check above in percentile once dtype checking is easy
    # Here we silently change meaning
    if str(vals[0].dtype) == 'category':
        # Merge on the integer codes, then rebuild the Categorical result.
        result = merge_percentiles(finalq, qs, [v.codes for v in vals], Ns, interpolation)
        import pandas as pd
        return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
    if not np.issubdtype(vals[0].dtype, np.number):
        interpolation = 'nearest'

    if len(vals) != len(qs) or len(Ns) != len(qs):
        raise ValueError('qs, vals, and Ns parameters must be the same length')

    # transform qs and Ns into number of observations between percentiles
    counts = []
    for q, N in zip(qs, Ns):
        count = np.empty(len(q))
        count[1:] = np.diff(q)
        count[0] = q[0]
        count *= N
        counts.append(count)

    # Sort by calculated percentile values, then number of observations.
    # >95% of the time in this function is spent in `merge_sorted` below.
    # An alternative that uses numpy sort is shown.  It is sometimes
    # comparable to, but typically slower than, `merge_sorted`.
    #
    # >>> A = np.concatenate(map(np.array, map(zip, vals, counts)))
    # >>> A.sort(0, kind='mergesort')

    combined_vals_counts = merge_sorted(*map(zip, vals, counts))
    combined_vals, combined_counts = zip(*combined_vals_counts)

    combined_vals = np.array(combined_vals)
    combined_counts = np.array(combined_counts)

    # percentile-like, but scaled by total number of observations
    combined_q = np.cumsum(combined_counts)

    # rescale finalq percentiles to match combined_q
    desired_q = finalq * sum(Ns)

    # the behavior of different interpolation methods should be
    # investigated further.
    if interpolation == 'linear':
        rv = np.interp(desired_q, combined_q, combined_vals)
    else:
        # Bracket each desired percentile between neighboring combined values.
        left = np.searchsorted(combined_q, desired_q, side='left')
        right = np.searchsorted(combined_q, desired_q, side='right') - 1
        np.minimum(left, len(combined_vals) - 1, left)  # don't exceed max index
        lower = np.minimum(left, right)
        upper = np.maximum(left, right)
        if interpolation == 'lower':
            rv = combined_vals[lower]
        elif interpolation == 'higher':
            rv = combined_vals[upper]
        elif interpolation == 'midpoint':
            rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
        elif interpolation == 'nearest':
            lower_residual = np.abs(combined_q[lower] - desired_q)
            upper_residual = np.abs(combined_q[upper] - desired_q)
            mask = lower_residual > upper_residual
            index = lower  # alias; we no longer need lower
            index[mask] = upper[mask]
            rv = combined_vals[index]
        else:
            raise ValueError("interpolation can only be 'linear', 'lower', "
                             "'higher', 'midpoint', or 'nearest'")
    return rv
| mit |
vberaudi/scipy | scipy/cluster/tests/test_hierarchy.py | 26 | 35153 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
    import matplotlib
    # and set the backend to be Agg (no gui) -- must happen
    matplotlib.use('Agg')
    # before importing pyplot
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # catching Exception keeps the optional-dependency fallback intact
    # while still tolerating any import/backend failure.
    have_matplotlib = False
class TestLinkage(object):
    """Tests for scipy.cluster.hierarchy.linkage (nose yield-style tests).

    Expected linkage matrices come from the ``hierarchy_test_data`` module.
    """
    def test_linkage_empty_distance_matrix(self):
        # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
        y = np.zeros((0,))
        assert_raises(ValueError, linkage, y)
    def test_linkage_tdist(self):
        # Generator: yields one check per method; u('single') additionally
        # exercises a unicode method name.
        for method in ['single', 'complete', 'average', 'weighted', u('single')]:
            yield self.check_linkage_tdist, method
    def check_linkage_tdist(self, method):
        # Tests linkage(Y, method) on the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, method)
        expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
        assert_allclose(Z, expectedZ, atol=1e-10)
    def test_linkage_X(self):
        # Centroid/median/ward require raw observations, hence the X data set.
        for method in ['centroid', 'median', 'ward']:
            yield self.check_linkage_q, method
    def check_linkage_q(self, method):
        # Tests linkage(Y, method) on the Q data set.
        Z = linkage(hierarchy_test_data.X, method)
        expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
        assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
    """Tests for scipy.cluster.hierarchy.inconsistent (nose yield-style)."""
    def test_inconsistent_tdist(self):
        # Yields one check per precomputed depth in the fixture dict.
        for depth in hierarchy_test_data.inconsistent_ytdist:
            yield self.check_inconsistent_tdist, depth
    def check_inconsistent_tdist(self, depth):
        # Compare the inconsistency matrix against stored expected values.
        Z = hierarchy_test_data.linkage_ytdist_single
        assert_allclose(inconsistent(Z, depth),
                        hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
    """Tests for scipy.cluster.hierarchy.cophenet."""
    def test_linkage_cophenet_tdist_Z(self):
        # Tests cophenet(Z) on tdist data set.
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        Z = hierarchy_test_data.linkage_ytdist_single
        M = cophenet(Z)
        assert_allclose(M, expectedM, atol=1e-10)
    def test_linkage_cophenet_tdist_Z_Y(self):
        # Tests cophenet(Z, Y) on tdist data set.
        # With Y given, cophenet also returns the cophenetic correlation c.
        Z = hierarchy_test_data.linkage_ytdist_single
        (c, M) = cophenet(Z, hierarchy_test_data.ytdist)
        expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                              295, 138, 219, 295, 295])
        expectedc = 0.639931296433393415057366837573
        assert_allclose(c, expectedc, atol=1e-10)
        assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
    """Round-trip tests for from_mlab_linkage / to_mlab_linkage.

    MATLAB linkage matrices are 1-indexed and have 3 columns; scipy's are
    0-indexed with a 4th (cluster-size) column.
    """
    def test_mlab_linkage_conversion_empty(self):
        # Tests from/to_mlab_linkage on empty linkage array.
        X = np.asarray([])
        assert_equal(from_mlab_linkage([]), X)
        assert_equal(to_mlab_linkage([]), X)
    def test_mlab_linkage_conversion_single_row(self):
        # Tests from/to_mlab_linkage on linkage array with single row.
        Z = np.asarray([[0., 1., 3., 2.]])
        Zm = [[1, 2, 3]]
        assert_equal(from_mlab_linkage(Zm), Z)
        assert_equal(to_mlab_linkage(Z), Zm)
    def test_mlab_linkage_conversion_multiple_rows(self):
        # Tests from/to_mlab_linkage on linkage array with multiple rows.
        Zm = np.asarray([[3, 6, 138], [4, 5, 219],
                         [1, 8, 255], [2, 9, 268], [7, 10, 295]])
        Z = np.array([[2., 5., 138., 2.],
                      [3., 4., 219., 2.],
                      [0., 7., 255., 3.],
                      [1., 8., 268., 4.],
                      [6., 9., 295., 6.]],
                     dtype=np.double)
        assert_equal(from_mlab_linkage(Zm), Z)
        assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
    """Tests for fcluster/fclusterdata over all flat-clustering criteria.

    Expected flat clusterings per threshold ``t`` come from the
    ``hierarchy_test_data`` fixture dicts; labelings are compared up to
    relabeling via is_isomorphic.
    """
    def test_fclusterdata(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            yield self.check_fclusterdata, t, 'inconsistent'
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fclusterdata, t, 'distance'
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fclusterdata, t, 'maxclust'
    def check_fclusterdata(self, t, criterion):
        # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        X = hierarchy_test_data.Q_X
        T = fclusterdata(X, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expectedT))
    def test_fcluster(self):
        for t in hierarchy_test_data.fcluster_inconsistent:
            yield self.check_fcluster, t, 'inconsistent'
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fcluster, t, 'distance'
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fcluster, t, 'maxclust'
    def check_fcluster(self, t, criterion):
        # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
        expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expectedT))
    def test_fcluster_monocrit(self):
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fcluster_monocrit, t
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fcluster_maxclust_monocrit, t
    def check_fcluster_monocrit(self, t):
        # 'monocrit' with maxdists(Z) must reproduce the 'distance' criterion.
        expectedT = hierarchy_test_data.fcluster_distance[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))
    def check_fcluster_maxclust_monocrit(self, t):
        # 'maxclust_monocrit' with maxdists(Z) must reproduce 'maxclust'.
        expectedT = hierarchy_test_data.fcluster_maxclust[t]
        Z = single(hierarchy_test_data.Q_X)
        T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
    """Test for scipy.cluster.hierarchy.leaders."""
    def test_leaders_single(self):
        # Build a single-linkage tree over the fixture observations, cut it
        # into three flat clusters, and check the reported leader nodes.
        observations = hierarchy_test_data.Q_X
        condensed = pdist(observations)
        tree = linkage(condensed)
        flat = fcluster(tree, criterion='maxclust', t=3)
        expected = (np.array([53, 55, 56]), np.array([2, 3, 1]))
        assert_equal(leaders(tree, flat), expected)
class TestIsIsomorphic(object):
    """Tests for is_isomorphic: equality of flat clusterings up to a
    relabeling of cluster ids."""
    def test_is_isomorphic_1(self):
        # Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
        a = [1, 1, 1]
        b = [2, 2, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_2(self):
        # Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
        a = [1, 7, 1]
        b = [2, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_3(self):
        # Tests is_isomorphic on test case #3 (no flat clusters)
        a = []
        b = []
        assert_(is_isomorphic(a, b))
    def test_is_isomorphic_4A(self):
        # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
        a = [1, 2, 3]
        b = [1, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_4B(self):
        # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
        a = [1, 2, 3, 3]
        b = [1, 3, 2, 3]
        assert_(is_isomorphic(a, b) == False)
        assert_(is_isomorphic(b, a) == False)
    def test_is_isomorphic_4C(self):
        # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
        a = [7, 2, 3]
        b = [6, 3, 2]
        assert_(is_isomorphic(a, b))
        assert_(is_isomorphic(b, a))
    def test_is_isomorphic_5(self):
        # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling).
        for nc in [2, 3, 5]:
            yield self.help_is_isomorphic_randperm, 1000, nc
    def test_is_isomorphic_6(self):
        # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
        # clusters, random permutation of the labeling, slightly
        # nonisomorphic.)
        for nc in [2, 3, 5]:
            yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
        # Generate a random labeling `a`, relabel it through a random
        # permutation into `b`, and (optionally) corrupt `nerrors` entries of
        # `b` so the pair becomes non-isomorphic.  Repeated 3 times.
        for k in range(3):
            a = np.int_(np.random.rand(nobs) * nclusters)
            b = np.zeros(a.size, dtype=np.int_)
            P = np.random.permutation(nclusters)
            for i in xrange(0, a.shape[0]):
                b[i] = P[a[i]]
            if noniso:
                Q = np.random.permutation(nobs)
                b[Q[0:nerrors]] += 1
                b[Q[0:nerrors]] %= nclusters
            assert_(is_isomorphic(a, b) == (not noniso))
            assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
    """Tests for is_valid_linkage.

    Valid linkages are (n-1) x 4 double arrays with non-negative cluster
    indices, distances, and counts; each corruption below must make
    is_valid_linkage return False (or raise with throw=True).
    """
    def test_is_valid_linkage_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
    def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrics of various sizes
        Z = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        Z = Z[:nrow, :ncol]
        assert_(is_valid_linkage(Z) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_int_type(self):
        # Tests is_valid_linkage(Z) with integer type.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(TypeError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_empty(self):
        # Tests is_valid_linkage(Z) with empty linkage.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_linkage(Z) == False)
        assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(is_valid_linkage(Z) == True)
    def test_is_valid_linkage_4_and_up_neg_index_left(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (left).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,0] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_index_right(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative indices (right).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,1] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_dist(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative distances.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,2] = -0.5
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
    def test_is_valid_linkage_4_and_up_neg_counts(self):
        # Tests is_valid_linkage(Z) on linkage on observation sets between
        # sizes 4 and 15 (step size 3) with negative counts.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            Z[i//2,3] = -2
            assert_(is_valid_linkage(Z) == False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
    """Tests for is_valid_im (inconsistency-matrix validation).

    Inconsistency matrices are (n-1) x 4 double arrays; columns are link
    height mean, link height std, link count, inconsistency coefficient.
    """
    def test_is_valid_im_int_type(self):
        # Tests is_valid_im(R) with integer type.
        R = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(is_valid_im(R) == False)
        assert_raises(TypeError, is_valid_im, R, throw=True)
    def test_is_valid_im_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_im_various_size, nrow, ncol, valid
    def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with linkage matrics of various sizes
        R = np.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=np.double)
        R = R[:nrow, :ncol]
        assert_(is_valid_im(R) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_empty(self):
        # Tests is_valid_im(R) with empty inconsistency matrix.
        R = np.zeros((0, 4), dtype=np.double)
        assert_(is_valid_im(R) == False)
        assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            assert_(is_valid_im(R) == True)
    def test_is_valid_im_4_and_up_neg_index_left(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height means.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,0] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up_neg_index_right(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link height standard deviations.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,1] = -2.0
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
    def test_is_valid_im_4_and_up_neg_dist(self):
        # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
        # (step size 3) with negative link counts.
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            R = inconsistent(Z)
            R[i//2,2] = -0.5
            assert_(is_valid_im(R) == False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
    """Tests for num_obs_linkage: number of original observations in a
    linkage (rows + 1 for a valid linkage)."""
    def test_num_obs_linkage_empty(self):
        # Tests num_obs_linkage(Z) with empty linkage.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, num_obs_linkage, Z)
    def test_num_obs_linkage_1x4(self):
        # Tests num_obs_linkage(Z) on linkage over 2 observations.
        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        assert_equal(num_obs_linkage(Z), 2)
    def test_num_obs_linkage_2x4(self):
        # Tests num_obs_linkage(Z) on linkage over 3 observations.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.double)
        assert_equal(num_obs_linkage(Z), 3)
    def test_num_obs_linkage_4_and_up(self):
        # Tests num_obs_linkage(Z) on linkage on observation sets between sizes
        # 4 and 15 (step size 3).
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
    """Tests that leaves_list matches a pre-order traversal of the tree
    built by to_tree."""
    def test_leaves_list_1x4(self):
        # Tests leaves_list(Z) on a 1x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1])
    def test_leaves_list_2x4(self):
        # Tests leaves_list(Z) on a 2x4 linkage.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=np.double)
        to_tree(Z)
        assert_equal(leaves_list(Z), [0, 1, 2])
    def test_leaves_list_Q(self):
        for method in ['single', 'complete', 'average', 'weighted', 'centroid',
                       'median', 'ward']:
            yield self.check_leaves_list_Q, method
    def check_leaves_list_Q(self, method):
        # Tests leaves_list(Z) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        node = to_tree(Z)
        assert_equal(node.pre_order(), leaves_list(Z))
    def test_Q_subtree_pre_order(self):
        # Tests that pre_order() works when called on sub-trees.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        node = to_tree(Z)
        # Pre-order of the root equals the concatenated pre-orders of its
        # left and right subtrees.
        assert_equal(node.pre_order(), (node.get_left().pre_order()
                                        + node.get_right().pre_order()))
class TestCorrespond(TestCase):
    """Tests for correspond: whether a linkage and a condensed distance
    matrix refer to the same number of observations."""
    def test_correspond_empty(self):
        # Tests correspond(Z, y) with empty linkage and condensed distance matrix.
        y = np.zeros((0,))
        Z = np.zeros((0,4))
        assert_raises(ValueError, correspond, Z, y)
    def test_correspond_2_and_up(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes.
        for i in xrange(2, 4):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(correspond(Z, y))
        for i in xrange(4, 15, 3):
            y = np.random.rand(i*(i-1)//2)
            Z = linkage(y)
            assert_(correspond(Z, y))
    def test_correspond_4_and_up(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
                       list(zip(list(range(3, 5)), list(range(2, 4))))):
            y = np.random.rand(i*(i-1)//2)
            y2 = np.random.rand(j*(j-1)//2)
            Z = linkage(y)
            Z2 = linkage(y2)
            assert_equal(correspond(Z, y2), False)
            assert_equal(correspond(Z2, y), False)
    def test_correspond_4_and_up_2(self):
        # Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(2, 7)), list(range(16, 21))))):
            y = np.random.rand(i*(i-1)//2)
            y2 = np.random.rand(j*(j-1)//2)
            Z = linkage(y)
            Z2 = linkage(y2)
            assert_equal(correspond(Z, y2), False)
            assert_equal(correspond(Z2, y), False)
    def test_num_obs_linkage_multi_matrix(self):
        # Tests num_obs_linkage with observation matrices of multiple sizes.
        for n in xrange(2, 10):
            X = np.random.rand(n, 4)
            Y = pdist(X)
            Z = linkage(Y)
            assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
    """Tests for is_monotonic: merge distances must be non-decreasing down
    the linkage."""
    def test_is_monotonic_empty(self):
        # Tests is_monotonic(Z) on an empty linkage.
        Z = np.zeros((0, 4))
        assert_raises(ValueError, is_monotonic, Z)
    def test_is_monotonic_1x4(self):
        # Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_2x4_T(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_2x4_F(self):
        # Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
        Z = np.asarray([[0, 1, 0.4, 2],
                        [2, 3, 0.3, 3]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_T(self):
        # Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_3x4_F1(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.2, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_F2(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
        Z = np.asarray([[0, 1, 0.8, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_3x4_F3(self):
        # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
        Z = np.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.2, 4]], dtype=np.double)
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_tdist_linkage1(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Expecting True.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_equal(is_monotonic(Z), True)
    def test_is_monotonic_tdist_linkage2(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # tdist data set. Perturbing. Expecting False.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        Z[2,2] = 0.0
        assert_equal(is_monotonic(Z), False)
    def test_is_monotonic_Q_linkage(self):
        # Tests is_monotonic(Z) on clustering generated by single linkage on
        # Q data set. Expecting True.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, 'single')
        assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
    """Tests maxdists against the pure-Python reference implementation
    calculate_maximum_distances (defined below in this module)."""
    def test_maxdists_empty_linkage(self):
        # Tests maxdists(Z) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxdists, Z)
    def test_maxdists_one_cluster_linkage(self):
        # Tests maxdists(Z) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        MD = maxdists(Z)
        expectedMD = calculate_maximum_distances(Z)
        assert_allclose(MD, expectedMD, atol=1e-15)
    def test_maxdists_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            yield self.check_maxdists_Q_linkage, method
    def check_maxdists_Q_linkage(self, method):
        # Tests maxdists(Z) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        MD = maxdists(Z)
        expectedMD = calculate_maximum_distances(Z)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
    """Tests maxinconsts against the pure-Python reference implementation
    calculate_maximum_inconsistencies (defined below in this module)."""
    def test_maxinconsts_empty_linkage(self):
        # Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxinconsts, Z, R)
    def test_maxinconsts_difrow_linkage(self):
        # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
        # different numbers of clusters. Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxinconsts, Z, R)
    def test_maxinconsts_one_cluster_linkage(self):
        # Tests maxinconsts(Z, R) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxinconsts(Z, R)
        expectedMD = calculate_maximum_inconsistencies(Z, R)
        assert_allclose(MD, expectedMD, atol=1e-15)
    def test_maxinconsts_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            yield self.check_maxinconsts_Q_linkage, method
    def check_maxinconsts_Q_linkage(self, method):
        # Tests maxinconsts(Z, R) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        MD = maxinconsts(Z, R)
        expectedMD = calculate_maximum_inconsistencies(Z, R)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
    """Tests maxRstat (maximum R-statistic of column ``i`` over each
    subtree) against the pure-Python reference implementation
    calculate_maximum_inconsistencies."""
    def test_maxRstat_invalid_index(self):
        for i in [3.3, -1, 4]:
            yield self.check_maxRstat_invalid_index, i
    def check_maxRstat_invalid_index(self, i):
        # Tests maxRstat(Z, R, i). Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        if isinstance(i, int):
            # Out-of-range integer column index -> ValueError.
            assert_raises(ValueError, maxRstat, Z, R, i)
        else:
            # Non-integer column index -> TypeError.
            assert_raises(TypeError, maxRstat, Z, R, i)
    def test_maxRstat_empty_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_empty_linkage, i
    def check_maxRstat_empty_linkage(self, i):
        # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxRstat, Z, R, i)
    def test_maxRstat_difrow_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_difrow_linkage, i
    def check_maxRstat_difrow_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
        # different numbers of clusters. Expecting exception.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxRstat, Z, R, i)
    def test_maxRstat_one_cluster_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_one_cluster_linkage, i
    def check_maxRstat_one_cluster_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage with one cluster.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        # BUG FIX: the column index ``i`` was previously hard-coded to 1,
        # so the four yielded cases all ran the identical check.
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)
    def test_maxRstat_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            for i in range(4):
                yield self.check_maxRstat_Q_linkage, method, i
    def check_maxRstat_Q_linkage(self, method, i):
        # Tests maxRstat(Z, R, i) on the Q data set
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        # BUG FIX: use the parameterized column index ``i`` (was 1).
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
    """Tests for dendrogram: leaf ordering, plotting coordinates,
    truncation modes, and link color palettes.  Plotting tests are skipped
    when matplotlib is unavailable (see have_matplotlib above)."""
    def test_dendrogram_single_linkage_tdist(self):
        # Tests dendrogram calculation on single linkage of the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, no_plot=True)
        leaves = R["leaves"]
        assert_equal(leaves, [2, 5, 1, 0, 3, 4])
    def test_valid_orientation(self):
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_raises(ValueError, dendrogram, Z, orientation="foo")
    @dec.skipif(not have_matplotlib)
    def test_dendrogram_plot(self):
        for orientation in ['top', 'bottom', 'left', 'right']:
            yield self.check_dendrogram_plot, orientation
    def check_dendrogram_plot(self, orientation):
        # Tests dendrogram plotting.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        # Expected plot geometry: dcoord are link heights, icoord are the
        # x-positions of the U-shaped links, ivl the leaf labels.
        expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4]}
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        plt.close()
        assert_equal(R1, expected)
        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        assert_equal(R2, expected)
    @dec.skipif(not have_matplotlib)
    def test_dendrogram_truncate_mode(self):
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        # 'lastp' keeps only the last p merged clusters.
        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['b'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9]})
        # 'mtica' truncates by distance level.
        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7]})
    def test_dendrogram_colors(self):
        # Tests dendrogram plots with alternate colors
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        set_link_color_palette(['c', 'm', 'y', 'k'])
        R = dendrogram(Z, no_plot=True,
                       above_threshold_color='g', color_threshold=250)
        # Restore the default palette so later tests are unaffected.
        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
        color_list = R['color_list']
        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
    """Reference implementation used to verify maxdists.

    For each non-singleton cluster (row of ``Z``), returns the maximum
    linkage distance found in its subtree, computed bottom-up.
    """
    n_obs = Z.shape[0] + 1
    maxima = np.zeros((n_obs - 1,))
    for row in range(n_obs - 1):
        # Candidates: child-subtree maxima (0.0 for leaf children, matching
        # the original zero-initialized scratch vector) and this merge's
        # own distance.
        candidates = [0.0, 0.0, Z[row, 2]]
        left, right = Z[row, 0], Z[row, 1]
        if left >= n_obs:
            candidates[0] = maxima[int(left) - n_obs]
        if right >= n_obs:
            candidates[1] = maxima[int(right) - n_obs]
        maxima[row] = max(candidates)
    return maxima
def calculate_maximum_inconsistencies(Z, R, k=3):
    """Reference implementation used to verify maxinconsts/maxRstat.

    For each non-singleton cluster (row of ``Z``), returns the maximum of
    column ``k`` of the inconsistency matrix ``R`` over its subtree,
    computed bottom-up.
    """
    n_obs = Z.shape[0] + 1
    maxima = np.zeros((n_obs - 1,))
    for row in range(n_obs - 1):
        # Candidates: child-subtree maxima (0.0 for leaf children, matching
        # the original zero-initialized scratch vector) and this row's own
        # R statistic.
        candidates = [0.0, 0.0, R[row, k]]
        left, right = Z[row, 0], Z[row, 1]
        if left >= n_obs:
            candidates[0] = maxima[int(left) - n_obs]
        if right >= n_obs:
            candidates[1] = maxima[int(right) - n_obs]
        maxima[row] = max(candidates)
    return maxima
def test_euclidean_linkage_value_error():
    # Euclidean-only linkage methods must reject a non-Euclidean metric.
    observations = [[1, 1], [1, 1]]
    for euclid_method in scipy.cluster.hierarchy._cpy_euclid_methods:
        assert_raises(ValueError, linkage, observations,
                      method=euclid_method, metric='cityblock')
def test_2x2_linkage():
    # A single condensed distance and the equivalent 2x2 square matrix
    # must yield identical linkages.
    Z_condensed = linkage([1], method='single', metric='euclidean')
    Z_square = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
    assert_allclose(Z_condensed, Z_square)
if __name__ == "__main__":
    # Allow running this test module directly (numpy's nose-based runner).
    run_module_suite()
| bsd-3-clause |
jat255/hyperspyUI | hyperspyui/plugins/segmentation.py | 3 | 4922 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
from hyperspyui.plugins.plugin import Plugin
from hyperspy.signal import BaseSignal
import numpy as np
from hyperspy.signals import Signal1D, Signal2D
from hyperspyui.tools import MultiSelectionTool
from hyperspyui.util import win2sig
from hyperspy.misc.rgb_tools import regular_array2rgbx
import matplotlib.cm as plt_cm
# TODO: Add dialog for manual editing of ROIs + Preview checkbox.
class Segmentation(Plugin):
    """Interactive image segmentation by thresholding the intensity
    histogram.

    Workflow: `start()` plots the histogram of the selected image plus a
    gray RGB preview; range ROIs dragged on the histogram are previewed as
    colored overlays (`_on_update`) and finally converted to an integer
    label map (`segment`).  `self.map` associates each histogram signal
    with its ``(source image, preview signal)`` pair.
    """
    name = "Segmentation"

    def create_tools(self):
        """Create the histogram range-selection tool and wire its callbacks."""
        self.tool = MultiSelectionTool()
        self.tool.name = 'Segmentation tool'
        self.tool.icon = 'segmentation.svg'
        self.tool.category = 'Image'
        self.tool.updated[BaseSignal, list].connect(self._on_update)
        self.tool.accepted[BaseSignal, list].connect(self.segment)
        self.tool.validator = self._tool_signal_validator
        self.add_tool(self.tool, self._select_image)
        # Maps histogram signal -> (source image signal, RGB preview signal).
        self.map = {}
        self.ui.actions[self.tool.name].triggered.connect(
            lambda c=None: self.start())

    def _select_image(self, win, action):
        """Signal selection callback for actions that are only valid for
        selected Signals.
        """
        sw = win2sig(win, self.ui.signals, self.ui._plotting_signal)
        # Only 2D (image) signals can be segmented.
        if sw is None or not sw.signal.axes_manager.signal_dimension == 2:
            action.setEnabled(False)
        else:
            action.setEnabled(True)

    def _tool_signal_validator(self, signal, axes):
        """Accept only histogram signals that this plugin created."""
        if not self.tool._default_validator(signal, axes):
            return False
        return signal in self.map

    def start(self, signal=None):
        """Plot the histogram and a grayscale preview of *signal* (or the
        currently selected signal) and register them in ``self.map``."""
        if signal is None:
            signal = self.ui.get_selected_signal()
        data = signal()
        hist = signal._get_signal_signal(data).get_histogram(1000)
        hist.plot()
        s_out = Signal1D(self._make_gray(data))
        s_out.change_dtype('rgb8')
        s_out.plot()
        self.map[hist] = (signal, s_out)

    def _make_gray(self, data):
        """Return *data* normalized to [0, 1] and rendered as RGBA uint8."""
        # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
        # 2.0; the builtin ``float`` is the documented equivalent.
        data = data.astype(float) - np.nanmin(data)
        data /= np.nanmax(data)
        return (255 * plt_cm.gray(data)).astype('uint8')

    def segment(self, signal, rois):
        """Build an integer label map from *rois* (histogram ranges).

        Pixels inside ROI ``i`` get label ``i + 1``; pixels outside every
        ROI stay 0.  Closes the histogram/preview plots and records the
        equivalent scripting code.
        """
        if signal is None:
            signal = self.ui.get_selected_signal()
        # *signal* may be the histogram, the source image, or the preview;
        # resolve which mapping entry it belongs to.
        if signal in self.map:
            histogram = signal
            source, s_out = self.map[signal]
        else:
            found = False
            for h, (s, s_out) in self.map.items():
                if signal in (s, s_out):
                    found = True
                    histogram = h
                    source = s
                    break
            if not found:
                histogram = None
                s_out = None
                source = signal
        if histogram is not None:
            self.ui.lut_signalwrapper[histogram].close()
        if s_out is not None:
            self.ui.lut_signalwrapper[s_out].close()
        # Pick the smallest unsigned dtype that can hold all labels.
        N = len(rois)
        if N <= 256:
            dtype = np.uint8
        elif N <= 2**16:
            dtype = np.uint16
        else:
            dtype = np.uint32
        src_data = source()
        # BUG FIX: the previous ``data[...] = np.nan`` tried to write NaN
        # into an unsigned-integer array, which numpy rejects; zeros already
        # mark "not in any ROI".
        data = np.zeros(src_data.shape, dtype)
        for i, r in enumerate(rois):
            # Checks have to be inclusive to catch edges
            mask = (src_data <= r.right) & (src_data >= r.left)
            data[mask] = i + 1
        s_seg = Signal2D(data)
        s_seg.plot(cmap=plt_cm.jet)
        roi_str = '[' + ',\n'.join(['hs.roi.' + str(r) for r in rois]) + ']'
        self.record_code('segment_rois = ' + roi_str)
        self.record_code('<p>.segment(None, segment_rois)')

    def _on_update(self, histogram, rois):
        """Live-preview callback: recolor the preview signal so each ROI's
        intensity range shows in a distinct hue over the gray base."""
        if histogram not in self.map:
            return
        source, s_out = self.map[histogram]
        N = len(rois)
        data = source()
        gray = self._make_gray(data)
        s_out.data = regular_array2rgbx(gray)
        for i in range(N):
            color = (255 * plt_cm.hsv([float(i) / max(N, 10)])).astype('uint8')
            color = regular_array2rgbx(color)
            r = rois[i]
            # Preview uses a half-open range; the final segmentation in
            # segment() is inclusive on both edges.
            mask = (data < r.right) & (data >= r.left)
            s_out.data[mask] = color
        s_out.update_plot()
| gpl-3.0 |
gef756/statsmodels | statsmodels/sandbox/examples/thirdparty/ex_ratereturn.py | 33 | 4394 | # -*- coding: utf-8 -*-
"""Playing with correlation of DJ-30 stock returns
this uses pickled data that needs to be created with findow.py
to see graphs, uncomment plt.show()
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
from statsmodels.compat.python import cPickle
import statsmodels.api as sm
import statsmodels.sandbox as sb
import statsmodels.sandbox.tools as sbtools
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
# Load the pickled DJ-30 returns DataFrame produced by findow.py.
# The original code used the Python 2 builtin ``file``, which was removed
# in Python 3; ``open`` inside a ``with`` block also guarantees the file
# handle is closed even when unpickling fails.
try:
    with open('dj30rr', 'rb') as fobj:
        rrdm = cPickle.load(fobj)
except Exception: #blanket for any unpickling error
    print("Error with unpickling, a new pickle file can be created with findow_1")
    raise
# Ticker symbols and the returns matrix (observations in rows, stocks in
# columns); the first row is dropped and ~400 observations are kept.
ticksym = rrdm.columns.tolist()
rr = rrdm.values[1:400]
# Raw correlation matrix of the stock returns.
rrcorr = np.corrcoef(rr, rowvar=0)
plot_corr(rrcorr, xnames=ticksym)
nvars = rrcorr.shape[0]
# Histogram of the off-diagonal (upper-triangle) correlation coefficients.
plt.figure()
plt.hist(rrcorr[np.triu_indices(nvars,1)])
plt.title('Correlation Coefficients')
# Full PCA via SVD first, to inspect the cumulative explained variance ...
xreda, facta, evaa, evea = sbtools.pcasvd(rr)
evallcs = (evaa).cumsum()
print(evallcs/evallcs[-1])
# ... then a reduced decomposition keeping only 4 principal components.
xred, fact, eva, eve = sbtools.pcasvd(rr, keepdim=4)
pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA')
# Correlation of the part the 4-factor reconstruction leaves unexplained.
resid = rr-xred
residcorr = np.corrcoef(resid, rowvar=0)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals')
plt.matshow(residcorr)
plt.imshow(residcorr, cmap=plt.cm.jet, interpolation='nearest',
           extent=(0,30,0,30), vmin=-1.0, vmax=1.0)
plt.colorbar()
# 2x2 grid of the three correlation matrices on a common (0, 1) color scale.
normcolor = (0,1) #False #True
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
plot_corr(rrcorr, xnames=ticksym, normcolor=normcolor, ax=ax)
ax2 = fig.add_subplot(2,2,3)
#pcacorr = np.corrcoef(xred, rowvar=0)
plot_corr(pcacorr, xnames=ticksym, title='Correlation PCA',
          normcolor=normcolor, ax=ax2)
ax3 = fig.add_subplot(2,2,4)
plot_corr(residcorr, xnames=ticksym, title='Correlation Residuals',
          normcolor=normcolor, ax=ax3)
import matplotlib as mpl
# Collect the AxesImage artists so a single shared colorbar can be attached
# next to the grid of subplots.
images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
print(images)
print(ax.get_children())
#cax = fig.add_subplot(2,2,2)
#[0.85, 0.1, 0.075, 0.8]
fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
fig.colorbar(images[0], cax=cax)
fig.savefig('corrmatrixgrid.png', dpi=120)
def cov2corr(cov):
    """Convert a covariance matrix to the corresponding correlation matrix.

    Each element ``cov[i, j]`` is divided by ``std[i] * std[j]``, where
    ``std`` holds the square roots of the diagonal (the marginal standard
    deviations), so the result has ones on the diagonal.
    """
    stds = np.sqrt(np.diag(cov))
    scale = np.outer(stds, stds)
    return cov / scale
if has_sklearn:
    # Compare shrinkage / robust covariance estimators with the raw
    # sample correlation matrix.
    # NOTE(review): recent scikit-learn names the robust estimator
    # ``MinCovDet``; ``MCD`` only exists in old releases -- confirm the
    # sklearn version this example was written against.
    from sklearn.covariance import LedoitWolf, OAS, MCD
    # Ledoit-Wolf shrinkage covariance -> correlation.
    lw = LedoitWolf(store_precision=False)
    lw.fit(rr, assume_centered=False)
    cov_lw = lw.covariance_
    corr_lw = cov2corr(cov_lw)
    # Oracle Approximating Shrinkage estimator.
    oas = OAS(store_precision=False)
    oas.fit(rr, assume_centered=False)
    cov_oas = oas.covariance_
    corr_oas = cov2corr(cov_oas)
    # Minimum Covariance Determinant (robust) estimator.
    mcd = MCD()#.fit(rr, reweight=None)
    mcd.fit(rr, assume_centered=False)
    cov_mcd = mcd.covariance_
    corr_mcd = cov2corr(cov_mcd)
    # Plot the four correlation estimates in a 2x2 grid with one shared
    # colorbar (same AxesImage-collection trick as above).
    titles = ['raw correlation', 'lw', 'oas', 'mcd']
    normcolor = None
    fig = plt.figure()
    for i, c in enumerate([rrcorr, corr_lw, corr_oas, corr_mcd]):
    #for i, c in enumerate([np.cov(rr, rowvar=0), cov_lw, cov_oas, cov_mcd]):
        ax = fig.add_subplot(2,2,i+1)
        plot_corr(c, xnames=None, title=titles[i],
                  normcolor=normcolor, ax=ax)
    images = [c for ax in fig.axes for c in ax.get_children() if isinstance(c, mpl.image.AxesImage)]
    fig. subplots_adjust(bottom=0.1, right=0.9, top=0.9)
    cax = fig.add_axes([0.9, 0.1, 0.025, 0.8])
    fig.colorbar(images[0], cax=cax)
    fig.savefig('corrmatrix_sklearn.png', dpi=120)
    # Pairwise distances between the estimates: sum of squared differences
    # and maximum absolute difference, for every pair of matrices.
    corrli = [rrcorr, corr_lw, corr_oas, corr_mcd, pcacorr]
    diffssq = np.array([[((ci-cj)**2).sum() for ci in corrli]
                       for cj in corrli])
    diffsabs = np.array([[np.max(np.abs(ci-cj)) for ci in corrli]
                        for cj in corrli])
    print(diffssq)
    print('\nmaxabs')
    print(diffsabs)
    fig2 = plot_corr_grid(corrli+[residcorr], ncols=3,
                          titles=titles+['pca', 'pca-residual'],
                          xnames=[], ynames=[])
    fig2.savefig('corrmatrix_sklearn_2.png', dpi=120)
    #plt.show()
    #plt.close('all')
| bsd-3-clause |
fbagirov/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# Three base classifiers plus a soft-voting ensemble that weights the
# random forest 5x as heavily as the other two models.
log_clf = LogisticRegression(random_state=123)
forest_clf = RandomForestClassifier(random_state=123)
bayes_clf = GaussianNB()

# Tiny toy dataset: two samples per class.
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

eclf = VotingClassifier(
    estimators=[('lr', log_clf), ('rf', forest_clf), ('gnb', bayes_clf)],
    voting='soft',
    weights=[1, 1, 5])

# Fit every model and collect its class-probability predictions.
probas = []
for model in (log_clf, forest_clf, bayes_clf, eclf):
    probas.append(model.fit(X, y).predict_proba(X))

# Probabilities assigned to each class for the first sample only.
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]

# Bar chart layout: one group per model, two bars (classes) per group.
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# Bars for the three base classifiers (ensemble slot zeroed out).
p1 = ax.bar(ind, np.hstack([class1_1[:-1], [0]]), width, color='green')
p2 = ax.bar(ind + width, np.hstack([class2_1[:-1], [0]]), width,
            color='lightgreen')

# Bars for the VotingClassifier (base-classifier slots zeroed out).
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# Separator between the individual models and the ensemble, plus labels.
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
DmitryYurov/BornAgain | dev-tools/analyze/lines_of_code.py | 3 | 1459 | """
Creates picture with number of lines of code.
Usage: python3 lines_of_code.py
The command should be executed in the directory where lines_of_code.py is located
(i.e. in <BornAgain>/dev-tools/analyze)
"""
import sys
if sys.version_info < (3, 0):
exit("Requires python3, exiting ...")
import os
from baloc import HistoryCollector
from baloc import history_plot
import matplotlib.pyplot as plt
gitlog_filename = "gitlog.tmp"
def process_loc_number(targetfolder="../..", gitlog=gitlog_filename):
    """Collect the git lines-of-code history of *targetfolder*.

    Runs ``HistoryCollector`` from inside *targetfolder* and writes its
    report into the original working directory as *gitlog*.

    The previous implementation left the process stranded in
    *targetfolder* when ``HistoryCollector`` raised; the ``try/finally``
    guarantees the working directory is always restored.
    """
    prevfolder = os.getcwd()
    os.chdir(targetfolder)
    try:
        collector = HistoryCollector()
        collector.run()
        collector.save_report(os.path.join(prevfolder, gitlog))
    finally:
        # Restore the caller's working directory even on failure.
        os.chdir(prevfolder)
def plot_loc_number(gitlog=gitlog_filename):
    """Plot the lines-of-code history recorded in *gitlog*."""
    # Drawing is delegated to baloc; show() then blocks on the GUI window.
    history_plot(gitlog)
    plt.show()
if __name__ == '__main__':
    # Interactive entry point: print a small menu and dispatch on the choice.
    banner = '-' * 80
    print(banner)
    print("Generating picture for number of lines of code")
    print(banner)
    print(" ")
    print("Possible options:")
    print("[0] - Generate {0} and picture.".format(gitlog_filename))
    print("[1] - Generate only {0}.".format(gitlog_filename))
    print("[2] - Generate picture using existing {0}.".format(gitlog_filename))
    print("[3] - Exit")
    # An empty answer falls back to the default choice "0".
    var = int(input("Enter your choice [0]: ") or "0")
    if var == 0:
        # Full pipeline: collect the git history, then draw it.
        process_loc_number()
        plot_loc_number()
    elif var == 1:
        process_loc_number()
    elif var == 2:
        plot_loc_number()
    else:
        exit("Good bye")
| gpl-3.0 |
ClimbsRocks/scikit-learn | benchmarks/bench_covertype.py | 57 | 7378 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split.

    Parameters
    ----------
    dtype : numpy dtype, default np.float32
        Storage dtype of the feature matrix.
    order : {'C', 'F'}, default 'C'
        Memory layout of the feature matrix.
    random_state : int, default 13
        Seed used when shuffling the downloaded dataset.

    Returns
    -------
    X_train, X_test, y_train, y_test
        Feature matrices and binary targets (1 = not class "spruce/fir").
    """
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binary target: class 1 (spruce/fir) against all other cover types.
    # ``np.int`` was removed in NumPy 1.24; it was an alias of the builtin
    # ``int``, so using ``int`` here is behavior-identical.
    y = (data['target'] != 1).astype(int)
    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate models for the benchmark, keyed by their CLI name
# (see the ``--classifiers`` argument below).
# NOTE(review): ``n_iter`` (SGDClassifier) and ``loss="l2"`` (LinearSVC)
# are parameter spellings of old scikit-learn releases (later renamed to
# ``max_iter`` and ``"squared_hinge"``) -- confirm the sklearn version
# this benchmark targets before running.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # Command-line interface: choose classifiers, parallelism, data layout
    # and the random seed shared by all estimators.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())
    print(__doc__)
    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])
    # Dataset summary before any training happens.
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()
        # Propagate the shared seed into every *random_state parameter the
        # estimator exposes, so all runs are reproducible.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})
        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])
        # Time fitting and prediction separately.
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start
        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start
        error[name] = zero_one_loss(y_test, y_pred)
        print("done")
    # Final report, sorted from best to worst error rate.
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier  ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))
    print()
| bsd-3-clause |
winklerand/pandas | pandas/tests/series/test_timeseries.py | 1 | 32325 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
from datetime import datetime, timedelta, time
import pandas as pd
import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, StringIO, product
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.offsets import BDay, BMonthEnd
from pandas import (Index, Series, date_range, NaT, concat, DataFrame,
Timestamp, to_datetime, offsets,
timedelta_range)
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, _skip_if_has_locale)
from pandas.tests.series.common import TestData
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def assert_range_equal(left, right):
    """Assert that two datetime indexes agree on values, freq and tz."""
    assert left.equals(right)
    assert left.freq == right.freq
    assert left.tz == right.tz
class TestTimeSeries(TestData):
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, self.ts.index)
tm.assert_index_equal(unshifted.index, self.ts.index)
tm.assert_numpy_array_equal(unshifted.valid().values,
self.ts.values[:-1])
offset = BDay()
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.valid().values, ps.values[:-1])
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, BDay())
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
pytest.raises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, freq='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH 8129
index = date_range('2000-01-01', periods=5)
for dtype in ['int32', 'int64']:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
assert_series_equal(result, expected)
# xref 8260
# with tz
s = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s - s.shift()
exp = Series(TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo')
assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='CET'), name='foo')
pytest.raises(ValueError, lambda: s - s2)
    def test_shift2(self):
        # Shifting by a freq that differs from the index freq moves the
        # index timestamps rather than the data values.
        ts = Series(np.random.randn(5),
                    index=date_range('1/1/2000', periods=5, freq='H'))
        result = ts.shift(1, freq='5T')
        exp_index = ts.index.shift(1, freq='5T')
        tm.assert_index_equal(result.index, exp_index)
        # GH #1063, multiple of same base
        result = ts.shift(1, freq='4H')
        exp_index = ts.index + offsets.Hour(4)
        tm.assert_index_equal(result.index, exp_index)
        # Shifting an index without a freq by a bare integer is ambiguous
        # and must raise.
        idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
        pytest.raises(ValueError, idx.shift, 1)
def test_shift_dst(self):
# GH 13926
dates = date_range('2016-11-06', freq='H', periods=10, tz='US/Eastern')
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(1)
exp_vals = [NaT] + dates.asobject.values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(-2)
exp_vals = dates.asobject.values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype='datetime64[ns, US/Eastern]')
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
assert_series_equal(shifted, shifted3)
pytest.raises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),
name='ts')
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
offset = BDay()
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert (len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert (len(truncated) == 0)
pytest.raises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_truncate_nonsortedindex(self):
# GH 17935
s = pd.Series(['a', 'b', 'c', 'd', 'e'],
index=[5, 3, 2, 9, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
s.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.Series(np.random.randn(len(rng)), index=rng)
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
ts.sort_values(ascending=False).truncate(before='2011-11',
after='2011-12')
def test_asfreq(self):
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30), datetime(
2009, 11, 30), datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq(BDay())
monthly_ts = daily_ts.asfreq(BMonthEnd())
tm.assert_series_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
assert len(result) == 0
assert result is not ts
daily_ts = ts.asfreq('D', fill_value=-1)
result = daily_ts.value_counts().sort_index()
expected = Series([60, 1, 1, 1],
index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
tm.assert_series_equal(result, expected)
def test_asfreq_datetimeindex_empty_series(self):
# GH 14320
expected = Series(index=pd.DatetimeIndex(
["2016-09-29 11:00"])).asfreq('H')
result = Series(index=pd.DatetimeIndex(["2016-09-29 11:00"]),
data=[3]).asfreq('H')
tm.assert_index_equal(expected.index, result.index)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
assert rs[1] == 1
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
# with tz
s = Series(
date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s.diff()
assert_series_equal(result, Series(
TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo'))
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
    def test_pct_change_shift_over_nas(self):
        # NaNs are forward-filled before the shift, so the change at 2.5
        # is measured against 1.5 (the last valid value), not against NaN.
        s = Series([1., 1.5, np.nan, 2.5, 3.])
        chg = s.pct_change()
        expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
        assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
def test_first_last_valid(self):
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
assert index == ts.index[5]
ts[-5:] = np.NaN
index = ts.last_valid_index()
assert index == ts.index[-6]
ts[:] = np.nan
assert ts.last_valid_index() is None
assert ts.first_valid_index() is None
ser = Series([], index=[])
assert ser.last_valid_index() is None
assert ser.first_valid_index() is None
# GH12800
empty = Series()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
def test_empty_series_ops(self):
# see issue #13844
a = Series(dtype='M8[ns]')
b = Series(dtype='m8[ns]')
assert_series_equal(a, a + b)
assert_series_equal(a, a - b)
assert_series_equal(a, b + a)
pytest.raises(TypeError, lambda x, y: x - y, b, a)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq is not None
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, .25)] +
[iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in np.arange(0, 2, .25)] + [NaT])
assert_series_equal(result, expected)
s = concat([Series([epoch + t for t in range(20)]
).astype(float), Series([np.nan])],
ignore_index=True)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
result = to_datetime([1, 2, 'NaT', pd.NaT, np.nan], unit='D')
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 3)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
to_datetime([1, 2, 'foo'], unit='D')
with pytest.raises(ValueError):
to_datetime([1, 2, 111111111], unit='D')
# coerce we can process
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 1)
result = to_datetime([1, 2, 'foo'], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
series = Series(dates)
assert np.issubdtype(series.dtype, np.dtype('M8[ns]'))
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00.000000\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
assert result == expected
def test_asfreq_keep_index_name(self):
# GH #9854
index_name = 'bar'
index = pd.date_range('20130101', periods=20, name=index_name)
df = pd.DataFrame([x for x in range(20)], columns=['foo'], index=index)
assert index_name == df.index.name
assert index_name == df.asfreq('10D').index.name
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.loc[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.loc['1/4/2000':]
result = chunk.loc[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
    def test_between(self):
        # Series.between is inclusive on both endpoints; it must match the
        # explicit (>= left) & (<= right) boolean mask.
        series = Series(date_range('1/1/2000', periods=10))
        left, right = series[[2, 7]]
        result = series.between(left, right)
        expected = (series >= left) & (series <= right)
        assert_series_equal(result, expected)
def test_between_time(self):
    """between_time with all include-start/include-end combinations, string
    arguments, and a time window that wraps across midnight."""
    rng = date_range('1/1/2000', '1/5/2000', freq='5min')
    ts = Series(np.random.randn(len(rng)), index=rng)
    stime = time(0, 0)
    etime = time(1, 0)
    # every combination of the inclusive-endpoint flags
    close_open = product([True, False], [True, False])
    for inc_start, inc_end in close_open:
        filtered = ts.between_time(stime, etime, inc_start, inc_end)
        exp_len = 13 * 4 + 1
        if not inc_start:
            exp_len -= 5
        if not inc_end:
            exp_len -= 4
        assert len(filtered) == exp_len
        # each surviving timestamp must respect the endpoint semantics
        for rs in filtered.index:
            t = rs.time()
            if inc_start:
                assert t >= stime
            else:
                assert t > stime
            if inc_end:
                assert t <= etime
            else:
                assert t < etime
    # string arguments must behave like datetime.time arguments
    result = ts.between_time('00:00', '01:00')
    expected = ts.between_time(stime, etime)
    assert_series_equal(result, expected)
    # across midnight
    rng = date_range('1/1/2000', '1/5/2000', freq='5min')
    ts = Series(np.random.randn(len(rng)), index=rng)
    stime = time(22, 0)
    etime = time(9, 0)
    close_open = product([True, False], [True, False])
    for inc_start, inc_end in close_open:
        filtered = ts.between_time(stime, etime, inc_start, inc_end)
        exp_len = (12 * 11 + 1) * 4 + 1
        if not inc_start:
            exp_len -= 4
        if not inc_end:
            exp_len -= 4
        assert len(filtered) == exp_len
        # wrapped window: a timestamp qualifies on either side of midnight
        for rs in filtered.index:
            t = rs.time()
            if inc_start:
                assert (t >= stime) or (t <= etime)
            else:
                assert (t > stime) or (t <= etime)
            if inc_end:
                assert (t <= etime) or (t >= stime)
            else:
                assert (t < etime) or (t >= stime)
def test_between_time_types(self):
    """between_time must reject full datetime arguments (GH11818)."""
    rng = date_range('1/1/2000', '1/5/2000', freq='5min')
    pytest.raises(ValueError, rng.indexer_between_time,
                  datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
    frame = DataFrame({'A': 0}, index=rng)
    pytest.raises(ValueError, frame.between_time,
                  datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
    series = Series(0, index=rng)
    pytest.raises(ValueError, series.between_time,
                  datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time_formats(self):
    """between_time accepts a variety of time-string formats (GH11818)."""
    _skip_if_has_locale()
    rng = date_range('1/1/2000', '1/5/2000', freq='5min')
    ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
    # all of these spell the same 2:00-2:30 window
    strings = [("2:00", "2:30"), ("0200", "0230"), ("2:00am", "2:30am"),
               ("0200am", "0230am"), ("2:00:00", "2:30:00"),
               ("020000", "023000"), ("2:00:00am", "2:30:00am"),
               ("020000am", "023000am")]
    expected_length = 28
    for time_string in strings:
        assert len(ts.between_time(*time_string)) == expected_length
def test_to_period(self):
    """Series/DataFrame.to_period, with and without an index frequency."""
    from pandas.core.indexes.period import period_range
    ts = _simple_ts('1/1/2000', '1/1/2001')
    pts = ts.to_period()
    exp = ts.copy()
    exp.index = period_range('1/1/2000', '1/1/2001')
    assert_series_equal(pts, exp)
    # explicit target frequency
    pts = ts.to_period('M')
    exp.index = exp.index.asfreq('M')
    tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
    assert_series_equal(pts, exp)
    # GH 7606 without freq
    idx = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
                         '2011-01-04'])
    exp_idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03',
                              '2011-01-04'], freq='D')
    s = Series(np.random.randn(4), index=idx)
    expected = s.copy()
    expected.index = exp_idx
    assert_series_equal(s.to_period(), expected)
    # DataFrame: convert the index (axis=0) and the columns (axis=1)
    df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
    expected = df.copy()
    expected.index = exp_idx
    assert_frame_equal(df.to_period(), expected)
    expected = df.copy()
    expected.columns = exp_idx
    assert_frame_equal(df.to_period(axis=1), expected)
def test_groupby_count_dateparseerror(self):
    """groupby over a MultiIndex containing datetimes must not attempt to
    parse the tuple keys as dates."""
    dr = date_range(start='1/1/2012', freq='5min', periods=10)
    # BAD Example, datetimes first
    s = Series(np.arange(10), index=[dr, lrange(10)])
    grouped = s.groupby(lambda x: x[1] % 2 == 0)
    result = grouped.count()
    # same grouping with the index levels swapped must give the same counts
    s = Series(np.arange(10), index=[lrange(10), dr])
    grouped = s.groupby(lambda x: x[0] % 2 == 0)
    expected = grouped.count()
    assert_series_equal(result, expected)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
assert '2000-01-01' in result
def test_series_map_box_timedelta(self):
    """map/apply/applymap box timedelta64 values as Timedelta (GH 11349)."""
    s = Series(timedelta_range('1 day 1 s', periods=5, freq='h'))

    def f(x):
        # total_seconds() only exists on the boxed Timedelta object
        return x.total_seconds()

    s.map(f)
    s.apply(f)
    DataFrame(s).applymap(f)
def test_asfreq_resample_set_correct_freq(self):
    """.asfreq() and .resample() must set .freq on the result index (GH5613)."""
    df = pd.DataFrame({'date': ["2012-01-01", "2012-01-02", "2012-01-03"],
                       'col': [1, 2, 3]})
    df = df.set_index(pd.to_datetime(df.date))
    # testing the settings before calling .asfreq() and .resample()
    assert df.index.freq is None
    assert df.index.inferred_freq == 'D'
    # does .asfreq() set .freq correctly?
    assert df.asfreq('D').index.freq == 'D'
    # does .resample() set .freq correctly?
    assert df.resample('D').asfreq().index.freq == 'D'
def test_pickle(self):
    """Round-trip pickling of NaT and of DatetimeIndex."""
    # GH4606: NaT must stay the singleton through pickling
    p = tm.round_trip_pickle(NaT)
    assert p is NaT
    idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
    idx_p = tm.round_trip_pickle(idx)
    assert idx_p[0] == idx[0]
    assert idx_p[1] is NaT
    assert idx_p[2] == idx[2]
    # GH11002
    # don't infer freq
    idx = date_range('1750-1-1', '2050-1-1', freq='7D')
    idx_p = tm.round_trip_pickle(idx)
    tm.assert_index_equal(idx, idx_p)
def test_setops_preserve_freq(self):
    """union/intersection keep name, freq and tz where well-defined."""
    for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
        rng = date_range('1/1/2000', '1/1/2002', name='idx', tz=tz)
        # adjacent and overlapping unions keep the daily freq
        result = rng[:50].union(rng[50:100])
        assert result.name == rng.name
        assert result.freq == rng.freq
        assert result.tz == rng.tz
        result = rng[:50].union(rng[30:100])
        assert result.name == rng.name
        assert result.freq == rng.freq
        assert result.tz == rng.tz
        # a gap (elements 50:60 missing) makes the freq undefined
        result = rng[:50].union(rng[60:100])
        assert result.name == rng.name
        assert result.freq is None
        assert result.tz == rng.tz
        result = rng[:50].intersection(rng[25:75])
        assert result.name == rng.name
        assert result.freqstr == 'D'
        assert result.tz == rng.tz
        # combining with a freq-less, differently-named index drops the name
        nofreq = DatetimeIndex(list(rng[25:75]), name='other')
        result = rng[:50].union(nofreq)
        assert result.name is None
        assert result.freq == rng.freq
        assert result.tz == rng.tz
        result = rng[:50].intersection(nofreq)
        assert result.name is None
        assert result.freq == rng.freq
        assert result.tz == rng.tz
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
assert isinstance(the_min, Timestamp)
assert isinstance(the_max, Timestamp)
assert the_min == rng[0]
assert the_max == rng[-1]
assert rng.min() == rng[0]
assert rng.max() == rng[-1]
def test_min_max_series(self):
    """Series.min/max on a datetime column return boxed Timestamps."""
    rng = date_range('1/1/2000', periods=10, freq='4h')
    lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
    df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)), 'L': lvls})
    result = df.TS.max()
    exp = Timestamp(df.TS.iat[-1])   # rng is increasing, so last == max
    assert isinstance(result, Timestamp)
    assert result == exp
    result = df.TS.min()
    exp = Timestamp(df.TS.iat[0])    # ... and first == min
    assert isinstance(result, Timestamp)
    assert result == exp
def test_from_M8_structured(self):
    """datetime64 fields of a structured array survive the trip into
    DataFrame/Series and come back boxed as Timestamps."""
    dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))]
    arr = np.array(dates,
                   dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
    df = DataFrame(arr)
    assert df['Date'][0] == dates[0][0]
    assert df['Forecasting'][0] == dates[0][1]
    s = Series(arr['Date'])
    assert isinstance(s[0], Timestamp)
    assert s[0] == dates[0][0]
    # Series.from_array is deprecated, hence the expected FutureWarning
    with pytest.warns(FutureWarning):
        s = Series.from_array(arr['Date'], Index([0]))
        assert s[0] == dates[0][0]
def test_get_level_values_box(self):
    """MultiIndex.get_level_values boxes datetime64 values as Timestamps."""
    from pandas import MultiIndex
    dates = date_range('1/1/2000', periods=4)
    levels = [dates, [0, 1]]
    labels = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]]
    index = MultiIndex(levels=levels, labels=labels)
    assert isinstance(index.get_level_values(0)[0], Timestamp)
| bsd-3-clause |
wanggang3333/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
    """Benchmark with scikit-learn decision tree classifier.

    Fits a DecisionTreeClassifier on (X, Y), predicts on X, and appends the
    elapsed wall-clock time in seconds to the module-level
    ``scikit_classifier_results`` list.
    """
    from sklearn.tree import DecisionTreeClassifier

    gc.collect()  # reduce garbage-collection noise in the timing below
    # start time
    tstart = datetime.now()
    clf = DecisionTreeClassifier()
    clf.fit(X, Y).predict(X)
    delta = (datetime.now() - tstart)
    # stop time
    # total_seconds() also accounts for the days component, unlike the
    # previous manual seconds + microseconds combination.
    scikit_classifier_results.append(delta.total_seconds())
def bench_scikit_tree_regressor(X, Y):
    """Benchmark with scikit-learn decision tree regressor.

    Fits a DecisionTreeRegressor on (X, Y), predicts on X, and appends the
    elapsed wall-clock time in seconds to the module-level
    ``scikit_regressor_results`` list.
    """
    from sklearn.tree import DecisionTreeRegressor

    gc.collect()  # reduce garbage-collection noise in the timing below
    # start time
    tstart = datetime.now()
    clf = DecisionTreeRegressor()
    clf.fit(X, Y).predict(X)
    delta = (datetime.now() - tstart)
    # stop time
    # total_seconds() also accounts for the days component, unlike the
    # previous manual seconds + microseconds combination.
    scikit_regressor_results.append(delta.total_seconds())
if __name__ == '__main__':
    # Benchmark 1: growing number of samples at fixed dimensionality.
    print('============================================')
    print('Warning: this is going to take a looong time')
    print('============================================')
    n = 10
    step = 10000
    n_samples = 10000
    dim = 10
    n_classes = 10
    for i in range(n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        n_samples += step
        X = np.random.randn(n_samples, dim)
        Y = np.random.randint(0, n_classes, (n_samples,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(n_samples)
        bench_scikit_tree_regressor(X, Y)
    xx = range(0, n * step, step)
    pl.figure('scikit-learn tree benchmark results')
    pl.subplot(211)
    pl.title('Learning with varying number of samples')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    # Benchmark 2: growing dimensionality at a fixed 100 samples.
    scikit_classifier_results = []
    scikit_regressor_results = []
    n = 10
    step = 500
    start_dim = 500
    n_classes = 10
    dim = start_dim
    for i in range(0, n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        dim += step
        X = np.random.randn(100, dim)
        Y = np.random.randint(0, n_classes, (100,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(100)
        bench_scikit_tree_regressor(X, Y)
    xx = np.arange(start_dim, start_dim + n * step, step)
    pl.subplot(212)
    pl.title('Learning in high dimensional spaces')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of dimensions')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
ctogle/grapeipm_support | wu_locate.py | 1 | 5419 | #!/usr/bin/python2.7
import argparse,contextlib,io,sys,os,json,time,pdb
import matplotlib.pyplot as plt
import wu_gather
# URL templates / builders for the Weather Underground geolookup API.
baseurl_lonlat = 'http://api.wunderground.com/api/%s/geolookup/q/%s,%s.json'
make_url_lonlat = lambda u,x,y : baseurl_lonlat % (u,x,y)
# Wunderground expects latitude first in the URL.  NOTE: this lambda does NOT
# swap its arguments -- callers (see lonlat()) pass (key, lat, lon) themselves.
make_url_latlon = lambda u,x,y : baseurl_lonlat % (u,x,y)
baseurl_state = 'http://api.wunderground.com/api/%s/geolookup/q/%s.json'
make_url_state = lambda u,s : baseurl_state % (u,s)
# Render {group: [locations]} as 'GROUP:loc1,loc2|GROUP2:...' -- the format of
# the -l option consumed by wu_gather.py.
locstring = lambda slocs : '|'.join([k+':'+','.join(slocs[k]) for k in slocs])
def plot_stationpoints(pts, lon, lat, bndf=10.0):
    '''Useful function for plotting the locations of stations around a lon,lat point.

    pts  : list of (x, y) station coordinates; the first entry is not drawn
           as a green marker (matching the original behaviour).
    lon, lat : the query point, drawn as a red star.
    bndf : divisor of the coordinate range used to pad the plot limits.

    Fix: the docstring used to appear *after* the first print statement, which
    made it a no-op expression instead of the function docstring.
    '''
    print('Calling fn plot_stationpoints()')
    print('... found %d nearby stations to lon,lat %s,%s ...' % (len(pts), lon, lat))
    xs, ys = zip(*pts)
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    xrng, yrng = xmax - xmin, ymax - ymin
    # reuse the already-computed extrema instead of recomputing min()/max()
    plt.xlim((xmin - xrng / bndf, xmax + xrng / bndf))
    plt.ylim((ymin - yrng / bndf, ymax + yrng / bndf))
    plt.plot([lon], [lat], marker='*', color='red')
    # NOTE(review): pts[0] is skipped when drawing the green markers --
    # confirm whether the first entry is meant to be the query point itself.
    for p in pts[1:]:
        plt.plot([p[0]], [p[1]], marker='o', color='g')
    plt.show()
@contextlib.contextmanager
def nostdout():
    '''Context manager that supresses stdout while the body runs.

    Fix: the original never restored sys.stdout when the body raised; the
    restore now happens in a finally clause so stdout is never left broken.
    '''
    print('Calling fn nostdout()')
    save_stdout = sys.stdout
    # NOTE(review): BytesIO matches the original Python 2 target; under
    # Python 3 print() would need an io.StringIO sink instead.
    sys.stdout = io.BytesIO()
    try:
        yield
    finally:
        sys.stdout = save_stdout
def query(url, outpath):
    print('Calling fn query(%s, %s)' % (url, outpath))
    '''Download json file from url, save at output, and return associated data.'''
    # urlfetch is truthy on a successful download; lastcall is recorded but
    # currently unused.
    if wu_gather.urlfetch(url, outpath):
        lastcall = time.time()
    # NOTE(review): outpath is read even when the fetch failed -- presumably a
    # previously cached copy is expected to exist; confirm.
    with open(outpath) as f:
        data = json.load(f)
    return data
def lonlat(cache, apikey, lon, lat):
    print('Calling fn lonlat()')
    '''Fetch list of up to 50 station locations within 40 km of a longitude,latitude.
    "The nearby Personal Weather Stations returned in the feed represent the closest
    stations within a 40km radius, with a max number of stations returned of 50."'''
    outpath = wu_gather.make_outpath(cache, 'LonLat_' + str(lon), str(lat), 'X')
    # latitude first: Wunderground's URL convention (see make_url_latlon)
    url = make_url_latlon(apikey, lat, lon)
    data = query(url, outpath)
    pts = []
    stationlocs = {}   # maps state abbrev (or 'PWS') -> list of cities (or pws ids)
    nearby = data['location']['nearby_weather_stations']
    for stationtype in nearby:
        nearbystations = nearby[stationtype]['station']
        for station in nearbystations:
            # lon and lat are switched, as wunderground seems to be in err...
            #pts.append((float(station['lon']),float(station['lat'])))
            pts.append((float(station['lat']), float(station['lon'])))
            if stationtype == 'airport':
                sloc = station['state'], station['city']
            elif stationtype == 'pws':
                sloc = 'PWS', station['id']
            else:
                emsg = '... stationtype %s is not supported ...'
                # this wont print on stdout with nostdout()...
                raise ValueError(emsg % stationtype)
            if sloc[0] in stationlocs:
                stationlocs[sloc[0]].append(sloc[1])
            else:
                stationlocs[sloc[0]] = [sloc[1]]
    #plot_stationpoints(pts,float(lon),float(lat))
    return stationlocs
def state(cache, apikey, state):
    print('Calling fn state()')
    '''Fetch state wide list of station locations (one per city).'''
    outpath = wu_gather.make_outpath(cache, state, 'X', 'X')
    url = make_url_state(apikey, state)
    print('... Searching %s with key %s, final URL: %s' % (state, apikey, url))
    data = query(url, outpath)
    stationlocs = {}   # maps state abbreviation -> list of city names
    print(data)
    for r in data['response']['results']:
        # note: this rebinds the 'state' parameter to the result's state field
        state, city = r['state'], r['city']
        if state in stationlocs:
            stationlocs[state].append(city)
        else:
            stationlocs[state] = [city]
    return stationlocs
if __name__ == '__main__':
    '''
    This can be used with wu_gather.py:
    ./wu_gather.py [wu_gather.py OPTIONS] -l "`./wu_locate.py [wu_locate.py OPTIONS]`"
    stdout is supressed aside from the resulting valid -l option for wu_gather.py.
    NOTE: This generally includes errors consequent of invalid input to wu_locate.py ...
    '''
    parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
    parser.add_argument('configfile',
        help = 'specify a parsing configuration file')
    parser.add_argument('--state',default = None,
        help = 'specify a state')
    parser.add_argument('--longitude',default = None,
        help = 'specify a longitude coordinate')
    parser.add_argument('--latitude',default = None,
        help = 'specify a latitude coordinate')
    parser.add_argument('-u','--apikey',
        help = 'specify a user api key for weather underground')
    parser.add_argument('-d','--cache',
        help = 'specify a directory to store raw weather underground data')
    cfg = wu_gather.parse_config(parser.parse_args())
    # make sure the raw-data cache directory exists
    if not os.path.exists(cfg.cache):os.mkdir(cfg.cache)
    # a state-wide lookup takes precedence over a lon/lat lookup
    if cfg.state:
        #with nostdout():
        stationlocs = state(cfg.cache,cfg.apikey,cfg.state)
    elif cfg.longitude and cfg.latitude:
        #with nostdout():
        stationlocs = lonlat(cfg.cache,cfg.apikey,cfg.longitude,cfg.latitude)
    else:
        emsg = '... need either --state option or --longitude and --latitude options ...'
        raise ValueError(emsg)
    # emit the -l option string for wu_gather.py on stdout
    print(locstring(stationlocs))
| mit |
exepulveda/swfc | python/clustering_pca_2d.py | 1 | 3175 | import numpy as np
import pickle
import logging
import argparse
import csv
import matplotlib as mpl
mpl.use('agg')
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from cluster_utils import create_clusters_dict, recode_categorical_values
from plotting import scatter_clusters
import matplotlib.pyplot as plt
import clusteringlib as cl
from case_study_2d import attributes,setup_case_study,setup_distances
if __name__ == "__main__":
filename = '2d'
locations,ore_indices,locations_ore,data_ore,min_values,max_values,scale,var_types,categories = setup_case_study()
N,ND = data_ore.shape
#recode binary clay
binary_clay = np.zeros((N,categories[0]))
for i in range(categories[0]):
indices = np.where(data_ore[:,0] == i)[0]
binary_clay[indices,i] = 1.0
values = np.c_[binary_clay,data_ore[:,1:]]
N,ND = values.shape
#now all are continues variables
var_types = np.ones(ND)
seed = 1634120
np.random.seed(seed)
n,p = values.shape
standadize = StandardScaler()
data = standadize.fit_transform(values)
scale = standadize.scale_
ND_PCA = 3
scale = np.ones(ND_PCA)
var_types = np.ones(ND_PCA)
pca = PCA(n_components=ND_PCA,whiten=True)
pca_X = pca.fit_transform(data)
data_F = np.asfortranarray(pca_X,dtype=np.float32)
pca_clusters_all = np.empty(len(locations))
for NC in range(2,11):
clustering_pca = KMeans(n_clusters=NC)
clusters_pca = clustering_pca.fit_predict(pca_X)
#save data
pca_clusters_all.fill(NC) #waste
pca_clusters_all[ore_indices] = clusters_pca #ore
new_data = np.c_[locations,pca_clusters_all]
np.savetxt("../results/{dataset}_clusters_pca_{nclusters}.csv".format(dataset=filename,nclusters=NC),new_data,delimiter=",",fmt="%.4f")
#stats
setup_distances(scale,var_types,categories,targets=None)
#PCA
centroid = np.asfortranarray(clustering_pca.cluster_centers_,dtype=np.float32)
clusters = np.asfortranarray(clusters_pca,dtype=np.int8)
weights = np.asfortranarray(np.ones((NC,ND_PCA),dtype=np.float32)/ ND_PCA)
ret_pca = cl.clustering.dbi_index(centroid,data_F,clusters,weights)
ret_sill= cl.clustering.silhouette_index(data_F,clusters,weights)
print('2D PCA',NC,ret_pca,ret_sill,sep=' ')
cl.distances.reset()
#save data
NC = 4
clustering_pca = KMeans(n_clusters=NC)
clusters_pca = clustering_pca.fit_predict(pca_X)
centroids = np.empty((NC,ND))
for k in range(NC):
indices = np.where(clusters_pca == k)[0]
centroids[k,:] = np.mean(values[indices,:],axis=0)
print('centroids',centroids)
np.savetxt("../results/final_{dataset}_clusters_pca_{nclusters}.csv".format(dataset=filename,nclusters=NC),clusters_pca,delimiter=",",fmt="%.4f")
np.savetxt("../results/final_{dataset}_centroids_pca_{nclusters}.csv".format(dataset=filename,nclusters=NC),centroids,delimiter=",",fmt="%.4f")
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/testing/jpl_units/Epoch.py | 6 | 7147 | #===========================================================================
#
# Epoch
#
#===========================================================================
"""Epoch module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
import math
import datetime as DT
from matplotlib.dates import date2num
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class Epoch:
    """A point in time stored as a Julian date plus seconds-of-day, tagged
    with a time frame ('ET' or 'UTC').

    NOTE(review): __cmp__ and the calls to the builtin cmp() are Python 2
    only; under Python 3 rich comparison methods would be required.
    """
    # Frame conversion offsets in seconds
    # t(TO) = t(FROM) + allowed[ FROM ][ TO ]
    allowed = {
        "ET" : {
            "UTC" : +64.1839,
            },
        "UTC" : {
            "ET" : -64.1839,
            },
        }

    #-----------------------------------------------------------------------
    def __init__( self, frame, sec=None, jd=None, daynum=None, dt=None ):
        """Create a new Epoch object.
        Build an epoch 1 of 2 ways:
        Using seconds past a Julian date:
        #   Epoch( 'ET', sec=1e8, jd=2451545 )
        or using a matplotlib day number
        #   Epoch( 'ET', daynum=730119.5 )
        = ERROR CONDITIONS
        - If the input units are not in the allowed list, an error is thrown.
        = INPUT VARIABLES
        - frame    The frame of the epoch.  Must be 'ET' or 'UTC'
        - sec      The number of seconds past the input JD.
        - jd       The Julian date of the epoch.
        - daynum   The matplotlib day number of the epoch.
        - dt       A python datetime instance.
        """
        # exactly one of the input styles (sec+jd, daynum, dt) must be used
        if ( ( sec is None and jd is not None ) or
             ( sec is not None and jd is None ) or
             ( daynum is not None and ( sec is not None or jd is not None ) ) or
             ( daynum is None and dt is None and ( sec is None or jd is None ) ) or
             ( daynum is not None and dt is not None ) or
             ( dt is not None and ( sec is not None or jd is not None ) ) or
             ( (dt is not None) and not isinstance(dt, DT.datetime) ) ):
            msg = "Invalid inputs.  Must enter sec and jd together, " \
                  "daynum by itself, or dt (must be a python datetime).\n" \
                  "Sec = %s\nJD  = %s\ndnum= %s\ndt  = %s" \
                  % ( str( sec ), str( jd ), str( daynum ), str( dt ) )
            raise ValueError( msg )
        if frame not in self.allowed:
            msg = "Input frame '%s' is not one of the supported frames of %s" \
                  % ( frame, str( self.allowed.keys() ) )
            raise ValueError( msg )
        self._frame = frame
        if dt is not None:
            # a datetime is first reduced to a matplotlib day number
            daynum = date2num( dt )
        if daynum is not None:
            # 1-JAN-0001 in JD = 1721425.5
            jd = float( daynum ) + 1721425.5
            self._jd = math.floor( jd )
            self._seconds = ( jd - self._jd ) * 86400.0
        else:
            self._seconds = float( sec )
            self._jd = float( jd )
            # Resolve seconds down to [ 0, 86400 )
            deltaDays = int( math.floor( self._seconds / 86400.0 ) )
            self._jd += deltaDays
            self._seconds -= deltaDays * 86400.0

    #-----------------------------------------------------------------------
    def convert( self, frame ):
        # converting to the current frame is a no-op
        if self._frame == frame:
            return self
        offset = self.allowed[ self._frame ][ frame ]
        return Epoch( frame, self._seconds + offset, self._jd )

    #-----------------------------------------------------------------------
    def frame( self ):
        # the time frame ('ET' or 'UTC') this epoch is expressed in
        return self._frame

    #-----------------------------------------------------------------------
    def julianDate( self, frame ):
        # Julian date (float days) of this epoch, expressed in 'frame'
        t = self
        if frame != self._frame:
            t = self.convert( frame )
        return t._jd + t._seconds / 86400.0

    #-----------------------------------------------------------------------
    def secondsPast( self, frame, jd ):
        # seconds elapsed since the Julian date 'jd', expressed in 'frame'
        t = self
        if frame != self._frame:
            t = self.convert( frame )
        delta = t._jd - jd
        return t._seconds + delta * 86400

    #-----------------------------------------------------------------------
    def __cmp__( self, rhs ):
        """Compare two Epoch's.
        = INPUT VARIABLES
        - rhs    The Epoch to compare against.
        = RETURN VALUE
        - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
        """
        t = self
        if self._frame != rhs._frame:
            t = self.convert( rhs._frame )
        # compare whole days first, then the seconds-of-day remainder
        if t._jd != rhs._jd:
            return cmp( t._jd, rhs._jd )
        return cmp( t._seconds, rhs._seconds )

    #-----------------------------------------------------------------------
    def __add__( self, rhs ):
        """Add a duration to an Epoch.
        = INPUT VARIABLES
        - rhs    The Epoch to subtract.
        = RETURN VALUE
        - Returns the difference of ourselves and the input Epoch.
        """
        t = self
        if self._frame != rhs.frame():
            t = self.convert( rhs._frame )
        sec = t._seconds + rhs.seconds()
        return Epoch( t._frame, sec, t._jd )

    #-----------------------------------------------------------------------
    def __sub__( self, rhs ):
        """Subtract two Epoch's or a Duration from an Epoch.
        Valid:
        Duration = Epoch - Epoch
        Epoch = Epoch - Duration
        = INPUT VARIABLES
        - rhs    The Epoch to subtract.
        = RETURN VALUE
        - Returns either the duration between to Epoch's or the a new
        Epoch that is the result of subtracting a duration from an epoch.
        """
        # Delay-load due to circular dependencies.
        import matplotlib.testing.jpl_units as U
        # Handle Epoch - Duration
        if isinstance( rhs, U.Duration ):
            return self + -rhs
        t = self
        if self._frame != rhs._frame:
            t = self.convert( rhs._frame )
        days = t._jd - rhs._jd
        sec = t._seconds - rhs._seconds
        return U.Duration( rhs._frame, days*86400 + sec )

    #-----------------------------------------------------------------------
    def __str__( self ):
        """Print the Epoch."""
        return "%22.15e %s" % ( self.julianDate( self._frame ), self._frame )

    #-----------------------------------------------------------------------
    def __repr__( self ):
        """Print the Epoch."""
        return str( self )

    #-----------------------------------------------------------------------
    def range( start, stop, step ):
        """Generate a range of Epoch objects.
        Similar to the Python range() method.  Returns the range [
        start, stop ) at the requested step.  Each element will be a
        Epoch object.
        = INPUT VARIABLES
        - start    The starting value of the range.
        - stop     The stop value of the range.
        - step     Step to use.
        = RETURN VALUE
        - Returns a list containing the requested Epoch values.
        """
        elems = []
        i = 0
        while True:
            d = start + i * step
            if d >= stop:
                break
            elems.append( d )
            i += 1
        return elems
    range = staticmethod( range )
#===========================================================================
| mit |
potash/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
# only one labeled point per circle; every other sample is -1 (unlabeled)
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
# left panel: the raw data with the two seed labels
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
            marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
            marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
            marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
# right panel: the labels learned by LabelSpreading
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
            marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
            marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
xzh86/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    """fit_transform over all combinations of sparsity, dtype, sort and
    iterable input must agree and round-trip through inverse_transform."""
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]
    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)
                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)
                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))
                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    """restrict() keeps only the selected features, via mask or indices."""
    # make two feature dicts with two useful features and a bunch of useless
    # ones, in terms of chi2
    d1 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=1, useful2=20)
    d2 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=20, useful2=1)
    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    """String feature values trigger one-hot ("feature=value") encoding."""
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))
    D_out = v.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})
    names = v.get_feature_names()
    # the expanded name is present, the bare categorical name is not
    assert_true("version=2" in names)
    assert_false("version" in names)
def test_unseen_or_no_features():
    """Unseen-only or empty dicts transform to all-zero rows; an empty
    sample list must raise a ValueError mentioning "empty"."""
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)
        # a sample containing only unseen features maps to zeros
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        # an empty sample also maps to zeros
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
        else:
            # Fix: previously the test silently passed when no exception was
            # raised; fail explicitly if transform([]) does not raise.
            raise AssertionError("transform([]) should raise ValueError")
def test_deterministic_vocabulary():
    """The fitted vocabulary must not depend on dict insertion order."""
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)
    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
cl4rke/scikit-learn | sklearn/datasets/lfw.py | 38 | 19042 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
    """Map a face image back onto the [0, 1] range for plotting.

    Undoes any affine normalization by shifting the minimum to 0 and
    dividing by the resulting maximum.
    """
    shifted = face - face.min()
    return shifted / shifted.max()
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Parameters
    ----------
    data_home : optional, default: None
        Cache folder; resolved through ``get_data_home``.
    funneled : boolean, optional, default: True
        Use the funneled (aligned) variant of the image archive.
    download_if_missing : optional, True by default
        If False, raise IOError when a required file is not cached locally
        instead of downloading it.

    Returns
    -------
    lfw_home, data_folder_path : tuple of str
        Root of the local LFW cache and the folder holding the jpeg images.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    # fetch the small metadata files (pair index files) individually
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warn("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warn("Downloading LFW data (~200MB): %s", archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # BUG FIX: previously reported `target_filepath` (a leftover
                # variable from the metadata loop above) instead of the
                # actually-missing archive file.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        # Use a context manager so the tar file handle is closed even when
        # extraction fails (the original leaked the open file object).
        with tarfile.open(archive_path, "r:gz") as tar:
            tar.extractall(path=lfw_home)
        # the decompressed images make the archive redundant; reclaim space
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Parameters
    ----------
    file_paths : list of str
        Paths of the jpeg files to decode, one per face.
    slice_ : pair of slices or None
        (height, width) crop applied to each 250 x 250 source image;
        ``None`` entries fall back to the full-image default.
    color : bool
        If False, average the RGB channels down to one gray channel.
    resize : float or None
        Rescaling ratio applied after cropping.

    Returns
    -------
    faces : float32 ndarray
        Shape (n_faces, h, w), or (n_faces, h, w, 3) when color is True.
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # replace any None entry with the corresponding full-image default
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    """
    # scan the data folder content to retain people with more that
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # folder names use underscores in place of spaces
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # integer-encode the names: target[i] is the index of person i's name in
    # the sorted, deduplicated target_names array
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Metadata file listing one pair per line: 3 tab-separated fields
        for a "same person" pair, 4 fields for a "different persons" pair.
    data_folder_path : str
        Folder containing one sub-folder of jpeg files per person.
    slice_, color, resize :
        Forwarded to ``_load_imgs``.

    Returns
    -------
    pairs : float32 ndarray of shape (n_pairs, 2, h, w) or (n_pairs, 2, h, w, 3)
    target : int ndarray of shape (n_pairs,), 1 for "same person" pairs
    target_names : ndarray with the two class label strings
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # BUG FIX: use the builtin int rather than the deprecated alias np.int
    # (removed in NumPy 1.24); the two have always been the same type.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # same person: name, index of picture 1, index of picture 2
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # different persons: name 1, index 1, name 2, index 2
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # Python 3: the parsed name is bytes; decode before joining
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # regroup the flat list of faces into (n_pairs, 2, ...) without copying
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
            "removed in 0.19."
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    # Same call as fetch_lfw_people but defaulting to offline-only behavior.
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_people(**kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.

      .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    pairs : numpy array of shape (2200, 2, 62, 47)
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_`` or resize
        parameters will change the shape of the output.

    target : numpy array of shape (2200,)
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
            "removed in 0.19."
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    # Same call as fetch_lfw_pairs but defaulting to offline-only behavior.
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_pairs(**kwargs)
| bsd-3-clause |
ky822/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
    """Overlay one covariance ellipse per GMM component on the axes *ax*."""
    for component, color in enumerate('rgb'):
        # Eigendecomposition of the 2x2 covariance of the first two features.
        eigvals, eigvecs = np.linalg.eigh(gmm._get_covars()[component][:2, :2])
        direction = eigvecs[0] / np.linalg.norm(eigvecs[0])
        # orientation of the principal axis, in degrees
        angle_deg = 180 * np.arctan2(direction[1], direction[0]) / np.pi
        eigvals = eigvals * 9  # enlarge the axes for visibility
        ellipse = mpl.patches.Ellipse(gmm.means_[component, :2],
                                      eigvals[0], eigvals[1],
                                      180 + angle_deg, color=color)
        ellipse.set_clip_box(ax.bbox)
        ellipse.set_alpha(0.5)
        ax.add_artist(ellipse)
# Fit one GMM per covariance type on the iris training fold and plot the
# resulting decision structure alongside train/test accuracies.
iris = datasets.load_iris()

# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))

X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]

n_classes = len(np.unique(y_train))

# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
                    covariance_type=covar_type, init_params='wc', n_iter=20))
                   for covar_type in ['spherical', 'diag', 'tied', 'full'])

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
                    left=.01, right=.99)

for index, (name, classifier) in enumerate(classifiers.items()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
                                  for i in xrange(n_classes)])

    # Train the other parameters using the EM algorithm.
    classifier.fit(X_train)

    # BUG FIX: subplot indices must be integers; under Python 3 the plain
    # division ``n_classifiers / 2`` produces a float and breaks plt.subplot.
    h = plt.subplot(2, n_classifiers // 2, index + 1)
    make_ellipses(classifier, h)

    for n, color in enumerate('rgb'):
        data = iris.data[iris.target == n]
        plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
                    label=iris.target_names[n])
    # Plot the test data with crosses
    for n, color in enumerate('rgb'):
        data = X_test[y_test == n]
        plt.plot(data[:, 0], data[:, 1], 'x', color=color)

    y_train_pred = classifier.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
             transform=h.transAxes)

    y_test_pred = classifier.predict(X_test)
    test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
    plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
             transform=h.transAxes)

    plt.xticks(())
    plt.yticks(())
    plt.title(name)

plt.legend(loc='lower right', prop=dict(size=12))

plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Dummy classifier to test the parameter search algorithms"""

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        return T.shape[0]

    # These all behave exactly like predict for the purpose of the tests.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # deterministic score keyed only on the hyper-parameter value
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """An LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Merely accessing ``score`` raises, emulating an estimator that
        # does not implement a score method at all.
        raise AttributeError
# Tiny deterministic two-class dataset shared by several tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    # Iterating a grid/sampler and indexing it must yield the same sequence.
    indexed = [grid[i] for i in range(len(grid))]
    assert_equal(list(grid), indexed)
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)

    # two parameters: the grid is the cartesian product (2 * 3 = 6 points)
    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))

    assert_grid_iter_equals_getitem(grid2)

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])

    # a list of grids may mix non-empty and empty entries
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    # BUG FIX: restore sys.stdout even when fit raises, otherwise a failure
    # here silences the output of every test that runs afterwards.
    try:
        grid_search.fit(X, y)
    finally:
        sys.stdout = old_stdout
    assert_equal(grid_search.best_estimator_.foo_param, 2)

    for i, foo_i in enumerate([1, 2, 3]):
        assert_true(grid_search.grid_scores_[i][0]
                    == {'foo_param': foo_i})
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)

    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)

    # reference search on an estimator that does have a score method
    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)

    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)

    # check that best params are equal
    assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
    # check that we can call score and that it gives the correct result
    assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))

    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    # GridSearchCV.score must warn exactly when the estimator's own score
    # method competes with an explicit ``scoring`` parameter.
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}

    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc').fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)

    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
    score_accuracy = assert_warns(ChangedBehaviorWarning,
                                  search_accuracy.score, X, y)
    score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
                                            X, y)
    score_auc = assert_warns(ChangedBehaviorWarning,
                             search_auc.score, X, y)
    # ensure the test is sane
    assert_true(score_auc < 1.0)
    assert_true(score_accuracy < 1.0)
    assert_not_equal(score_auc, score_accuracy)
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
    # Check if ValueError (when labels is None) propagates to GridSearchCV
    # And also check if labels is correctly passed to the cv object
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    labels = rng.randint(0, 3, 15)

    clf = LinearSVC(random_state=0)
    grid = {'C': [1]}

    # label-aware CV splitters must demand the labels argument ...
    label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
                 LabelShuffleSplit()]
    for cv in label_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        assert_raise_message(ValueError,
                             "The labels parameter should not be None",
                             gs.fit, X, y)
        gs.fit(X, y, labels)

    # ... while label-agnostic splitters must work without labels.
    # BUG FIX: removed a stray debugging ``print(cv)`` that only polluted
    # the test output.
    non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_label_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
def test_trivial_grid_scores():
    # Test search over a "grid" with only one point.
    # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]})
    grid_search.fit(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))

    # same property must hold for the randomized variant
    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
    random_search.fit(X, y)
    assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
    # Test that grid search can be used for model selection only
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
    grid_search.fit(X, y)
    # best_params_ is available even though no final estimator was refit
    assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
    # Test that grid search will capture errors on data with different
    # length
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    # X truncated to 180 samples while y keeps all 200: must raise
    assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
    # test the iid parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=np.bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    assert_almost_equal(first.mean_validation_score,
                        1 * 1. / 4. + 1. / 3. * 3. / 4.)

    # once with iid=False
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    # scores are the same as above
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    assert_almost_equal(first.mean_validation_score,
                        np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
    # A grid with a single candidate must fit exactly that estimator.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}

    clf = SVC()
    cv = GridSearchCV(clf, param_dict)
    cv.fit(X_, y_)

    # fit the same single configuration directly for comparison
    clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
    clf.fit(X_, y_)

    assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
    # Every malformed grid value (scalar, empty list, 2d array) must be
    # rejected by GridSearchCV with a ValueError.
    bad_grids = [
        {"C": 1.0},                         # scalar instead of a sequence
        {"C": []},                          # empty candidate list
        {"C": np.ones(6).reshape(3, 2)},    # multi-dimensional array
    ]
    for param_dict in bad_grids:
        clf = SVC()
        assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # dense fit on the first 180 samples, predict on the rest
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    # repeat with a sparse representation of the same data
    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    assert_true(np.mean(y_pred == y_pred2) >= .9)
    assert_equal(C, C2)
def test_grid_search_sparse_scoring():
    # Dense and sparse inputs must select the same model under a custom
    # scorer, for both greater-is-better and greater-is-worse scorers.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    assert_array_equal(y_pred, y_pred2)
    assert_equal(C, C2)
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))

    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)
    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C

    assert_equal(C, C3)
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]

    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(K_train, y_train)

    assert_true(cv.best_score_ >= 0)

    # compute the test kernel matrix
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]

    y_pred = cv.predict(K_test)

    assert_true(np.mean(y_pred == y_test) >= 0)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
    """A non-square precomputed training kernel must raise ValueError."""
    K_train = np.zeros((10, 20))  # 10 samples but 20 kernel columns
    y_train = np.ones((10, ))
    search = GridSearchCV(SVC(kernel='precomputed'), {'C': [0.1, 1.0]})
    assert_raises(ValueError, search.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
    """Grid search must raise ValueError when given a kernel function.

    ``fit`` is expected to fail because a callable kernel is not
    supported by this grid-search path.
    """
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # PEP 8 (E731): define a function instead of assigning a lambda to a name.
    def kernel_function(x1, x2):
        return np.dot(x1, x2.T)

    clf = SVC(kernel=kernel_function)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice."""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Blow up if fit() was already called on this same instance.
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant prediction; only the fitting behaviour matters here.
        return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
    """Regression test: refitting must use a fresh clone.

    Simulates re-fitting a broken estimator; this used to break with
    sparse SVMs.
    """
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    search = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                          scoring="precision", refit=True)
    search.fit(X, y)
def test_gridsearch_nd():
    """GridSearchCV must pass n-dimensional X and y through to the estimator."""
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)

    # PEP 8 (E731): named functions instead of lambdas assigned to names.
    def check_X(x):
        return x.shape[1:] == (5, 3, 2)

    def check_y(x):
        return x.shape[1:] == (7, 11)

    clf = CheckingClassifier(check_X=check_X, check_y=check_y)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    # NOTE(review): .score intentionally uses the module-level X, y fixtures.
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
    """X supplied as a plain Python list must reach the estimator as a list."""
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    checker = CheckingClassifier(check_X=lambda x: isinstance(x, list))
    grid_search = GridSearchCV(checker, {'foo_param': [1, 2, 3]},
                               cv=KFold(n_folds=3))
    grid_search.fit(X.tolist(), y).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
    """y supplied as a plain Python list must reach the estimator as a list."""
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    checker = CheckingClassifier(check_y=lambda x: isinstance(x, list))
    grid_search = GridSearchCV(checker, {'foo_param': [1, 2, 3]},
                               cv=KFold(n_folds=3))
    grid_search.fit(X, y.tolist()).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
    """DataFrame/Series inputs must not be destroyed by the search."""
    # Always check the mock container; add real pandas types when installed.
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        pass

    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    for InputFeatureType, TargetType in types:
        # X as a dataframe-like, y as a series-like.
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        clf = CheckingClassifier(
            check_X=lambda x: isinstance(x, InputFeatureType),
            check_y=lambda x: isinstance(x, TargetType))
        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
    """Grid search over an unsupervised estimator (KMeans)."""
    X, y = make_blobs(random_state=0)
    km = KMeans(random_state=0)

    # With a supervised metric, ARI recovers the true number of clusters.
    search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                          scoring='adjusted_rand_score')
    search.fit(X, y)
    assert_equal(search.best_params_["n_clusters"], 3)

    # Without a scorer and without y, the estimator's own score is used.
    search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    search.fit(X)
    assert_equal(search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
    """Custom scoring lets grid search work on estimators without predict().

    Slight duplication of a KernelDensity test.
    """
    def custom_scoring(estimator, X):
        # Reward exactly one bandwidth so the winner is deterministic.
        return 42 if estimator.bandwidth == .1 else 0

    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert_equal(search.best_params_['bandwidth'], .1)
    assert_equal(search.best_score_, 42)
def test_param_sampler():
    """Basic properties of ParameterSampler."""
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = list(sampler)
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)

    # With a fixed random_state, repeated iteration yields identical params.
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert_equal(list(sampler), list(sampler))

    if sp_version >= (0, 16):
        # scipy >= 0.16 distributions are also reproducible here.
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert_equal(list(sampler), list(sampler))
def test_randomized_search_grid_scores():
    """Structure and consistency of RandomizedSearchCV.grid_scores_."""
    # Lots of noise produces varied prediction errors across folds/settings.
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)
    # XXX: as of scipy 0.12 the seed of scipy.stats distributions cannot be
    # set, so the assertions below must not depend on the randomization.
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    assert_equal(len(search.grid_scores_), n_search_iter)

    # Each cv_score item must have the expected structure.
    for cv_score in search.grid_scores_:
        assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # With iid=False the mean_validation_score is the mean of the fold
        # means, not the aggregate sample-wise mean.
        assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                            cv_score.mean_validation_score)
        assert_equal(sorted(cv_score.parameters.keys()),
                     sorted(params.keys()))

    # Consistency with the best_score_ / best_params_ attributes.
    ranked = sorted(search.grid_scores_,
                    key=lambda s: s.mean_validation_score)
    best_score = ranked[-1].mean_validation_score
    assert_equal(search.best_score_, best_score)
    tied_best_params = [s.parameters for s in ranked
                        if s.mean_validation_score == best_score]
    assert_true(search.best_params_ in tied_best_params,
                "best_params_={0} is not part of the"
                " tied best models: {1}".format(
                    search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
    """grid_scores_ must match scores recomputed fold by fold."""
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
        grid_search.fit(X, y)
        cv = StratifiedKFold(n_folds=3)
        for C, scores in zip(Cs, grid_search.grid_scores_):
            clf.set_params(C=C)
            scores = scores[2]  # the per-fold scores of this grid point
            for i, (train, test) in enumerate(cv.split(X, y)):
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                assert_almost_equal(correct_score, scores[i])
def test_pickle():
    """A fitted search object must be picklable (smoke test)."""
    clf = MockClassifier()
    for search in (GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True),
                   RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
                                      refit=True, n_iter=3)):
        search.fit(X, y)
        pickle.dumps(search)  # smoke test
def _check_multioutput_fold_scores(est, cv, X, y, grid_scores):
    """Refit ``est`` on each fold and compare with the recorded CV scores."""
    for parameters, _, cv_validation_scores in grid_scores:
        est.set_params(**parameters)
        for i, (train, test) in enumerate(cv.split(X, y)):
            est.fit(X[train], y[train])
            correct_score = est.score(X[test], y[test])
            assert_almost_equal(correct_score, cv_validation_scores[i])


def test_grid_search_with_multioutput_data():
    """Grid and randomized search must handle multi-output estimators.

    The per-fold verification loop was duplicated for the two search
    types; it is factored into ``_check_multioutput_fold_scores``.
    """
    X, y = make_multilabel_classification(return_indicator=True,
                                          random_state=0)
    est_parameters = {"max_depth": [1, 2, 3, 4]}
    cv = KFold(random_state=0)
    estimators = [DecisionTreeRegressor(random_state=0),
                  DecisionTreeClassifier(random_state=0)]

    # Test with grid search cv
    for est in estimators:
        grid_search = GridSearchCV(est, est_parameters, cv=cv)
        grid_search.fit(X, y)
        _check_multioutput_fold_scores(est, cv, X, y,
                                       grid_search.grid_scores_)

    # Test with a randomized search
    for est in estimators:
        random_search = RandomizedSearchCV(est, est_parameters,
                                           cv=cv, n_iter=3)
        random_search.fit(X, y)
        _check_multioutput_fold_scores(est, cv, X, y,
                                       random_search.grid_scores_)
def test_predict_proba_disabled():
    """The search must not expose predict_proba if the estimator disables it."""
    X = np.arange(20).reshape(5, -1)
    y = [0, 0, 1, 1, 1]
    gs = GridSearchCV(SVC(probability=False), {}, cv=2).fit(X, y)
    assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
    """NaNs in X must be tolerated when the pipeline starts with an Imputer."""
    X = np.arange(20, dtype=np.float64).reshape(5, -1)
    X[2, :] = np.nan  # one sample made entirely missing
    y = [0, 0, 1, 1, 1]
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    GridSearchCV(pipeline, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()."""

    # fit() fails whenever ``parameter`` equals this value.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        # Constant prediction; only the failure behaviour matters here.
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    """error_score must replace scores of failed fits and warn, not raise.

    The construction of the search and the selection of the failing grid
    points were duplicated for the two error_score values; both are
    factored into local helpers.
    """
    X, y = make_classification(n_samples=20, n_features=10, random_state=0)
    clf = FailingClassifier()

    # refit=False because we only want to check that errors caused by fits
    # to individual folds will be caught and warnings raised instead. If
    # refit was done, then an exception would be raised on refit and not
    # caught by grid_search (expected behavior), and this would cause an
    # error in this test.
    def make_search(error_score):
        return GridSearchCV(clf, [{'parameter': [0, 1, 2]}],
                            scoring='accuracy', refit=False,
                            error_score=error_score)

    def failing_points(gs):
        # Grid points whose fit is expected to have failed.
        return [p for p in gs.grid_scores_
                if p.parameters['parameter'] ==
                FailingClassifier.FAILING_PARAMETER]

    gs = make_search(error_score=0.0)
    assert_warns(FitFailedWarning, gs.fit, X, y)
    # Scores of failed fits must have been reset to zero.
    assert all(np.all(p.cv_validation_scores == 0.0)
               for p in failing_points(gs))

    gs = make_search(error_score=float('nan'))
    assert_warns(FitFailedWarning, gs.fit, X, y)
    assert all(np.all(np.isnan(p.cv_validation_scores))
               for p in failing_points(gs))
def test_grid_search_failing_classifier_raise():
    """error_score='raise' must propagate the estimator's exception."""
    X, y = make_classification(n_samples=20, n_features=10, random_state=0)
    clf = FailingClassifier()

    # refit=False: only the grid-search error handling is under test here.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score='raise')

    # FailingClassifier raises a ValueError, which must reach the caller.
    assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
    """Sampling-without-replacement behaviour of ParameterSampler."""
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}

    # n_iter larger than the grid size is an error.
    assert_raises(ValueError, list, ParameterSampler(params, n_iter=7))

    # n_iter equal to the grid size degenerates to an exhaustive grid.
    samples = list(ParameterSampler(params, n_iter=6))
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # In a large grid the samples must all be distinct (no replacement).
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    samples = list(ParameterSampler(params, n_iter=99, random_state=42))
    assert_equal(len(samples), 99)
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    assert_equal(len(set(hashable_samples)), 99)

    # With continuous distributions sampling must not loop forever.
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    samples = list(ParameterSampler(params_distribution, n_iter=7))
    assert_equal(len(samples), 7)
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
               convert_categoricals=True, encoding=None, index=None,
               convert_missing=False, preserve_dtypes=True, columns=None,
               order_categoricals=True, chunksize=None, iterator=False):
    reader = StataReader(filepath_or_buffer,
                         convert_dates=convert_dates,
                         convert_categoricals=convert_categoricals,
                         index=index, convert_missing=convert_missing,
                         preserve_dtypes=preserve_dtypes,
                         columns=columns,
                         order_categoricals=order_categoricals,
                         chunksize=chunksize, encoding=encoding)

    # Iterator/chunked access hands the open reader to the caller, who is
    # then responsible for closing it; otherwise read fully and close here.
    if iterator or chunksize:
        data = reader
    else:
        data = reader.read()
        reader.close()
    return data
# Stata date display formats recognised by the SIF conversion helpers below.
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
# Stata internal-format dates are offsets from 01jan1960.
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
    """
    Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        The Stata Internal Format date to convert to datetime according to fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty

    Returns
    -------
    converted : Series
        The converted dates

    Examples
    --------
    >>> import pandas as pd
    >>> dates = pd.Series([52])
    >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
    0   1961-01-01
    dtype: datetime64[ns]

    Notes
    -----
    datetime/c - tc
        milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
    datetime/C - tC - NOT IMPLEMENTED
        milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
    date - td
        days since 01jan1960 (01jan1960 = 0)
    weekly date - tw
        weeks since 1960w1
        This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
        The datetime value is the start of the week in terms of days in the
        year, not ISO calendar weeks.
    monthly date - tm
        months since 1960m1
    quarterly date - tq
        quarters since 1960q1
    half-yearly date - th
        half-years since 1960h1 yearly
    date - ty
        years since 0000

    If you don't have pandas with datetime support, then you can't do
    milliseconds accurately.
    """
    MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
    MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
    MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
    MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
    MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000

    def convert_year_month_safe(year, month):
        """
        Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method
        using datetime.
        """
        if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
            return to_datetime(100 * year + month, format='%Y%m')
        else:
            index = getattr(year, 'index', None)
            return Series(
                [datetime.datetime(y, m, 1) for y, m in zip(year, month)],
                index=index)

    def convert_year_days_safe(year, days):
        """
        Converts year (e.g. 1999) and days since the start of the year to a
        datetime or datetime64 Series
        """
        if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
            return (to_datetime(year, format='%Y') +
                    to_timedelta(days, unit='d'))
        else:
            index = getattr(year, 'index', None)
            value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
                     for y, d in zip(year, days)]
            return Series(value, index=index)

    def convert_delta_safe(base, deltas, unit):
        """
        Convert base dates and deltas to datetimes, using pandas vectorized
        versions if the deltas satisfy restrictions required to be expressed
        as dates in pandas.
        """
        index = getattr(deltas, 'index', None)
        if unit == 'd':
            if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
                values = [base + relativedelta(days=int(d)) for d in deltas]
                return Series(values, index=index)
        elif unit == 'ms':
            if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
                values = [base + relativedelta(microseconds=(int(d) * 1000))
                          for d in deltas]
                return Series(values, index=index)
        else:
            raise ValueError('format not understood')
        base = to_datetime(base)
        deltas = to_timedelta(deltas, unit=unit)
        return base + deltas

    # TODO: If/when pandas supports more than datetime64[ns], this should be
    # improved to use correct range, e.g. datetime[Y] for yearly
    bad_locs = np.isnan(dates)
    has_bad_values = False
    if bad_locs.any():
        has_bad_values = True
        data_col = Series(dates)
        data_col[bad_locs] = 1.0  # Replace with NaT
    dates = dates.astype(np.int64)

    if fmt in ["%tc", "tc"]:  # Delta ms relative to base
        base = stata_epoch
        ms = dates
        conv_dates = convert_delta_safe(base, ms, 'ms')
    elif fmt in ["%tC", "tC"]:
        from warnings import warn
        warn("Encountered %tC format. Leaving in Stata Internal Format.")
        conv_dates = Series(dates, dtype=np.object)
        if has_bad_values:
            conv_dates[bad_locs] = pd.NaT
        return conv_dates
    elif fmt in ["%td", "td", "%d", "d"]:  # Delta days relative to base
        base = stata_epoch
        days = dates
        conv_dates = convert_delta_safe(base, days, 'd')
    elif fmt in ["%tw", "tw"]:  # does not count leap days - 7 days is a week
        year = stata_epoch.year + dates // 52
        days = (dates % 52) * 7
        conv_dates = convert_year_days_safe(year, days)
    elif fmt in ["%tm", "tm"]:  # Delta months relative to base
        year = stata_epoch.year + dates // 12
        month = (dates % 12) + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%tq", "tq"]:  # Delta quarters relative to base
        year = stata_epoch.year + dates // 4
        month = (dates % 4) * 3 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%th", "th"]:  # Delta half-years relative to base
        year = stata_epoch.year + dates // 2
        month = (dates % 2) * 6 + 1
        conv_dates = convert_year_month_safe(year, month)
    elif fmt in ["%ty", "ty"]:  # Years -- not delta
        year = dates
        month = np.ones_like(dates)
        conv_dates = convert_year_month_safe(year, month)
    else:
        raise ValueError("Date fmt %s not understood" % fmt)

    if has_bad_values:  # Restore NaT for bad values
        conv_dates[bad_locs] = NaT

    return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
    """
    Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime

    Parameters
    ----------
    dates : Series
        Series or array containing datetime.datetime or datetime64[ns] to
        convert to the Stata Internal Format given by fmt
    fmt : str
        The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    """
    index = dates.index
    NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
    US_PER_DAY = NS_PER_DAY / 1000

    def parse_dates_safe(dates, delta=False, year=False, days=False):
        # Build a frame with only the requested components (delta from the
        # Stata epoch in microseconds, calendar year/month, day-of-year).
        d = {}
        if is_datetime64_dtype(dates.values):
            if delta:
                delta = dates - stata_epoch
                d['delta'] = delta.values.astype(
                    np.int64) // 1000  # microseconds
            if days or year:
                dates = DatetimeIndex(dates)
                d['year'], d['month'] = dates.year, dates.month
            if days:
                days = (dates.astype(np.int64) -
                        to_datetime(d['year'], format='%Y').astype(np.int64))
                d['days'] = days // NS_PER_DAY

        elif infer_dtype(dates) == 'datetime':
            # Object column of datetime.datetime: fall back to slower
            # element-wise computation.
            if delta:
                delta = dates.values - stata_epoch
                f = lambda x: \
                    US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
                v = np.vectorize(f)
                d['delta'] = v(delta)
            if year:
                year_month = dates.apply(lambda x: 100 * x.year + x.month)
                d['year'] = year_month.values // 100
                d['month'] = (year_month.values - d['year'] * 100)
            if days:
                f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
                v = np.vectorize(f)
                d['days'] = v(dates)
        else:
            raise ValueError('Columns containing dates must contain either '
                             'datetime64, datetime.datetime or null values.')

        return DataFrame(d, index=index)

    # Temporarily replace missing dates with the epoch; they are overwritten
    # with the Stata missing-value code at the end.
    bad_loc = isnull(dates)
    index = dates.index
    if bad_loc.any():
        dates = Series(dates)
        if is_datetime64_dtype(dates):
            dates[bad_loc] = to_datetime(stata_epoch)
        else:
            dates[bad_loc] = stata_epoch

    if fmt in ["%tc", "tc"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta / 1000
    elif fmt in ["%tC", "tC"]:
        from warnings import warn
        warn("Stata Internal Format tC not supported.")
        conv_dates = dates
    elif fmt in ["%td", "td"]:
        d = parse_dates_safe(dates, delta=True)
        conv_dates = d.delta // US_PER_DAY
    elif fmt in ["%tw", "tw"]:
        d = parse_dates_safe(dates, year=True, days=True)
        conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
    elif fmt in ["%tm", "tm"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
    elif fmt in ["%tq", "tq"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
    elif fmt in ["%th", "th"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = 2 * (d.year - stata_epoch.year) + \
            (d.month > 6).astype(np.int)
    elif fmt in ["%ty", "ty"]:
        d = parse_dates_safe(dates, year=True)
        conv_dates = d.year
    else:
        raise ValueError("Format %s is not a known Stata date format" % fmt)

    conv_dates = Series(conv_dates, dtype=np.float64)
    # Stata's '.' missing value for doubles.
    missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
    conv_dates[bad_loc] = missing_value

    return Series(conv_dates, index=index)
# Message templates and warning categories used by StataWriter below.

excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""


class PossiblePrecisionLoss(Warning):
    pass


precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""


class ValueLabelTypeMismatch(Warning):
    pass


value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""


class InvalidColumnName(Warning):
    pass


invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:

    {0}

If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
    """Checks the dtypes of the columns of a pandas DataFrame for
    compatibility with the data types and ranges supported by Stata, and
    converts if necessary.

    Parameters
    ----------
    data : DataFrame
        The DataFrame to check and convert

    Notes
    -----
    Numeric columns in Stata must be one of int8, int16, int32, float32 or
    float64, with some additional value restrictions. int8 and int16 columns
    are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the value are in the int32 range, and sidecast to float64 when larger than
    this range. If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.

    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to
    a larger type. uint64 is currently not supported since it is converted to
    object in a DataFrame.
    """
    ws = ''
    # (original dtype, target if values fit, target otherwise)
    conversion_data = ((np.bool, np.int8, np.int8),
                       (np.uint8, np.int8, np.int16),
                       (np.uint16, np.int16, np.int32),
                       (np.uint32, np.int32, np.int64))

    float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
    float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]

    for col in data:
        dtype = data[col].dtype
        # Cast from unsupported types to supported types
        for c_data in conversion_data:
            if dtype == c_data[0]:
                if data[col].max() <= np.iinfo(c_data[1]).max:
                    dtype = c_data[1]
                else:
                    dtype = c_data[2]
                if c_data[2] == np.float64:  # Warn if necessary
                    if data[col].max() >= 2 ** 53:
                        ws = precision_loss_doc % ('uint64', 'float64')

                data[col] = data[col].astype(dtype)

        # Check values and upcast if necessary
        if dtype == np.int8:
            if data[col].max() > 100 or data[col].min() < -127:
                data[col] = data[col].astype(np.int16)
        elif dtype == np.int16:
            if data[col].max() > 32740 or data[col].min() < -32767:
                data[col] = data[col].astype(np.int32)
        elif dtype == np.int64:
            if (data[col].max() <= 2147483620 and
                    data[col].min() >= -2147483647):
                data[col] = data[col].astype(np.int32)
            else:
                data[col] = data[col].astype(np.float64)
                if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
                    ws = precision_loss_doc % ('int64', 'float64')
        elif dtype in (np.float32, np.float64):
            value = data[col].max()
            if np.isinf(value):
                msg = 'Column {0} has a maximum value of infinity which is ' \
                      'outside the range supported by Stata.'
                raise ValueError(msg.format(col))
            if dtype == np.float32 and value > float32_max:
                data[col] = data[col].astype(np.float64)
            elif dtype == np.float64:
                if value > float64_max:
                    msg = 'Column {0} has a maximum value ({1}) outside the ' \
                          'range supported by Stata ({1})'
                    raise ValueError(msg.format(col, value, float64_max))

    if ws:
        import warnings
        warnings.warn(ws, PossiblePrecisionLoss)

    return data
class StataValueLabel(object):
    """
    Parse a categorical column and prepare formatted output

    The previous docstring was a copy-paste from ``StataMissingValue``
    (it described a "missing value code" parameter); it is corrected
    here to document the actual constructor argument.

    Parameters
    ----------
    catarray : Series
        Categorical Series to encode as a Stata value label

    Attributes
    ----------
    labname : string
        Name of the value label (the column name)
    value_labels : list of (int, string)
        Sorted (code, label) pairs derived from the categories

    Methods
    -------
    generate_value_label
    """

    def __init__(self, catarray):
        self.labname = catarray.name

        categories = catarray.cat.categories
        self.value_labels = list(zip(np.arange(len(categories)), categories))
        self.value_labels.sort(key=lambda x: x[0])
        self.text_len = np.int32(0)
        self.off = []
        self.val = []
        self.txt = []
        self.n = 0

        # Compute lengths and setup lists of offsets and labels
        for vl in self.value_labels:
            category = vl[1]
            if not isinstance(category, string_types):
                category = str(category)
                import warnings
                warnings.warn(value_label_mismatch_doc.format(catarray.name),
                              ValueLabelTypeMismatch)

            self.off.append(self.text_len)
            self.text_len += len(category) + 1  # +1 for the padding
            self.val.append(vl[0])
            self.txt.append(category)
            self.n += 1

        if self.text_len > 32000:
            raise ValueError('Stata value labels for a single variable must '
                             'have a combined length less than 32,000 '
                             'characters.')

        # Ensure int32
        self.off = np.array(self.off, dtype=np.int32)
        self.val = np.array(self.val, dtype=np.int32)

        # Total length
        self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len

    def _encode(self, s):
        """
        Python 3 compatibility shim
        """
        if compat.PY3:
            return s.encode(self._encoding)
        else:
            return s

    def generate_value_label(self, byteorder, encoding):
        """
        Parameters
        ----------
        byteorder : str
            Byte order of the output
        encoding : str
            File encoding

        Returns
        -------
        value_label : bytes
            Bytes containing the formatted value label
        """
        self._encoding = encoding
        bio = BytesIO()
        null_string = '\x00'
        null_byte = b'\x00'

        # len
        bio.write(struct.pack(byteorder + 'i', self.len))

        # labname
        labname = self._encode(_pad_bytes(self.labname[:32], 33))
        bio.write(labname)

        # padding - 3 bytes
        for i in range(3):
            bio.write(struct.pack('c', null_byte))

        # value_label_table
        # n - int32
        bio.write(struct.pack(byteorder + 'i', self.n))

        # textlen - int32
        bio.write(struct.pack(byteorder + 'i', self.text_len))

        # off - int32 array (n elements)
        for offset in self.off:
            bio.write(struct.pack(byteorder + 'i', offset))

        # val - int32 array (n elements)
        for value in self.val:
            bio.write(struct.pack(byteorder + 'i', value))

        # txt - Text labels, null terminated
        for text in self.txt:
            bio.write(self._encode(text + null_string))

        bio.seek(0)
        return bio.read()
class StataMissingValue(StringMixin):
    """
    An observation's missing value.

    Parameters
    -----------
    value : int8, int16, int32, float32 or float64
        The Stata missing value code

    Attributes
    ----------
    string : string
        String representation of the Stata missing value
    value : int8, int16, int32, float32 or float64
        The original encoded missing value

    Notes
    -----
    More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values make the code '.', '.a', ..., '.z' to the ranges
    101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
    2147483647 (for int32). Missing values for floating point data types are
    more complex but the pattern is simple to discern from the following table.
    np.float32 missing values (float in Stata)
    0000007f    .
    0008007f    .a
    0010007f    .b
    ...
    00c0007f    .x
    00c8007f    .y
    00d0007f    .z
    np.float64 missing values (double in Stata)
    000000000000e07f    .
    000000000001e07f    .a
    000000000002e07f    .b
    ...
    000000000018e07f    .x
    000000000019e07f    .y
    00000000001ae07f    .z
    """
    # Construct a dictionary of missing values (built once at class
    # creation; keys are the encoded codes, values the display strings).
    MISSING_VALUES = {}
    bases = (101, 32741, 2147483621)
    for b in bases:
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        MISSING_VALUES[compat.long(b)] = '.'
        for i in range(1, 27):
            MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
    float32_base = b'\x00\x00\x00\x7f'
    increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<f', float32_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('<i', struct.pack('<f', value))[
            0] + increment
        float32_base = struct.pack('<i', int_value)
    float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
    # BUG FIX: the byte literals here are little-endian, so the int64
    # round-trips must use an explicit '<q'.  The previous native-order 'q'
    # matched the '<d' side only on little-endian hosts and produced wrong
    # float64 missing-value codes on big-endian platforms.
    increment = struct.unpack('<q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
    for i in range(27):
        value = struct.unpack('<d', float64_base)[0]
        MISSING_VALUES[value] = '.'
        if i > 0:
            MISSING_VALUES[value] += chr(96 + i)
        int_value = struct.unpack('<q',
                                  struct.pack('<d', value))[0] + increment
        float64_base = struct.pack('<q', int_value)
    # Lowest ('.') missing-value code per dtype, used by writers.
    BASE_MISSING_VALUES = {'int8': 101,
                           'int16': 32741,
                           'int32': 2147483621,
                           'float32': struct.unpack('<f', float32_base)[0],
                           'float64': struct.unpack('<d', float64_base)[0]}

    def __init__(self, value):
        self._value = value
        # Conversion to long to avoid hash issues on 32 bit platforms #8968
        value = compat.long(value) if value < 2147483648 else float(value)
        self._str = self.MISSING_VALUES[value]
    string = property(lambda self: self._str,
                      doc="The Stata representation of the missing value: "
                          "'.', '.a'..'.z'")
    value = property(lambda self: self._value,
                     doc='The binary representation of the missing value.')

    def __unicode__(self):
        return self.string

    def __repr__(self):
        # not perfect :-/
        return "%s(%s)" % (self.__class__, self)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable under Python 3 — confirm nothing relies on hashing these.
        return (isinstance(other, self.__class__) and
                self.string == other.string and self.value == other.value)

    @classmethod
    def get_base_missing_value(cls, dtype):
        """Return the generic ('.') missing-value code for a numpy dtype.

        Raises
        ------
        ValueError
            If *dtype* is not one of the five Stata numeric types.
        """
        if dtype == np.int8:
            value = cls.BASE_MISSING_VALUES['int8']
        elif dtype == np.int16:
            value = cls.BASE_MISSING_VALUES['int16']
        elif dtype == np.int32:
            value = cls.BASE_MISSING_VALUES['int32']
        elif dtype == np.float32:
            value = cls.BASE_MISSING_VALUES['float32']
        elif dtype == np.float64:
            value = cls.BASE_MISSING_VALUES['float64']
        else:
            raise ValueError('Unsupported dtype')
        return value
class StataParser(object):
    """Shared lookup tables for Stata .dta readers and writers."""

    _default_encoding = 'iso-8859-1'

    def __init__(self, encoding):
        self._encoding = encoding
        # type          code.
        # --------------------
        # str1        1 = 0x01
        # str2        2 = 0x02
        # ...
        # str244    244 = 0xf4
        # byte      251 = 0xfb  (sic)
        # int       252 = 0xfc
        # long      253 = 0xfd
        # float     254 = 0xfe
        # double    255 = 0xff
        # --------------------
        # NOTE: the byte type seems to be reserved for categorical variables
        # with a label, but the underlying variable is -127 to 100
        # we're going to drop the label and cast to int
        self.DTYPE_MAP = {code: 'a' + str(code) for code in range(1, 245)}
        self.DTYPE_MAP.update({251: np.int8,
                               252: np.int16,
                               253: np.int32,
                               254: np.float32,
                               255: np.float64})
        self.DTYPE_MAP_XML = {32768: np.uint8,  # Keys to GSO
                              65526: np.float64,
                              65527: np.float32,
                              65528: np.int32,
                              65529: np.int16,
                              65530: np.int8}
        self.TYPE_MAP = list(range(251)) + list('bhlfd')
        # Not really a Q, unclear how to handle byteswap (GSO keys)
        self.TYPE_MAP_XML = {32768: 'Q',
                             65526: 'd',
                             65527: 'f',
                             65528: 'l',
                             65529: 'h',
                             65530: 'b'}
        # NOTE: technically, some of these are wrong. there are more numbers
        # that can be represented. it's the 27 ABOVE and BELOW the max listed
        # numeric data type in [U] 12.2.2 of the 11.2 manual
        f32_min = b'\xff\xff\xff\xfe'
        f32_max = b'\xff\xff\xff\x7e'
        f64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
        f64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
        self.VALID_RANGE = {
            'b': (-127, 100),
            'h': (-32767, 32740),
            'l': (-2147483647, 2147483620),
            'f': (np.float32(struct.unpack('<f', f32_min)[0]),
                  np.float32(struct.unpack('<f', f32_max)[0])),
            'd': (np.float64(struct.unpack('<d', f64_min)[0]),
                  np.float64(struct.unpack('<d', f64_max)[0]))
        }
        self.OLD_TYPE_MAPPING = {
            98: 251,   # byte
            105: 252,  # int
            108: 253,  # long
            102: 254   # float
            # don't know old code for double
        }
        # These missing values are the generic '.' in Stata, and are used
        # to replace nans
        self.MISSING_VALUES = {
            'b': 101,
            'h': 32741,
            'l': 2147483621,
            'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
            'd': np.float64(
                struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
        }
        self.NUMPY_TYPE_MAP = {
            'b': 'i1',
            'h': 'i2',
            'l': 'i4',
            'f': 'f4',
            'd': 'f8',
            'Q': 'u8'
        }
        # Reserved words cannot be used as variable names
        self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
                               'byte', 'case', 'catch', 'class', 'colvector',
                               'complex', 'const', 'continue', 'default',
                               'delegate', 'delete', 'do', 'double', 'else',
                               'eltypedef', 'end', 'enum', 'explicit',
                               'export', 'external', 'float', 'for', 'friend',
                               'function', 'global', 'goto', 'if', 'inline',
                               'int', 'local', 'long', 'NULL', 'pragma',
                               'protected', 'quad', 'rowvector', 'short',
                               'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
    __doc__ = _stata_reader_doc
    def __init__(self, path_or_buf, convert_dates=True,
                 convert_categoricals=True, index=None,
                 convert_missing=False, preserve_dtypes=True,
                 columns=None, order_categoricals=True,
                 encoding='iso-8859-1', chunksize=None):
        """Open *path_or_buf* and parse the .dta header immediately."""
        super(StataReader, self).__init__(encoding)
        self.col_sizes = ()
        # Arguments to the reader (can be temporarily overridden in
        # calls to read).
        self._convert_dates = convert_dates
        self._convert_categoricals = convert_categoricals
        self._index = index
        self._convert_missing = convert_missing
        self._preserve_dtypes = preserve_dtypes
        self._columns = columns
        self._order_categoricals = order_categoricals
        self._encoding = encoding
        self._chunksize = chunksize
        # State variables for the file
        self._has_string_data = False
        self._missing_values = False
        self._can_read_value_labels = False
        self._column_selector_set = False
        self._value_labels_read = False
        self._data_read = False
        self._dtype = None
        self._lines_read = 0
        self._native_byteorder = _set_endianness(sys.byteorder)
        if isinstance(path_or_buf, str):
            path_or_buf, encoding, _ = get_filepath_or_buffer(
                path_or_buf, encoding=self._default_encoding
            )
        if isinstance(path_or_buf, (str, compat.text_type, bytes)):
            self.path_or_buf = open(path_or_buf, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = path_or_buf.read()
            try:
                contents = contents.encode(self._default_encoding)
            except:
                pass  # presumably contents are already bytes — best effort
            self.path_or_buf = BytesIO(contents)
        self._read_header()
    def __enter__(self):
        """ enter context manager """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """ exit context manager """
        self.close()
    def close(self):
        """ close the handle if its open """
        try:
            self.path_or_buf.close()
        except IOError:
            pass
    def _read_header(self):
        """Dispatch header parsing: XML-style (117/118) files start with '<'."""
        first_char = self.path_or_buf.read(1)
        if struct.unpack('c', first_char)[0] == b'<':
            self._read_new_header(first_char)
        else:
            self._read_old_header(first_char)
        # typlist entries that are ints denote fixed-width string columns
        self.has_string_data = len([x for x in self.typlist
                                    if type(x) is int]) > 0
        # calculate size of a data record
        self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
        # remove format details from %td
        self.fmtlist = ["%td" if x.startswith("%td") else x
                        for x in self.fmtlist]
    def _read_new_header(self, first_char):
        """Parse a format 117/118 (Stata 13/14) XML-style header."""
        # The first part of the header is common to 117 and 118.
        self.path_or_buf.read(27)  # stata_dta><header><release>
        self.format_version = int(self.path_or_buf.read(3))
        if self.format_version not in [117, 118]:
            raise ValueError(_version_error)
        self.path_or_buf.read(21)  # </release><byteorder>
        # NOTE(review): read() returns bytes; comparing to the str "MSF" is
        # always False on Python 3, so byteorder is forced to '<' — confirm
        # big-endian 117/118 files are handled upstream.
        self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
        self.path_or_buf.read(15)  # </byteorder><K>
        self.nvar = struct.unpack(self.byteorder + 'H',
                                  self.path_or_buf.read(2))[0]
        self.path_or_buf.read(7)  # </K><N>
        self.nobs = self._get_nobs()
        self.path_or_buf.read(11)  # </N><label>
        self.data_label = self._get_data_label()
        self.path_or_buf.read(19)  # </label><timestamp>
        self.time_stamp = self._get_time_stamp()
        self.path_or_buf.read(26)  # </timestamp></header><map>
        self.path_or_buf.read(8)  # 0x0000000000000000
        self.path_or_buf.read(8)  # position of <map>
        # The <map> section stores absolute file offsets of each section;
        # the small constants skip each section's opening tag.
        self._seek_vartypes = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
        self._seek_varnames = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
        self._seek_sortlist = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
        self._seek_formats = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
        self._seek_value_label_names = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
        # Requires version-specific treatment
        self._seek_variable_labels = self._get_seek_variable_labels()
        self.path_or_buf.read(8)  # <characteristics>
        self.data_location = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
        self.seek_strls = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
        self.seek_value_labels = struct.unpack(
            self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
        self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
        self.path_or_buf.seek(self._seek_varnames)
        self.varlist = self._get_varlist()
        self.path_or_buf.seek(self._seek_sortlist)
        self.srtlist = struct.unpack(
            self.byteorder + ('h' * (self.nvar + 1)),
            self.path_or_buf.read(2 * (self.nvar + 1))
        )[:-1]
        self.path_or_buf.seek(self._seek_formats)
        self.fmtlist = self._get_fmtlist()
        self.path_or_buf.seek(self._seek_value_label_names)
        self.lbllist = self._get_lbllist()
        self.path_or_buf.seek(self._seek_variable_labels)
        self._variable_labels = self._get_variable_labels()
    # Get data type information, works for versions 117-118.
    def _get_dtypes(self, seek_vartypes):
        """Read raw type codes and translate to (typlist, dtyplist)."""
        self.path_or_buf.seek(seek_vartypes)
        raw_typlist = [struct.unpack(self.byteorder + 'H',
                                     self.path_or_buf.read(2))[0]
                       for i in range(self.nvar)]
        # Codes <= 2045 are fixed-width strings (the code is the width);
        # larger codes map through the XML type tables.
        def f(typ):
            if typ <= 2045:
                return typ
            try:
                return self.TYPE_MAP_XML[typ]
            except KeyError:
                raise ValueError("cannot convert stata types [{0}]".
                                 format(typ))
        typlist = [f(x) for x in raw_typlist]
        def f(typ):
            if typ <= 2045:
                return str(typ)
            try:
                return self.DTYPE_MAP_XML[typ]
            except KeyError:
                raise ValueError("cannot convert stata dtype [{0}]"
                                 .format(typ))
        dtyplist = [f(x) for x in raw_typlist]
        return typlist, dtyplist
    def _get_varlist(self):
        """Read the variable-name records (width differs by version)."""
        if self.format_version == 117:
            b = 33
        elif self.format_version == 118:
            b = 129
        return [self._null_terminate(self.path_or_buf.read(b))
                for i in range(self.nvar)]
    # Returns the format list
    def _get_fmtlist(self):
        if self.format_version == 118:
            b = 57
        elif self.format_version > 113:
            b = 49
        elif self.format_version > 104:
            b = 12
        else:
            b = 7
        return [self._null_terminate(self.path_or_buf.read(b))
                for i in range(self.nvar)]
    # Returns the label list
    def _get_lbllist(self):
        if self.format_version >= 118:
            b = 129
        elif self.format_version > 108:
            b = 33
        else:
            b = 9
        return [self._null_terminate(self.path_or_buf.read(b))
                for i in range(self.nvar)]
    def _get_variable_labels(self):
        """Read one label per variable; 118 files are UTF-8 encoded."""
        if self.format_version == 118:
            vlblist = [self._decode(self.path_or_buf.read(321))
                       for i in range(self.nvar)]
        elif self.format_version > 105:
            vlblist = [self._null_terminate(self.path_or_buf.read(81))
                       for i in range(self.nvar)]
        else:
            vlblist = [self._null_terminate(self.path_or_buf.read(32))
                       for i in range(self.nvar)]
        return vlblist
    def _get_nobs(self):
        """Observation count: uint64 in 118 files, uint32 otherwise."""
        if self.format_version == 118:
            return struct.unpack(self.byteorder + 'Q',
                                 self.path_or_buf.read(8))[0]
        else:
            return struct.unpack(self.byteorder + 'I',
                                 self.path_or_buf.read(4))[0]
    def _get_data_label(self):
        """Read the dataset label using the version-specific layout."""
        if self.format_version == 118:
            strlen = struct.unpack(self.byteorder + 'H',
                                   self.path_or_buf.read(2))[0]
            return self._decode(self.path_or_buf.read(strlen))
        elif self.format_version == 117:
            strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
            return self._null_terminate(self.path_or_buf.read(strlen))
        elif self.format_version > 105:
            return self._null_terminate(self.path_or_buf.read(81))
        else:
            return self._null_terminate(self.path_or_buf.read(32))
    def _get_time_stamp(self):
        """Read the file creation timestamp (layout varies by version)."""
        if self.format_version == 118:
            strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
            return self.path_or_buf.read(strlen).decode("utf-8")
        elif self.format_version == 117:
            strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
            return self._null_terminate(self.path_or_buf.read(strlen))
        elif self.format_version > 104:
            return self._null_terminate(self.path_or_buf.read(18))
        else:
            raise ValueError()
    def _get_seek_variable_labels(self):
        """Return the absolute offset of the variable-labels section."""
        if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_lables>, throw away
            # Stata 117 data files do not follow the described format.  This is
            # a work around that uses the previous label, 33 bytes for each
            # variable, 20 for the closing tag and 17 for the opening tag
            return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
        elif self.format_version == 118:
            return struct.unpack(self.byteorder + 'q',
                                 self.path_or_buf.read(8))[0] + 17
        else:
            raise ValueError()
    def _read_old_header(self, first_char):
        """Parse a pre-117 fixed-layout header (first byte is the version)."""
        self.format_version = struct.unpack('b', first_char)[0]
        if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
            raise ValueError(_version_error)
        self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
            0] == 0x1 and '>' or '<'
        self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
        self.path_or_buf.read(1)  # unused
        self.nvar = struct.unpack(self.byteorder + 'H',
                                  self.path_or_buf.read(2))[0]
        self.nobs = self._get_nobs()
        self.data_label = self._get_data_label()
        self.time_stamp = self._get_time_stamp()
        # descriptors
        if self.format_version > 108:
            typlist = [ord(self.path_or_buf.read(1))
                       for i in range(self.nvar)]
        else:
            buf = self.path_or_buf.read(self.nvar)
            typlistb = np.frombuffer(buf, dtype=np.uint8)
            typlist = []
            for tp in typlistb:
                if tp in self.OLD_TYPE_MAPPING:
                    typlist.append(self.OLD_TYPE_MAPPING[tp])
                else:
                    typlist.append(tp - 127)  # py2 string, py3 bytes
        try:
            self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
        except:
            raise ValueError("cannot convert stata types [{0}]"
                             .format(','.join(str(x) for x in typlist)))
        try:
            self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
        except:
            raise ValueError("cannot convert stata dtypes [{0}]"
                             .format(','.join(str(x) for x in typlist)))
        if self.format_version > 108:
            self.varlist = [self._null_terminate(self.path_or_buf.read(33))
                            for i in range(self.nvar)]
        else:
            self.varlist = [self._null_terminate(self.path_or_buf.read(9))
                            for i in range(self.nvar)]
        self.srtlist = struct.unpack(
            self.byteorder + ('h' * (self.nvar + 1)),
            self.path_or_buf.read(2 * (self.nvar + 1))
        )[:-1]
        self.fmtlist = self._get_fmtlist()
        self.lbllist = self._get_lbllist()
        self._variable_labels = self._get_variable_labels()
        # ignore expansion fields (Format 105 and later)
        # When reading, read five bytes; the last four bytes now tell you
        # the size of the next read, which you discard.  You then continue
        # like this until you read 5 bytes of zeros.
        if self.format_version > 104:
            while True:
                data_type = struct.unpack(self.byteorder + 'b',
                                          self.path_or_buf.read(1))[0]
                if self.format_version > 108:
                    data_len = struct.unpack(self.byteorder + 'i',
                                             self.path_or_buf.read(4))[0]
                else:
                    data_len = struct.unpack(self.byteorder + 'h',
                                             self.path_or_buf.read(2))[0]
                if data_type == 0:
                    break
                self.path_or_buf.read(data_len)
        # necessary data to continue parsing
        self.data_location = self.path_or_buf.tell()
    def _calcsize(self, fmt):
        """Byte width of one column: int fmt means a string of that width."""
        return (type(fmt) is int and fmt or
                struct.calcsize(self.byteorder + fmt))
    def _decode(self, s):
        """Decode a null-terminated UTF-8 byte string (118 files)."""
        s = s.partition(b"\0")[0]
        return s.decode('utf-8')
    def _null_terminate(self, s):
        """Strip at the first NUL and decode with the reader's encoding."""
        if compat.PY3 or self._encoding is not None:
            # have bytes not strings, so must decode
            s = s.partition(b"\0")[0]
            return s.decode(self._encoding or self._default_encoding)
        else:
            null_byte = "\0"
            try:
                return s.lstrip(null_byte)[:s.index(null_byte)]
            except:
                return s
    def _read_value_labels(self):
        """Populate self.value_label_dict from the value-label section."""
        if self.format_version <= 108:
            # Value labels are not supported in version 108 and earlier.
            return
        if self._value_labels_read:
            # Don't read twice
            return
        if self.format_version >= 117:
            self.path_or_buf.seek(self.seek_value_labels)
        else:
            # In old formats the value labels follow directly after the data.
            offset = self.nobs * self._dtype.itemsize
            self.path_or_buf.seek(self.data_location + offset)
        self._value_labels_read = True
        self.value_label_dict = dict()
        while True:
            if self.format_version >= 117:
                if self.path_or_buf.read(5) == b'</val':  # <lbl>
                    break  # end of value label table
            slength = self.path_or_buf.read(4)
            if not slength:
                break  # end of value label table (format < 117)
            if self.format_version <= 117:
                labname = self._null_terminate(self.path_or_buf.read(33))
            else:
                labname = self._decode(self.path_or_buf.read(129))
            self.path_or_buf.read(3)  # padding
            n = struct.unpack(self.byteorder + 'I',
                              self.path_or_buf.read(4))[0]
            txtlen = struct.unpack(self.byteorder + 'I',
                                   self.path_or_buf.read(4))[0]
            off = np.frombuffer(self.path_or_buf.read(4 * n),
                                dtype=self.byteorder + "i4",
                                count=n)
            val = np.frombuffer(self.path_or_buf.read(4 * n),
                                dtype=self.byteorder + "i4",
                                count=n)
            # Sort both arrays by text offset so each label's text is the
            # span [off[i], off[i+1]) of the text blob.
            ii = np.argsort(off)
            off = off[ii]
            val = val[ii]
            txt = self.path_or_buf.read(txtlen)
            self.value_label_dict[labname] = dict()
            for i in range(n):
                end = off[i + 1] if i < n - 1 else txtlen
                if self.format_version <= 117:
                    self.value_label_dict[labname][val[i]] = (
                        self._null_terminate(txt[off[i]:end]))
                else:
                    self.value_label_dict[labname][val[i]] = (
                        self._decode(txt[off[i]:end]))
            if self.format_version >= 117:
                self.path_or_buf.read(6)  # </lbl>
        self._value_labels_read = True
    def _read_strls(self):
        """Read the strL (long string) table into self.GSO keyed by v_o."""
        self.path_or_buf.seek(self.seek_strls)
        self.GSO = {0: ''}
        while True:
            if self.path_or_buf.read(3) != b'GSO':
                break
            if self.format_version == 117:
                v_o = struct.unpack(self.byteorder + 'Q',
                                    self.path_or_buf.read(8))[0]
            else:
                buf = self.path_or_buf.read(12)
                # Only tested on little endian file on little endian machine.
                if self.byteorder == '<':
                    buf = buf[0:2] + buf[4:10]
                else:
                    buf = buf[0:2] + buf[6:]
                v_o = struct.unpack('Q', buf)[0]
            typ = struct.unpack('B', self.path_or_buf.read(1))[0]
            length = struct.unpack(self.byteorder + 'I',
                                   self.path_or_buf.read(4))[0]
            va = self.path_or_buf.read(length)
            if typ == 130:
                # typ 130 is a null-terminated string; strip the NUL and decode
                encoding = 'utf-8'
                if self.format_version == 117:
                    encoding = self._encoding or self._default_encoding
                va = va[0:-1].decode(encoding)
            self.GSO[v_o] = va
    # legacy
    @Appender('DEPRECATED: ' + _data_method_doc)
    def data(self, **kwargs):
        import warnings
        warnings.warn("'data' is deprecated, use 'read' instead")
        if self._data_read:
            raise Exception("Data has already been read.")
        self._data_read = True
        return self.read(None, **kwargs)
    def __next__(self):
        return self.read(nrows=self._chunksize or 1)
    def get_chunk(self, size=None):
        """
        Reads lines from Stata file and returns as dataframe
        Parameters
        ----------
        size : int, defaults to None
            Number of lines to read.  If None, reads whole file.
        Returns
        -------
        DataFrame
        """
        if size is None:
            size = self._chunksize
        return self.read(nrows=size)
    @Appender(_read_method_doc)
    def read(self, nrows=None, convert_dates=None,
             convert_categoricals=None, index=None,
             convert_missing=None, preserve_dtypes=None,
             columns=None, order_categoricals=None):
        # Handle empty file or chunk.  If reading incrementally raise
        # StopIteration.  If reading the whole thing return an empty
        # data frame.
        if (self.nobs == 0) and (nrows is None):
            self._can_read_value_labels = True
            self._data_read = True
            self.close()
            return DataFrame(columns=self.varlist)
        # Handle options: fall back to the values given to __init__
        if convert_dates is None:
            convert_dates = self._convert_dates
        if convert_categoricals is None:
            convert_categoricals = self._convert_categoricals
        if convert_missing is None:
            convert_missing = self._convert_missing
        if preserve_dtypes is None:
            preserve_dtypes = self._preserve_dtypes
        if columns is None:
            columns = self._columns
        if order_categoricals is None:
            order_categoricals = self._order_categoricals
        if nrows is None:
            nrows = self.nobs
        if (self.format_version >= 117) and (self._dtype is None):
            self._can_read_value_labels = True
            self._read_strls()
        # Setup the dtype.
        if self._dtype is None:
            dtype = []  # Convert struct data types to numpy data type
            for i, typ in enumerate(self.typlist):
                if typ in self.NUMPY_TYPE_MAP:
                    dtype.append(('s' + str(i), self.byteorder +
                                  self.NUMPY_TYPE_MAP[typ]))
                else:
                    dtype.append(('s' + str(i), 'S' + str(typ)))
            dtype = np.dtype(dtype)
            self._dtype = dtype
        # Read data
        dtype = self._dtype
        max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
        read_len = nrows * dtype.itemsize
        read_len = min(read_len, max_read_len)
        if read_len <= 0:
            # Iterator has finished, should never be here unless
            # we are reading the file incrementally
            if convert_categoricals:
                self._read_value_labels()
            self.close()
            raise StopIteration
        offset = self._lines_read * dtype.itemsize
        self.path_or_buf.seek(self.data_location + offset)
        read_lines = min(nrows, self.nobs - self._lines_read)
        data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
                             count=read_lines)
        self._lines_read += read_lines
        if self._lines_read == self.nobs:
            self._can_read_value_labels = True
            self._data_read = True
        # if necessary, swap the byte order to native here
        if self.byteorder != self._native_byteorder:
            data = data.byteswap().newbyteorder()
        if convert_categoricals:
            self._read_value_labels()
        if len(data) == 0:
            data = DataFrame(columns=self.varlist, index=index)
        else:
            data = DataFrame.from_records(data, index=index)
            data.columns = self.varlist
        # If index is not specified, use actual row number rather than
        # restarting at 0 for each chunk.
        if index is None:
            ix = np.arange(self._lines_read - read_lines, self._lines_read)
            data = data.set_index(ix)
        if columns is not None:
            try:
                data = self._do_select_columns(data, columns)
            except ValueError:
                self.close()
                raise
        # Decode strings
        for col, typ in zip(data, self.typlist):
            if type(typ) is int:
                data[col] = data[col].apply(
                    self._null_terminate, convert_dtype=True)
        data = self._insert_strls(data)
        cols_ = np.where(self.dtyplist)[0]
        # Convert columns (if needed) to match input type
        index = data.index
        requires_type_conversion = False
        data_formatted = []
        for i in cols_:
            if self.dtyplist[i] is not None:
                col = data.columns[i]
                dtype = data[col].dtype
                if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
                    requires_type_conversion = True
                    data_formatted.append(
                        (col, Series(data[col], index, self.dtyplist[i])))
                else:
                    data_formatted.append((col, data[col]))
        if requires_type_conversion:
            data = DataFrame.from_items(data_formatted)
        del data_formatted
        self._do_convert_missing(data, convert_missing)
        if convert_dates:
            cols = np.where(lmap(lambda x: x in _date_formats,
                                 self.fmtlist))[0]
            for i in cols:
                col = data.columns[i]
                try:
                    data[col] = _stata_elapsed_date_to_datetime_vec(
                        data[col],
                        self.fmtlist[i])
                except ValueError:
                    self.close()
                    raise
        if convert_categoricals and self.format_version > 108:
            data = self._do_convert_categoricals(data,
                                                 self.value_label_dict,
                                                 self.lbllist,
                                                 order_categoricals)
        if not preserve_dtypes:
            # Upcast to the standard pandas types (float64 / int64)
            retyped_data = []
            convert = False
            for col in data:
                dtype = data[col].dtype
                if dtype in (np.float16, np.float32):
                    dtype = np.float64
                    convert = True
                elif dtype in (np.int8, np.int16, np.int32):
                    dtype = np.int64
                    convert = True
                retyped_data.append((col, data[col].astype(dtype)))
            if convert:
                data = DataFrame.from_items(retyped_data)
        return data
    def _do_convert_missing(self, data, convert_missing):
        # Check for missing values, and replace if found
        for i, colname in enumerate(data):
            fmt = self.typlist[i]
            if fmt not in self.VALID_RANGE:
                continue
            nmin, nmax = self.VALID_RANGE[fmt]
            series = data[colname]
            # Values outside the valid range encode Stata missing codes
            missing = np.logical_or(series < nmin, series > nmax)
            if not missing.any():
                continue
            if convert_missing:  # Replacement follows Stata notation
                missing_loc = np.argwhere(missing)
                umissing, umissing_loc = np.unique(series[missing],
                                                   return_inverse=True)
                # NOTE(review): np.object is a deprecated alias (removed in
                # numpy>=1.24) — would need np.object_ on modern numpy.
                replacement = Series(series, dtype=np.object)
                for j, um in enumerate(umissing):
                    missing_value = StataMissingValue(um)
                    loc = missing_loc[umissing_loc == j]
                    replacement.iloc[loc] = missing_value
            else:  # All replacements are identical
                dtype = series.dtype
                if dtype not in (np.float32, np.float64):
                    dtype = np.float64
                replacement = Series(series, dtype=dtype)
                replacement[missing] = np.nan
            data[colname] = replacement
    def _insert_strls(self, data):
        """Replace strL key columns (typ 'Q') with the strings from self.GSO."""
        if not hasattr(self, 'GSO') or len(self.GSO) == 0:
            return data
        for i, typ in enumerate(self.typlist):
            if typ != 'Q':
                continue
            data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
        return data
    def _do_select_columns(self, data, columns):
        """Restrict *data* (and the reader's metadata lists) to *columns*."""
        if not self._column_selector_set:
            column_set = set(columns)
            if len(column_set) != len(columns):
                raise ValueError('columns contains duplicate entries')
            unmatched = column_set.difference(data.columns)
            if unmatched:
                raise ValueError('The following columns were not found in the '
                                 'Stata data set: ' +
                                 ', '.join(list(unmatched)))
            # Copy information for retained columns for later processing
            dtyplist = []
            typlist = []
            fmtlist = []
            lbllist = []
            for col in columns:
                i = data.columns.get_loc(col)
                dtyplist.append(self.dtyplist[i])
                typlist.append(self.typlist[i])
                fmtlist.append(self.fmtlist[i])
                lbllist.append(self.lbllist[i])
            self.dtyplist = dtyplist
            self.typlist = typlist
            self.fmtlist = fmtlist
            self.lbllist = lbllist
            self._column_selector_set = True
        return data[columns]
    def _do_convert_categoricals(self, data, value_label_dict, lbllist,
                                 order_categoricals):
        """
        Converts categorical columns to Categorical type.
        """
        value_labels = list(compat.iterkeys(value_label_dict))
        cat_converted_data = []
        for col, label in zip(data, lbllist):
            if label in value_labels:
                # Explicit call with ordered=True
                cat_data = Categorical(data[col], ordered=order_categoricals)
                categories = []
                for category in cat_data.categories:
                    if category in value_label_dict[label]:
                        categories.append(value_label_dict[label][category])
                    else:
                        categories.append(category)  # Partially labeled
                try:
                    cat_data.categories = categories
                except ValueError:
                    # Duplicate labels would collapse distinct codes; report
                    # which labels repeat instead of failing opaquely.
                    vc = Series(categories).value_counts()
                    repeats = list(vc.index[vc > 1])
                    repeats = '\n' + '-' * 80 + '\n'.join(repeats)
                    msg = 'Value labels for column {0} are not unique. The ' \
                          'repeated labels are:\n{1}'.format(col, repeats)
                    raise ValueError(msg)
                # TODO: is the next line needed above in the data(...) method?
                cat_data = Series(cat_data, index=data.index)
                cat_converted_data.append((col, cat_data))
            else:
                cat_converted_data.append((col, data[col]))
        data = DataFrame.from_items(cat_converted_data)
        return data
    def data_label(self):
        """Returns data label of Stata file"""
        # NOTE(review): _read_header assigns the instance attribute
        # self.data_label, which shadows this method — on a constructed
        # reader this method is unreachable; confirm before relying on it.
        return self.data_label
    def variable_labels(self):
        """Returns variable labels as a dict, associating each variable name
        with corresponding label
        """
        return dict(zip(self.varlist, self._variable_labels))
    def value_labels(self):
        """Returns a dict, associating each variable name a dict, associating
        each value its corresponding label
        """
        if not self._value_labels_read:
            self._read_value_labels()
        return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
Takes a char string and pads it with null bytes until it's length chars
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
    """
    A class for writing Stata binary dta files (format 114).
    Parameters
    ----------
    fname : str or buffer
        String path or file-like object
    data : DataFrame
        Input to save
    convert_dates : dict
        Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
        'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
        Datetime columns that do not have a conversion type specified will be
        converted to 'tc'. Raises NotImplementedError if a datetime column has
        timezone information
    write_index : bool
        Write the index to Stata dataset.
    encoding : str
        Default is latin-1. Unicode is not supported
    byteorder : str
        Can be ">", "<", "little", or "big". default is `sys.byteorder`
    time_stamp : datetime
        A datetime to use as file creation date. Default is the current time
    data_label : str
        A label for the data set. Must be 80 characters or smaller.
    variable_labels : dict
        Dictionary containing columns as keys and variable labels as values.
        Each label must be 80 characters or smaller.
        .. versionadded:: 0.19.0
    Returns
    -------
    writer : StataWriter instance
        The StataWriter instance has a write_file method, which will
        write the file to the given `fname`.
    Raises
    ------
    NotImplementedError
        * If datetimes contain timezone information
    ValueError
        * Columns listed in convert_dates are not either datetime64[ns]
          or datetime.datetime
        * Column dtype is not representable in Stata
        * Column listed in convert_dates is not in DataFrame
        * Categorical label contains more than 32,000 characters
    Examples
    --------
    >>> import pandas as pd
    >>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
    >>> writer = StataWriter('./data_file.dta', data)
    >>> writer.write_file()
    Or with dates
    >>> from datetime import datetime
    >>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
    >>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
    >>> writer.write_file()
    """
    def __init__(self, fname, data, convert_dates=None, write_index=True,
                 encoding="latin-1", byteorder=None, time_stamp=None,
                 data_label=None, variable_labels=None):
        super(StataWriter, self).__init__(encoding)
        self._convert_dates = {} if convert_dates is None else convert_dates
        self._write_index = write_index
        self._time_stamp = time_stamp
        self._data_label = data_label
        self._variable_labels = variable_labels
        # attach nobs, nvars, data, varlist, typlist
        self._prepare_pandas(data)
        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)
        self._fname = fname
        # Maps Stata integer type codes back to the numpy types they encode.
        self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
    def _write(self, to_write):
        """
        Helper to call encode before writing to file for Python 3 compat.
        """
        if compat.PY3:
            self._file.write(to_write.encode(self._encoding or
                                             self._default_encoding))
        else:
            self._file.write(to_write)
    def _prepare_categoricals(self, data):
        """Check for categorical columns, retain categorical information for
        Stata file and convert categorical data to int"""
        is_cat = [is_categorical_dtype(data[col]) for col in data]
        self._is_col_cat = is_cat
        self._value_labels = []
        if not any(is_cat):
            return data
        get_base_missing_value = StataMissingValue.get_base_missing_value
        index = data.index
        data_formatted = []
        for col, col_is_cat in zip(data, is_cat):
            if col_is_cat:
                self._value_labels.append(StataValueLabel(data[col]))
                dtype = data[col].cat.codes.dtype
                if dtype == np.int64:
                    raise ValueError('It is not possible to export '
                                     'int64-based categorical data to Stata.')
                values = data[col].cat.codes.values.copy()
                # Upcast if needed so that correct missing values can be set
                if values.max() >= get_base_missing_value(dtype):
                    if dtype == np.int8:
                        dtype = np.int16
                    elif dtype == np.int16:
                        dtype = np.int32
                    else:
                        dtype = np.float64
                    values = np.array(values, dtype=dtype)
                # Replace missing values with Stata missing value for type
                # (pandas stores a missing categorical code as -1)
                values[values == -1] = get_base_missing_value(dtype)
                data_formatted.append((col, values, index))
            else:
                data_formatted.append((col, data[col]))
        return DataFrame.from_items(data_formatted)
    def _replace_nans(self, data):
        """Checks floating point data columns for nans, and replaces these with
        the generic Stata for missing value (.)"""
        for c in data:
            dtype = data[c].dtype
            if dtype in (np.float32, np.float64):
                if dtype == np.float32:
                    replacement = self.MISSING_VALUES['f']
                else:
                    replacement = self.MISSING_VALUES['d']
                data[c] = data[c].fillna(replacement)
        return data
    def _check_column_names(self, data):
        """
        Checks column names to ensure that they are valid Stata column names.
        This includes checks for:
        * Non-string names
        * Stata keywords
        * Variables that start with numbers
        * Variables with names that are too long
        When an illegal variable name is detected, it is converted, and if
        dates are exported, the variable name is propagated to the date
        conversion dictionary
        """
        converted_names = []
        columns = list(data.columns)
        original_columns = columns[:]
        duplicate_var_id = 0
        for j, name in enumerate(columns):
            orig_name = name
            if not isinstance(name, string_types):
                name = text_type(name)
            # Replace every character outside [A-Za-z0-9_] with underscore
            for c in name:
                if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
                        (c < '0' or c > '9') and c != '_':
                    name = name.replace(c, '_')
            # Variable name must not be a reserved word
            if name in self.RESERVED_WORDS:
                name = '_' + name
            # Variable name may not start with a number
            if name[0] >= '0' and name[0] <= '9':
                name = '_' + name
            # Stata variable names are limited to 32 characters
            name = name[:min(len(name), 32)]
            if not name == orig_name:
                # check for duplicates
                while columns.count(name) > 0:
                    # prepend ascending number to avoid duplicates
                    name = '_' + str(duplicate_var_id) + name
                    name = name[:min(len(name), 32)]
                    duplicate_var_id += 1
                # need to possibly encode the orig name if its unicode
                try:
                    orig_name = orig_name.encode('utf-8')
                except:
                    # NOTE(review): bare except keeps any name type usable in
                    # the warning below; narrowing it would be safer but is a
                    # behavior change left for a separate fix.
                    pass
                converted_names.append(
                    '{0} -> {1}'.format(orig_name, name))
            columns[j] = name
        data.columns = columns
        # Check date conversion, and fix key if needed
        if self._convert_dates:
            for c, o in zip(columns, original_columns):
                if c != o:
                    self._convert_dates[c] = self._convert_dates[o]
                    del self._convert_dates[o]
        if converted_names:
            import warnings
            ws = invalid_name_doc.format('\n    '.join(converted_names))
            warnings.warn(ws, InvalidColumnName)
        return data
    def _prepare_pandas(self, data):
        # NOTE: we might need a different API / class for pandas objects so
        # we can set different semantics - handle this with a PR to pandas.io
        data = data.copy()
        if self._write_index:
            data = data.reset_index()
        # Ensure column names are strings
        data = self._check_column_names(data)
        # Check columns for compatibility with stata, upcast if necessary
        # Raise if outside the supported range
        data = _cast_to_stata_types(data)
        # Replace NaNs with Stata missing values
        data = self._replace_nans(data)
        # Convert categoricals to int data, and strip labels
        data = self._prepare_categoricals(data)
        self.nobs, self.nvar = data.shape
        self.data = data
        self.varlist = data.columns.tolist()
        dtypes = data.dtypes
        # Ensure all date columns are converted
        for col in data:
            if col in self._convert_dates:
                continue
            if is_datetime64_dtype(data[col]):
                self._convert_dates[col] = 'tc'
        self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
                                                         self.varlist)
        for key in self._convert_dates:
            new_type = _convert_datetime_to_stata_type(
                self._convert_dates[key]
            )
            dtypes[key] = np.dtype(new_type)
        self.typlist = []
        self.fmtlist = []
        for col, dtype in dtypes.iteritems():
            self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
            self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
        # set the given format for the datetime cols
        if self._convert_dates is not None:
            for key in self._convert_dates:
                self.fmtlist[key] = self._convert_dates[key]
    def write_file(self):
        """Write the prepared DataFrame to `fname` as a Stata 114 dta file."""
        self._file = _open_file_binary_write(
            self._fname, self._encoding or self._default_encoding
        )
        try:
            self._write_header(time_stamp=self._time_stamp,
                               data_label=self._data_label)
            self._write_descriptors()
            self._write_variable_labels()
            # write 5 zeros for expansion fields
            self._write(_pad_bytes("", 5))
            self._prepare_data()
            self._write_data()
            self._write_value_labels()
        finally:
            self._file.close()
    def _write_value_labels(self):
        # One value-label table per categorical column, in column order
        for vl in self._value_labels:
            self._file.write(vl.generate_value_label(self._byteorder,
                                                     self._encoding))
    def _write_header(self, data_label=None, time_stamp=None):
        byteorder = self._byteorder
        # ds_format - just use 114
        self._file.write(struct.pack("b", 114))
        # byteorder - old-style ternary: "\x01" big-endian, "\x02" little
        self._write(byteorder == ">" and "\x01" or "\x02")
        # filetype
        self._write("\x01")
        # unused
        self._write("\x00")
        # number of vars, 2 bytes
        self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
        # number of obs, 4 bytes
        self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
        # data label 81 bytes, char, null terminated
        if data_label is None:
            self._file.write(self._null_terminate(_pad_bytes("", 80)))
        else:
            self._file.write(
                self._null_terminate(_pad_bytes(data_label[:80], 80))
            )
        # time stamp, 18 bytes, char, null terminated
        # format dd Mon yyyy hh:mm
        if time_stamp is None:
            time_stamp = datetime.datetime.now()
        elif not isinstance(time_stamp, datetime.datetime):
            raise ValueError("time_stamp should be datetime type")
        self._file.write(
            self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
        )
    def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
                           fmtlist=None, lbllist=None):
        # NOTE(review): the keyword parameters are accepted but unused; the
        # descriptors are always taken from the attributes set in
        # _prepare_pandas.
        nvar = self.nvar
        # typlist, length nvar, format byte array
        for typ in self.typlist:
            self._write(typ)
        # varlist names are checked by _check_column_names
        # varlist, requires null terminated
        for name in self.varlist:
            name = self._null_terminate(name, True)
            name = _pad_bytes(name[:32], 33)
            self._write(name)
        # srtlist, 2*(nvar+1), int array, encoded by byteorder
        srtlist = _pad_bytes("", 2 * (nvar + 1))
        self._write(srtlist)
        # fmtlist, 49*nvar, char array
        for fmt in self.fmtlist:
            self._write(_pad_bytes(fmt, 49))
        # lbllist, 33*nvar, char array
        for i in range(nvar):
            # Use variable name when categorical
            if self._is_col_cat[i]:
                name = self.varlist[i]
                name = self._null_terminate(name, True)
                name = _pad_bytes(name[:32], 33)
                self._write(name)
            else:  # Default is empty label
                self._write(_pad_bytes("", 33))
    def _write_variable_labels(self):
        # Missing labels are 80 blank characters plus null termination
        blank = _pad_bytes('', 81)
        if self._variable_labels is None:
            for i in range(self.nvar):
                self._write(blank)
            return
        for col in self.data:
            if col in self._variable_labels:
                label = self._variable_labels[col]
                if len(label) > 80:
                    raise ValueError('Variable labels must be 80 characters '
                                     'or fewer')
                is_latin1 = all(ord(c) < 256 for c in label)
                if not is_latin1:
                    raise ValueError('Variable labels must contain only '
                                     'characters that can be encoded in '
                                     'Latin-1')
                self._write(_pad_bytes(label, 81))
            else:
                self._write(blank)
    def _prepare_data(self):
        """Convert self.data into the flat record array that _write_data
        streams to disk (dates converted, strings padded and encoded)."""
        data = self.data
        typlist = self.typlist
        convert_dates = self._convert_dates
        # 1. Convert dates
        if self._convert_dates is not None:
            for i, col in enumerate(data):
                if i in convert_dates:
                    data[col] = _datetime_to_stata_elapsed_vec(data[col],
                                                               self.fmtlist[i])
        # 2. Convert bad string data to '' and pad to correct length
        dtype = []
        data_cols = []
        has_strings = False
        for i, col in enumerate(data):
            typ = ord(typlist[i])
            if typ <= 244:
                # type codes <= 244 are fixed-width string columns
                has_strings = True
                data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
                stype = 'S%d' % typ
                dtype.append(('c' + str(i), stype))
                string = data[col].str.encode(self._encoding)
                data_cols.append(string.values.astype(stype))
            else:
                dtype.append(('c' + str(i), data[col].dtype))
                data_cols.append(data[col].values)
        dtype = np.dtype(dtype)
        if has_strings:
            self.data = np.fromiter(zip(*data_cols), dtype=dtype)
        else:
            self.data = data.to_records(index=False)
    def _write_data(self):
        # self.data is a record array after _prepare_data
        data = self.data
        data.tofile(self._file)
    def _null_terminate(self, s, as_string=False):
        """Append a null byte; returns bytes on Python 3 unless as_string,
        and str on Python 2."""
        null_byte = '\x00'
        if compat.PY3 and not as_string:
            s += null_byte
            return s.encode(self._encoding)
        else:
            s += null_byte
            return s
| gpl-3.0 |
pravsripad/jumeg | examples/causality/plot_inter_and_intra_lobe_causality.py | 2 | 3317 | """
Group a causality matrix by lobes and plot the resulting
inter- and intra-lobe causality.
Author: Christian Kiefer <ch.kiefer@fz-juelich.de>
"""
import os
import os.path as op
import matplotlib.pyplot as plt
import mne
import numpy as np
from jumeg.connectivity.con_utils import group_con_matrix_by_lobe
from jumeg.connectivity.con_viz import plot_grouped_causality_circle
from jumeg.jumeg_utils import get_jumeg_path
###############################################################################
# Load the grouping files
###############################################################################
# YAML files shipped with jumeg that map cortical labels to groups / lobes.
grouping_yaml_fname = op.join(get_jumeg_path(), 'data',
                              'desikan_aparc_cortex_based_grouping_ck.yaml')
lobe_grouping_yaml_fname = op.join(get_jumeg_path(), 'data',
                                   'lobes_grouping.yaml')
###############################################################################
# Load anatomical labels
###############################################################################
subjects_dir = os.environ['SUBJECTS_DIR']
full_labels = mne.read_labels_from_annot(subject='fsaverage', parc='aparc',
                                         hemi='both', subjects_dir=subjects_dir)
# Drop the 'unknown' labels; keep only named cortical regions
full_label_names = [full_label.name for full_label in full_labels if full_label.name.find('unknown') == -1]
###############################################################################
# create random causality matrix
###############################################################################
# create causality matrix (seeded for reproducibility of the example figures)
np.random.seed(42)
cau = np.random.uniform(-0.99, 0.01, (len(full_label_names), len(full_label_names)))
# clip negative draws to zero, then rescale the remainder into [0, 1]
cau[cau < 0] = 0
cau = cau / 0.01  # values between 0 and 1
cau_grp, grp_label_names = group_con_matrix_by_lobe(con=cau, label_names=full_label_names,
                                                    grouping_yaml_fname=grouping_yaml_fname)
###############################################################################
# Compare original matrix with grouped matrix plot
###############################################################################
# Figures are closed after creation; this example is meant for the docs build.
fig = plot_grouped_causality_circle(cau, grouping_yaml_fname, full_label_names,
                                    title='original causality matrix', n_lines=None,
                                    labels_mode=None, replacer_dict=None, out_fname=None,
                                    colormap='magma_r', colorbar=True, colorbar_pos=(-0.25, 0.05),
                                    arrowstyle='->,head_length=0.7,head_width=0.4',
                                    figsize=(9.1, 6), vmin=0., vmax=1.0, ignore_diagonal=True,
                                    show=True)
plt.close(fig)
fig = plot_grouped_causality_circle(cau_grp, lobe_grouping_yaml_fname, grp_label_names,
                                    title='test', n_lines=None, labels_mode=None,
                                    replacer_dict=None, out_fname=None, colormap='magma_r',
                                    colorbar=True, colorbar_pos=(-0.25, 0.05),
                                    arrowstyle='->,head_length=0.7,head_width=0.4',
                                    figsize=(9.1, 6), vmin=0., ignore_diagonal=False,
                                    show=True)
plt.close(fig)
| bsd-3-clause |
ai-se/XTREE | src/Planners/XTREE/methods1.py | 1 | 2615 | #! /Users/rkrsn/anaconda/bin/python
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH so the sibling AXE / pystats checkouts and the local
# where2 package can be imported below (hard-coded developer layout).
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/'  # AXE
pystat = HOME + '/git/pystats/'  # PySTAT
cwd = getcwd()  # Current Directory (NOTE(review): appears unused below)
sys.path.extend([axe, pystat, './where2'])
from tools.axe.dtree import *
from tools.axe.table import *
# from w2 import *
from lib.w2 import where2, prepare, leaves
from lib.makeAmodel import makeAModel
import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
import smote
def explore(dir):
  """Split the CSV files under each subdirectory of ``dir`` into a
  training set and a held-out test file.

  For every subdirectory found below ``dir``, the last file listed by
  os.walk becomes the (single-element) test list and the remaining files
  (minus any '.DS_Store') become the training list.

  Returns a pair ``(training, testing)`` of lists of path lists, one
  entry per subdirectory.
  """
  subdirs = [dirpath for dirpath, _, _ in walk(dir)][1:]
  training, testing = [], []
  for sub in subdirs:
    entries = [[dirpath, fnames] for dirpath, _, fnames in walk(sub)]
    base, names = entries[0]
    # hold out the last file of this subdirectory for testing
    held_out = names.pop(-1)
    training.append([base + '/' + n for n in names if n != '.DS_Store'])
    testing.append([base + '/' + held_out])
  return training, testing
def newTable(tbl, headerLabel, Rows):
  """Clone ``tbl``, append a symbolic column named ``headerLabel`` and
  re-populate the clone with ``Rows``."""
  extra = Sym()
  extra.col = len(tbl.headers)
  extra.name = headerLabel
  shell = clone(tbl)
  shell.headers = tbl.headers + [extra]
  return clone(shell, rows=Rows)
def createTbl(
        data,
        settings=None,
        _smote=False,
        isBin=False,
        bugThres=1,
        duplicate=False):
  """
  Build a clustered table from one or more CSV defect-data files.

  Rows from every file in `data` are pooled, clustered with where2, and
  the cluster id is appended to each row under a synthetic '=klass'
  column.

  kwargs:
  settings            : optional where2 configuration forwarded to prepare()
  _smote = True/False : SMOTE input data (or not)
  isBin = True/False  : Reduce bugs to defects/no defects
  bugThres = int      : Threshold for marking stuff as defective,
                        default = 1. Not defective => Bugs < 1
  duplicate           : forwarded to csv2py
  """
  makeaModel = makeAModel()
  _r = []
  for t in data:
    m = makeaModel.csv2py(t, _smote=_smote, duplicate=duplicate)
    _r += m._rows
  # Pool rows from every input file into the last parsed model.
  m._rows = _r
  # Initialize all parameters for where2 to run.
  # BUG FIX: previously `settings=None` was hard-coded here, silently
  # ignoring the caller-supplied `settings` argument.
  prepare(m, settings=settings)
  tree = where2(m, m._rows)  # Decision tree using where2
  # NOTE(review): `t` is the loop variable left over from the last file in
  # `data`, so the table header/metadata come from that last file only.
  tbl = table(t)
  headerLabel = '=klass'
  Rows = []
  for k, _ in leaves(tree):
    for j in k.val:
      tmp = j.cells
      if isBin:
        # collapse raw bug counts to a binary defective / clean flag
        tmp[-1] = 0 if tmp[-1] < bugThres else 1
      # tag each row with (a short hash of) the cluster it landed in
      tmp.append('_' + str(id(k) % 1000))
      j.__dict__.update({'cells': tmp})
      Rows.append(j.cells)
  return newTable(tbl, headerLabel, Rows)
def test_createTbl():
  """Smoke test: build the camel-1.6 table with and without SMOTE and
  report the resulting row counts."""
  src = '../Data/camel/camel-1.6.csv'
  plain = createTbl([src], _smote=False)
  oversampled = createTbl([src], _smote=True)
  print(len(plain._rows), len(oversampled._rows))
def drop(test, tree):
  """Route ``test`` through ``tree`` and return the leaf it lands in."""
  return apex(test, tree)
if __name__ == '__main__':
  # Run the smoke test only when executed directly (not on import).
  test_createTbl()
| mit |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_visualize_evoked.py | 1 | 9914 | """
.. _tut_viz_evoked:
=====================
Visualize Evoked data
=====================
In this tutorial we focus on plotting functions of :class:`mne.Evoked`.
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
# sphinx_gallery_thumbnail_number = 9
###############################################################################
# First we read the evoked object from a file. Check out
# :ref:`tut_epoching_and_averaging` to get to this stage from raw data.
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evoked)
###############################################################################
# Notice that ``evoked`` is a list of :class:`evoked <mne.Evoked>` instances.
# You can read only one of the categories by passing the argument ``condition``
# to :func:`mne.read_evokeds`. To make things more simple for this tutorial, we
# read each instance to a variable.
evoked_l_aud = evoked[0]
evoked_r_aud = evoked[1]
evoked_l_vis = evoked[2]
evoked_r_vis = evoked[3]
###############################################################################
# Let's start with a simple one. We plot event related potentials / fields
# (ERP/ERF). The bad channels are not plotted by default. Here we explicitly
# set the ``exclude`` parameter to show the bad channels in red. All plotting
# functions of MNE-python return a handle to the figure instance. When we have
# the handle, we can customise the plots to our liking.
fig = evoked_l_aud.plot(exclude=())
###############################################################################
# With the figure handle returned above we can keep customising the plot. For
# example, we can get rid of the empty space with a simple function call.
fig.tight_layout()
###############################################################################
# Now let's make it a bit fancier and only use MEG channels. Many of the
# MNE-functions include a ``picks`` parameter to include a selection of
# channels. ``picks`` is simply a list of channel indices that you can easily
# construct with :func:`mne.pick_types`. See also :func:`mne.pick_channels` and
# :func:`mne.pick_channels_regexp`.
# Using ``spatial_colors=True``, the individual channel lines are color coded
# to show the sensor positions - specifically, the x, y, and z locations of
# the sensors are transformed into R, G and B values.
picks = mne.pick_types(evoked_l_aud.info, meg=True, eeg=False, eog=False)
evoked_l_aud.plot(spatial_colors=True, gfp=True, picks=picks)
###############################################################################
# Notice the legend on the left. The colors would suggest that there may be two
# separate sources for the signals. This wasn't obvious from the first figure.
# Try painting the slopes with left mouse button. It should open a new window
# with topomaps (scalp plots) of the average over the painted area. There is
# also a function for drawing topomaps separately.
evoked_l_aud.plot_topomap()
###############################################################################
# By default the topomaps are drawn from evenly spread out points of time over
# the evoked data. We can also define the times ourselves.
times = np.arange(0.05, 0.151, 0.05)
evoked_r_aud.plot_topomap(times=times, ch_type='mag')
###############################################################################
# Or we can automatically select the peaks.
evoked_r_aud.plot_topomap(times='peaks', ch_type='mag')
###############################################################################
# You can take a look at the documentation of :func:`mne.Evoked.plot_topomap`
# or simply write ``evoked_r_aud.plot_topomap?`` in your python console to
# see the different parameters you can pass to this function. Most of the
# plotting functions also accept ``axes`` parameter. With that, you can
# customise your plots even further. First we create a set of matplotlib
# axes in a single figure and plot all of our evoked categories next to each
# other.
fig, ax = plt.subplots(1, 5, figsize=(8, 2))
kwargs = dict(times=0.1, show=False, vmin=-300, vmax=300)
evoked_l_aud.plot_topomap(axes=ax[0], colorbar=True, **kwargs)
evoked_r_aud.plot_topomap(axes=ax[1], colorbar=False, **kwargs)
evoked_l_vis.plot_topomap(axes=ax[2], colorbar=False, **kwargs)
evoked_r_vis.plot_topomap(axes=ax[3], colorbar=False, **kwargs)
# note: the loop variable deliberately rebinds ``ax`` to each of the first
# four axes in turn while setting the titles
for ax, title in zip(ax[:4], ['Aud/L', 'Aud/R', 'Vis/L', 'Vis/R']):
    ax.set_title(title)
plt.show()
###############################################################################
# Notice that we created five axes, but had only four categories. The fifth
# axes was used for drawing the colorbar. You must provide room for it when you
# create this kind of custom plots or turn the colorbar off with
# ``colorbar=False``. That's what the warnings are trying to tell you. Also, we
# used ``show=False`` for the three first function calls. This prevents the
# showing of the figure prematurely. The behavior depends on the mode you are
# using for your python session. See http://matplotlib.org/users/shell.html for
# more information.
#
# We can combine the two kinds of plots in one figure using the
# :func:`mne.Evoked.plot_joint` method of Evoked objects. Called as-is
# (``evoked.plot_joint()``), this function should give an informative display
# of spatio-temporal dynamics.
# You can directly style the time series part and the topomap part of the plot
# using the ``topomap_args`` and ``ts_args`` parameters. You can pass key-value
# pairs as a python dictionary. These are then passed as parameters to the
# topomaps (:func:`mne.Evoked.plot_topomap`) and time series
# (:func:`mne.Evoked.plot`) of the joint plot.
# For an example of specific styling using these ``topomap_args`` and
# ``ts_args`` arguments, here, topomaps at specific time points
# (90 and 200 ms) are shown, sensors are not plotted (via an argument
# forwarded to `plot_topomap`), and the Global Field Power is shown:
ts_args = dict(gfp=True)
topomap_args = dict(sensors=False)
evoked_r_aud.plot_joint(title='right auditory', times=[.09, .20],
                        ts_args=ts_args, topomap_args=topomap_args)
###############################################################################
# Sometimes, you may want to compare two or more conditions at a selection of
# sensors, or e.g. for the Global Field Power. For this, you can use the
# function :func:`mne.viz.plot_compare_evokeds`. The easiest way is to create
# a Python dictionary, where the keys are condition names and the values are
# :class:`mne.Evoked` objects. If you provide lists of :class:`mne.Evoked`
# objects, such as those for multiple subjects, the grand average is plotted,
# along with a confidence interval band - this can be used to contrast
# conditions for a whole experiment.
# First, we load in the evoked objects into a dictionary, setting the keys to
# '/'-separated tags (as we can do with event_ids for epochs). Then, we plot
# with :func:`mne.viz.plot_compare_evokeds`.
# The plot is styled with dictionary arguments, again using "/"-separated tags.
# We plot a MEG channel with a strong auditory response.
conditions = ["Left Auditory", "Right Auditory", "Left visual", "Right visual"]
evoked_dict = dict()
for condition in conditions:
    evoked_dict[condition.replace(" ", "/")] = mne.read_evokeds(
        fname, baseline=(None, 0), proj=True, condition=condition)
print(evoked_dict)
colors = dict(Left="Crimson", Right="CornFlowerBlue")
linestyles = dict(Auditory='-', visual='--')
pick = evoked_dict["Left/Auditory"].ch_names.index('MEG 1811')
mne.viz.plot_compare_evokeds(evoked_dict, picks=pick, colors=colors,
                             linestyles=linestyles)
###############################################################################
# We can also plot the activations as images. The time runs along the x-axis
# and the channels along the y-axis. The amplitudes are color coded so that
# the amplitudes from negative to positive translates to shift from blue to
# red. White means zero amplitude. You can use the ``cmap`` parameter to define
# the color map yourself. The accepted values include all matplotlib colormaps.
evoked_r_aud.plot_image(picks=picks)
###############################################################################
# Finally we plot the sensor data as a topographical view. In the simple case
# we plot only left auditory responses, and then we plot them all in the same
# figure for comparison. Click on the individual plots to open them bigger.
title = 'MNE sample data\n(condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment,
                       background_color='k', color=['white'])
mne.viz.plot_evoked_topo(evoked, title=title % 'Left/Right Auditory/Visual',
                         background_color='w')
###############################################################################
# Visualizing field lines in 3D
# -----------------------------
# We now compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface.
#
# To do this we'll need coregistration information. See
# :ref:`tut_forward` for more details.
#
# Here we just illustrate usage.
subjects_dir = data_path + '/subjects'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
maps = mne.make_field_map(evoked_l_aud, trans=trans_fname, subject='sample',
                          subjects_dir=subjects_dir, n_jobs=1)
# explore several points in time
field_map = evoked_l_aud.plot_field(maps, time=.1)
###############################################################################
# .. note::
#     If trans_fname is set to None then only MEG estimates can be visualized.
nelango/ViralityAnalysis | model/lib/sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
# Public API of sklearn.metrics.
# BUG FIX: 'pairwise_distances_argmin_min' was listed twice; the duplicate
# entry has been removed so the list matches the set of exported names.
__all__ = [
    'accuracy_score',
    'adjusted_mutual_info_score',
    'adjusted_rand_score',
    'auc',
    'average_precision_score',
    'classification_report',
    'cluster',
    'completeness_score',
    'confusion_matrix',
    'consensus_score',
    'coverage_error',
    'euclidean_distances',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'get_scorer',
    'hamming_loss',
    'hinge_loss',
    'homogeneity_completeness_v_measure',
    'homogeneity_score',
    'jaccard_similarity_score',
    'label_ranking_average_precision_score',
    'label_ranking_loss',
    'log_loss',
    'make_scorer',
    'matthews_corrcoef',
    'mean_absolute_error',
    'mean_squared_error',
    'median_absolute_error',
    'mutual_info_score',
    'normalized_mutual_info_score',
    'pairwise_distances',
    'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
    'pairwise_kernels',
    'precision_recall_curve',
    'precision_recall_fscore_support',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'roc_curve',
    'SCORERS',
    'silhouette_samples',
    'silhouette_score',
    'v_measure_score',
    'zero_one_loss',
    'brier_score_loss',
]
| mit |
jlegendary/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0  # sparsify coef
y = np.dot(X, coef)
# add noise
# BUG FIX: previously ``np.random.normal((n_samples,))`` passed the tuple as
# the *mean* of the distribution, adding a single constant offset instead of
# per-sample noise. Pass it as ``size`` to draw one noise value per sample.
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
# BUG FIX: use floor division so the slice index is an int under Python 3
# (``n_samples / 2`` is a float there and raises TypeError when indexing).
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
          % (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/backends/backend_template.py | 8 | 9384 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend methon in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can place you can
use any module in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, e.g.,::
python simple_plot.py -dmodule://my_backend
If your backend implements support for saving figures (i.e. has a print_xyz()
method) you can register it as the default handler for a given file type
from matplotlib.backend_bases import register_backend
register_backend('xyz', 'my_backend', 'XYZ File Format')
...
plt.savefig("figure.xyz")
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* varables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
    """
    The renderer handles drawing/rendering operations.

    This is a minimal do-nothing class that can be used to get started when
    writing a new backend. Refer to backend_bases.RendererBase for
    documentation of the classes methods.
    """

    def __init__(self, dpi):
        # Resolution is the only per-renderer state the template keeps.
        self.dpi = dpi

    def draw_path(self, gc, path, transform, rgbFace=None):
        # Required method: render a Path.  No-op in the template backend.
        pass

    # draw_markers is optional, and we get more correct relative
    # timings by leaving it out.  backend implementers concerned with
    # performance will probably want to implement it
#     def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
#         pass

    # draw_path_collection is optional, and we get more correct
    # relative timings by leaving it out. backend implementers concerned with
    # performance will probably want to implement it
#     def draw_path_collection(self, gc, master_transform, paths,
#                              all_transforms, offsets, offsetTrans, facecolors,
#                              edgecolors, linewidths, linestyles,
#                              antialiaseds):
#         pass

    # draw_quad_mesh is optional, and we get more correct
    # relative timings by leaving it out.  backend implementers concerned with
    # performance will probably want to implement it
#     def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
#                        coordinates, offsets, offsetTrans, facecolors,
#                        antialiased, edgecolors):
#         pass

    def draw_image(self, gc, x, y, im):
        # Required method: blit an image at (x, y).  No-op here.
        pass

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        # Required method: draw the string *s* at (x, y).  No-op here.
        pass

    def flipy(self):
        # True means y=0 is the *top* of the canvas for this backend.
        return True

    def get_canvas_width_height(self):
        # Dummy fixed size; a real backend reports the actual surface size.
        return 100, 100

    def get_text_width_height_descent(self, s, prop, ismath):
        # Dummy metrics; a real backend measures the rendered text.
        return 1, 1, 1

    def new_gc(self):
        # Hand out this backend's GraphicsContext subclass.
        return GraphicsContextTemplate()

    def points_to_pixels(self, points):
        # if backend doesn't have dpi, e.g., postscript or svg
        return points
        # elif backend assumes a value for pixels_per_inch
        # return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
        # else
        # return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
    """
    The graphics context provides the color, line styles, etc... See the gtk
    and postscript backends for examples of mapping the graphics context
    attributes (cap styles, join styles, line widths, colors) to a particular
    backend.  In GTK this is done by wrapping a gtk.gdk.GC object and
    forwarding the appropriate calls to it using a dictionary mapping styles
    to gdk constants.  In Postscript, all the work is done by the renderer,
    mapping line styles to postscript calls.

    If it's more appropriate to do the mapping at the renderer level (as in
    the postscript backend), you don't need to override any of the GC methods.
    If it's more appropriate to wrap an instance (as in the GTK backend) and
    do the mapping here, you'll need to override several of the setter
    methods.

    The base GraphicsContext stores colors as a RGB tuple on the unit
    interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
    appropriate for your backend.
    """
    # No overrides needed: the template renderer ignores all style state.
    pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
    """
    For image backends - is not required
    For GUI backends - this should be overriden if drawing should be done in
    interactive python mode
    """
    # The template backend produces no output, so there is nothing to redraw.
    pass
def show():
    """
    For image backends - is not required
    For GUI backends - show() is usually the last line of a pylab script and
    tells the backend that it is time to draw.  In interactive mode, this may
    be a do nothing func.  See the GTK backend for an example of how to handle
    interactive versus batch mode
    """
    for manager in Gcf.get_all_fig_managers():
        # do something to display the GUI (no-op in the template backend)
        pass
def new_figure_manager(num, *args, **kwargs):
    """Create a new figure manager for a freshly constructed figure.

    A GUI backend that needs a main-level application object would normally
    create it here (or in ``new_figure_manager_given_figure``) -- see
    backend_wx, backend_wxagg and backend_tkagg for examples.  Not all GUIs
    require explicit instantiation of a main-level app (e.g. backend_gtk,
    backend_gtkagg) for pylab.
    """
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """Wrap an existing *figure* in a template canvas and figure manager."""
    canvas = FigureCanvasTemplate(figure)
    return FigureManagerTemplate(canvas, num)
class FigureCanvasTemplate(FigureCanvasBase):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...

    Public attribute

      figure - A Figure instance

    Note GUI templates will want to connect events for button presses,
    mouse movements and key presses to functions that call the base
    class methods button_press_event, button_release_event,
    motion_notify_event, key_press_event, and key_release_event.  See,
    e.g., backend_gtk.py, backend_wx.py and backend_tkagg.py
    """

    def draw(self):
        """
        Draw the figure using the renderer
        """
        renderer = RendererTemplate(self.figure.dpi)
        self.figure.draw(renderer)

    # You should provide a print_xxx function for every file format
    # you can write.

    # If the file type is not in the base set of filetypes,
    # you should add it to the class-scope filetypes dictionary as follows:
    filetypes = FigureCanvasBase.filetypes.copy()
    filetypes['foo'] = 'My magic Foo format'

    def print_foo(self, filename, *args, **kwargs):
        """
        Write out format foo.  The dpi, facecolor and edgecolor are restored
        to their original values after this call, so you don't need to
        save and restore them.
        """
        # Template: no file is actually written.
        pass

    def get_default_filetype(self):
        # Figures are saved as '.foo' unless the caller specifies otherwise.
        return 'foo'
class FigureManagerTemplate(FigureManagerBase):
    """
    Wrap everything up into a window for the pylab interface

    For non interactive backends, the base class does all the work
    """
    pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################

# matplotlib.backends looks these two names up by convention.
FigureCanvas = FigureCanvasTemplate
FigureManager = FigureManagerTemplate
| apache-2.0 |
icdishb/scikit-learn | sklearn/datasets/tests/test_20news.py | 42 | 2416 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Check category subsetting of a locally cached 20 newsgroups copy.

    Skipped when the dataset has not been downloaded yet
    (``download_if_missing=False`` raises IOError in that case).
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract a reduced dataset: only the last two categories
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])

    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))

    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_vectorized():
    """Check shapes/dtypes of the pre-vectorized 20 newsgroups splits."""
    # This test is slow.
    raise SkipTest("Test too slow.")

    # Unreachable below the SkipTest; kept as documentation of the intended
    # assertions for each subset.
    bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314, 107428))
    assert_equal(bunch.target.shape[0], 11314)
    assert_equal(bunch.data.dtype, np.float64)

    bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (7532, 107428))
    assert_equal(bunch.target.shape[0], 7532)
    assert_equal(bunch.data.dtype, np.float64)

    bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314 + 7532, 107428))
    assert_equal(bunch.target.shape[0], 11314 + 7532)
    assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
winklerand/pandas | pandas/tests/indexes/timedeltas/test_partial_slicing.py | 7 | 3216 | import pytest
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from pandas import Series, timedelta_range, Timedelta
from pandas.util.testing import assert_series_equal
class TestSlicing(object):
    """Partial-string and positional slicing of TimedeltaIndex-backed Series."""

    def test_slice_keeps_name(self):
        # GH4226: slicing must preserve the index name
        dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
        assert dr[1:].name == dr.name

    def test_partial_slice(self):
        # labels coarser than the index resolution select whole ranges
        rng = timedelta_range('1 day 10:11:12', freq='h', periods=500)
        s = Series(np.arange(len(rng)), index=rng)

        result = s['5 day':'6 day']
        expected = s.iloc[86:134]
        assert_series_equal(result, expected)

        result = s['5 day':]
        expected = s.iloc[86:]
        assert_series_equal(result, expected)

        result = s[:'6 day']
        expected = s.iloc[:134]
        assert_series_equal(result, expected)

        # an exact-resolution label selects a scalar ...
        result = s['6 days, 23:11:12']
        assert result == s.iloc[133]

        # ... and a label beyond the index raises
        pytest.raises(KeyError, s.__getitem__, '50 days')

    def test_partial_slice_high_reso(self):
        # higher reso (microsecond frequency)
        rng = timedelta_range('1 day 10:11:12', freq='us', periods=2000)
        s = Series(np.arange(len(rng)), index=rng)

        result = s['1 day 10:11:12':]
        expected = s.iloc[0:]
        assert_series_equal(result, expected)

        result = s['1 day 10:11:12.001':]
        expected = s.iloc[1000:]
        assert_series_equal(result, expected)

        result = s['1 days, 10:11:12.001001']
        assert result == s.iloc[1001]

    def test_slice_with_negative_step(self):
        ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
        SLC = pd.IndexSlice

        def assert_slices_equivalent(l_slc, i_slc):
            # label-based slicing must match the positional equivalent for
            # both [] and .loc (the .loc check is repeated in the original)
            assert_series_equal(ts[l_slc], ts.iloc[i_slc])
            assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
            assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])

        assert_slices_equivalent(SLC[Timedelta(hours=7)::-1], SLC[7::-1])
        assert_slices_equivalent(SLC['7 hours'::-1], SLC[7::-1])

        assert_slices_equivalent(SLC[:Timedelta(hours=7):-1], SLC[:6:-1])
        assert_slices_equivalent(SLC[:'7 hours':-1], SLC[:6:-1])

        assert_slices_equivalent(SLC['15 hours':'7 hours':-1], SLC[15:6:-1])
        assert_slices_equivalent(SLC[Timedelta(hours=15):Timedelta(hours=7):-1],
                                 SLC[15:6:-1])
        assert_slices_equivalent(SLC['15 hours':Timedelta(hours=7):-1],
                                 SLC[15:6:-1])
        assert_slices_equivalent(SLC[Timedelta(hours=15):'7 hours':-1],
                                 SLC[15:6:-1])

        # start after stop with a negative step yields an empty selection
        assert_slices_equivalent(SLC['7 hours':'15 hours':-1], SLC[:0])

    def test_slice_with_zero_step_raises(self):
        ts = Series(np.arange(20), timedelta_range('0', periods=20, freq='H'))
        tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
                               lambda: ts[::0])
        tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
                               lambda: ts.loc[::0])
        tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
                               lambda: ts.loc[::0])
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tseries/tests/test_bin_groupby.py | 7 | 5012 | # -*- coding: utf-8 -*-
from numpy import nan
import numpy as np
from pandas.types.common import _ensure_int64
from pandas import Index, isnull
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas.lib as lib
import pandas.algos as algos
def test_series_grouper():
    """SeriesGrouper aggregates labeled groups; label -1 rows are dropped."""
    from pandas import Series
    values = Series(np.random.randn(10))
    placeholder = values[:0]
    # first three rows carry label -1 and belong to no group
    group_labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)

    grouper = lib.SeriesGrouper(values, np.mean, group_labels, 2, placeholder)
    result, counts = grouper.get_result()

    assert_almost_equal(result,
                        np.array([values[3:6].mean(), values[6:].mean()]))
    assert_almost_equal(counts, np.array([3, 4], dtype=np.int64))
def test_series_bin_grouper():
    """SeriesBinGrouper aggregates bins delimited by edge positions."""
    from pandas import Series
    values = Series(np.random.randn(10))
    placeholder = values[:0]
    # edges at 3 and 6 induce the groups [0:3), [3:6) and [6:)
    edges = np.array([3, 6])

    grouper = lib.SeriesBinGrouper(values, np.mean, edges, placeholder)
    result, counts = grouper.get_result()

    expected = np.array([values[:3].mean(),
                         values[3:6].mean(),
                         values[6:].mean()])
    assert_almost_equal(result, expected)
    assert_almost_equal(counts, np.array([3, 3, 4], dtype=np.int64))
class TestBinGroupers(tm.TestCase):
    _multiprocess_can_split_ = True

    def setUp(self):
        # 10 observations, three label groups of sizes 3/3/4, plus bin
        # edges that induce the same 3/3/4 grouping.
        self.obj = np.random.randn(10, 1)
        self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
        self.bins = np.array([3, 6], dtype=np.int64)

    def test_generate_bins(self):
        """Both bin generators agree for left- and right-closed intervals."""
        from pandas.core.groupby import generate_bins_generic
        values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
        binner = np.array([0, 3, 6, 9], dtype=np.int64)

        for func in [lib.generate_bins_dt64, generate_bins_generic]:
            bins = func(values, binner, closed='left')
            assert ((bins == np.array([2, 5, 6])).all())

            bins = func(values, binner, closed='right')
            assert ((bins == np.array([3, 6, 6])).all())

        for func in [lib.generate_bins_dt64, generate_bins_generic]:
            values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
            binner = np.array([0, 3, 6], dtype=np.int64)

            bins = func(values, binner, closed='right')
            assert ((bins == np.array([3, 6])).all())

        # degenerate inputs (empty binner/values, binner not spanning the
        # data) must raise rather than return garbage
        self.assertRaises(ValueError, generate_bins_generic, values, [],
                          'right')
        self.assertRaises(ValueError, generate_bins_generic, values[:0],
                          binner, 'right')

        self.assertRaises(ValueError, generate_bins_generic, values, [4],
                          'right')
        self.assertRaises(ValueError, generate_bins_generic, values, [-3, -1],
                          'right')
def test_group_ohlc():
    """Exercise the cython ``group_ohlc_*`` kernels for both float dtypes."""
    def _check(dtype):
        obj = np.array(np.random.randn(20), dtype=dtype)

        # three bins of sizes 6/6/8
        bins = np.array([6, 12, 20])
        out = np.zeros((3, 4), dtype)
        counts = np.zeros(len(out), dtype=np.int64)
        labels = _ensure_int64(np.repeat(np.arange(3),
                                         np.diff(np.r_[0, bins])))

        func = getattr(algos, 'group_ohlc_%s' % dtype)
        func(out, counts, obj[:, None], labels)

        def _ohlc(group):
            # open/high/low/close; an all-NaN group yields four NaNs
            if isnull(group).all():
                return np.repeat(nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]

        expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]),
                             _ohlc(obj[12:])])

        assert_almost_equal(out, expected)
        tm.assert_numpy_array_equal(counts,
                                    np.array([6, 6, 8], dtype=np.int64))

        # NaN-poison the first group and recheck the NaN propagation path
        obj[:6] = nan
        func(out, counts, obj[:, None], labels)
        expected[0] = nan
        assert_almost_equal(out, expected)

    _check('float32')
    _check('float64')
class TestMoments(tm.TestCase):
    # Placeholder test case: no moment tests are implemented here yet.
    pass
class TestReducer(tm.TestCase):
    def test_int_index(self):
        """lib.reduce with integer labels matches plain axis reductions."""
        from pandas.core.series import Series

        arr = np.random.randn(100, 4)
        result = lib.reduce(arr, np.sum, labels=Index(np.arange(4)))
        expected = arr.sum(0)
        assert_almost_equal(result, expected)

        result = lib.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100)))
        expected = arr.sum(1)
        assert_almost_equal(result, expected)

        # same reductions with an explicit dummy Series supplied
        dummy = Series(0., index=np.arange(100))
        result = lib.reduce(arr, np.sum, dummy=dummy,
                            labels=Index(np.arange(4)))
        expected = arr.sum(0)
        assert_almost_equal(result, expected)

        dummy = Series(0., index=np.arange(4))
        result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,
                            labels=Index(np.arange(100)))
        expected = arr.sum(1)
        assert_almost_equal(result, expected)

        # repeated call checks the reduction is deterministic/reusable
        result = lib.reduce(arr, np.sum, axis=1, dummy=dummy,
                            labels=Index(np.arange(100)))
        assert_almost_equal(result, expected)
| mit |
dhwang99/statistics_introduction | hypothetical_test/test_contain.py | 1 | 2067 | #encoding: utf8
import numpy as np
from scipy.misc import comb
from scipy.stats import norm
import matplotlib.pyplot as plt
import pdb
'''
Minimal sample size for a one-sided test on a normal mean.

The required size depends on both error bounds (alpha and beta) and on the
detectable shift delta; delta is typically taken as one standard deviation.

    err1 = alpha = 0.1
    err2 = beta  = 0.2

Controlling the type I error bounds the critical value c:

    Phi((c - mu0) * sqrt(n) / sigma) <= 1 - alpha
    c <= ppf(1 - alpha) * sigma / sqrt(n) + mu0

Write ds = delta / sigma (fixed once delta and sigma are chosen) and let
mu = mu0 + delta.  Bounding the type II error at mu by beta gives

    Phi((c - mu) * sqrt(n) / sigma) < beta
    Phi((c - mu0 - delta) * sqrt(n) / sigma) < beta
    (c - mu0 - delta) * sqrt(n) / sigma < -Phi^{-1}(1 - beta)

Substituting the largest admissible c from above:

    (Za * sigma / sqrt(n) + mu0 - mu0 - delta) * sqrt(n) / sigma < -Zbeta
    Za - delta * sqrt(n) / sigma < -Zbeta
    sqrt(n) > (Za + Zbeta) * sigma / delta
'''
def norm_sample_contain(sigma, delta=None, max_alpha=0.1, max_beta=0.2):
    """Return the minimal sample size for a one-sided normal-mean test.

    Chooses the smallest n such that the type I error is at most
    ``max_alpha`` and the type II error at the alternative ``mu0 + delta``
    is at most ``max_beta``, i.e.
    ``sqrt(n) >= (Z_alpha + Z_beta) * sigma / delta``
    (see the derivation in the module docstring).

    Parameters
    ----------
    sigma : float
        Known standard deviation of the population.
    delta : float or None, optional
        Detectable shift of the mean.  Defaults to one ``sigma``
        (previously callers had to pass ``None`` explicitly).
    max_alpha : float, optional
        Upper bound on the type I error (significance level).
    max_beta : float, optional
        Upper bound on the type II error.

    Returns
    -------
    float
        The required sample size, rounded up by ``np.ceil``.
    """
    if delta is None:  # identity check; ``delta == None`` was unidiomatic
        delta = sigma
    z_alpha = norm.ppf(1 - max_alpha)
    z_beta = norm.ppf(1 - max_beta)
    min_sqrt_n = (z_alpha + z_beta) * sigma / delta
    return np.ceil(min_sqrt_n ** 2)
'''
Compute the p-value under a normal distribution.
'''
def p_value():
    # Placeholder -- not implemented yet; always returns None.
    return None
if __name__ == "__main__":
    # NOTE(review): this driver is Python 2 only (``xrange`` and print
    # statements); the functions above are version-agnostic.
    colors = ['g', 'b', 'k']

    # test contain of samples: plot required n versus the type II error
    # bound, for two significance levels
    mu0 = 0
    sigma = 1.

    betas = np.linspace(0.01, 0.3, num=50)
    contains = np.zeros(len(betas))

    # alpha = 0.1 (red curve)
    for i in xrange(len(betas)):
        beta = betas[i]
        n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.1, max_beta=beta)
        contains[i] = n

    plt.clf()
    plt.plot(betas, contains, color='r')
    print "betas:", betas
    print "n:", contains

    # alpha = 0.05 (black curve); requires larger n for the same beta
    for i in xrange(len(betas)):
        beta = betas[i]
        n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.05, max_beta=beta)
        contains[i] = n

    plt.plot(betas, contains, color='k')
    print "betas:", betas
    print "n:", contains

    plt.savefig('images/norm_contain.png', format='png')
| gpl-3.0 |
iismd17/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)

# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.decomposition import PCA, FastICA

###############################################################################
# Generate sample data: heavy-tailed (Student t, df=1.5) independent sources,
# then mix them linearly to form the observations.
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.

# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix

X = np.dot(S, A.T)  # Generate observations

pca = PCA()
S_pca_ = pca.fit(X).transform(X)

ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources

# normalize the estimated sources to unit variance
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter-plot the 2D samples *S*, optionally overlaying component axes.

    axis_list : optional list of 2x2 arrays (e.g. ``pca.components_.T`` or
        ``ica.mixing_``) whose directions are drawn as quiver arrows.
        NOTE(review): each entry is normalized in place (``axis /= ...``),
        so the caller's arrays are modified.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        colors = ['orange', 'red']
        for color, axis in zip(colors, axis_list):
            axis /= axis.std()
            x_axis, y_axis = axis
            # Trick to get legend to work
            plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
            plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
                       color=color)

    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.xlabel('x')
    plt.ylabel('y')
# Four panels: true sources, observations (with recovered axes), and the
# signals recovered by PCA and by ICA.
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')

axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)

plt.title('Observations')

plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')

plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')

plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/backends/backend_qt5.py | 10 | 29378 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from .qt_compat import QtCore, QtGui, QtWidgets, _getSaveFileName, __version__
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
def fn_name():
    # Name of the *calling* function (one frame up), used for debug traces.
    return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
    """Schedule a redraw of the active figure.

    Called after every pylab drawing command; a no-op outside of
    interactive mode or when no figure is active.
    """
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.canvas.draw_idle()
# Module-level placeholder for the singleton QApplication.
qApp = None


def _create_qApp():
    """
    Only one qApp can exist at a time, so check before creating one.
    """
    global qApp

    if qApp is None:
        if DEBUG:
            print("Starting up QApplication")
        app = QtWidgets.QApplication.instance()
        if app is None:
            # check for DISPLAY env variable on X11 build of Qt
            if hasattr(QtGui, "QX11Info"):
                display = os.environ.get('DISPLAY')
                if display is None or not re.search(':\d', display):
                    raise RuntimeError('Invalid DISPLAY variable')

            qApp = QtWidgets.QApplication([str(" ")])
            qApp.lastWindowClosed.connect(qApp.quit)
        else:
            # reuse an application created by the embedding program
            qApp = app
class Show(ShowBase):
    def mainloop(self):
        """Enter the Qt event loop until the last window closes."""
        # allow KeyboardInterrupt exceptions to close the plot window.
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        global qApp
        qApp.exec_()

show = Show()
def new_figure_manager(num, *args, **kwargs):
    """Build a Figure from the given arguments and wrap it in a manager."""
    figure = Figure(*args, **kwargs)
    manager = new_figure_manager_given_figure(num, figure)
    return manager
def new_figure_manager_given_figure(num, figure):
    """Wrap an existing *figure* in a Qt canvas and figure manager."""
    return FigureManagerQT(FigureCanvasQT(figure), num)
class TimerQT(TimerBase):
    '''
    Subclass of :class:`backend_bases.TimerBase` that uses Qt4 timer events.

    Attributes:
    * interval: The time between timer events in milliseconds. Default
        is 1000 ms.
    * single_shot: Boolean flag indicating whether this timer should
        operate as single shot (run once and then stop). Defaults to False.
    * callbacks: Stores list of (func, args) tuples that will be called
        upon timer events. This list can be manipulated directly, or the
        functions add_callback and remove_callback can be used.
    '''

    def __init__(self, *args, **kwargs):
        TimerBase.__init__(self, *args, **kwargs)

        # Create a new timer and connect the timeout() signal to the
        # _on_timer method.
        self._timer = QtCore.QTimer()
        self._timer.timeout.connect(self._on_timer)
        self._timer_set_interval()

    def __del__(self):
        # Probably not necessary in practice, but is good behavior to
        # disconnect
        try:
            TimerBase.__del__(self)
            self._timer.timeout.disconnect(self._on_timer)
        except RuntimeError:
            # Timer C++ object already deleted
            pass

    # The _timer_* hooks below adapt TimerBase's template methods to QTimer.
    def _timer_set_single_shot(self):
        self._timer.setSingleShot(self._single)

    def _timer_set_interval(self):
        self._timer.setInterval(self._interval)

    def _timer_start(self):
        self._timer.start()

    def _timer_stop(self):
        self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt5: ', figure)
_create_qApp()
# NB: Using super for this call to avoid a TypeError:
# __init__() takes exactly 2 arguments (1 given) on QWidget
# PyQt5
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self.setMouseTracking(True)
self._idle = True
# hide until we can test and fix
# self.startTimer(backend_IdleEvent.milliseconds)
w, h = self.get_width_height()
self.resize(w, h)
def __timerEvent(self, event):
# hide until we can test and fix
self.mpl_idle_event(event)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button)
if DEBUG:
print('button pressed:', event.button())
def mouseDoubleClickEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True)
if DEBUG:
print('button doubleclicked:', event.button())
def mouseMoveEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event(self, x, y)
# if DEBUG: print('mouse move')
def mouseReleaseEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button)
if DEBUG:
print('button released')
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps != 0:
FigureCanvasBase.scroll_event(self, x, y, steps)
if DEBUG:
print('scroll event: delta = %i, '
'steps = %i ' % (event.delta(), steps))
def keyPressEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_press_event(self, key)
if DEBUG:
print('key press', key)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event(self, key)
if DEBUG:
print('key release', key)
def resizeEvent(self, event):
    """Resize the figure to track the widget size, then repaint."""
    width_px = event.size().width()
    height_px = event.size().height()
    if DEBUG:
        print('resize (%d x %d)' % (width_px, height_px))
        print("FigureCanvasQt.resizeEvent(%d, %d)" % (width_px, height_px))
    # Convert the pixel size to inches for Figure.set_size_inches.
    dpival = self.figure.dpi
    self.figure.set_size_inches(width_px / dpival, height_px / dpival)
    FigureCanvasBase.resize_event(self)
    self.draw()
    self.update()
    QtWidgets.QWidget.resizeEvent(self, event)
def sizeHint(self):
    """Return the preferred widget size: the figure's size in pixels."""
    w, h = self.get_width_height()
    return QtCore.QSize(w, h)
def minimumSizeHint(self):
    """Return the smallest size Qt may shrink the canvas to.

    BUG FIX: the method was misspelled ``minumumSizeHint``, so it never
    overrode QWidget.minimumSizeHint and Qt silently ignored it.
    """
    return QtCore.QSize(10, 10)

# Backward-compatible alias for any external caller using the old
# misspelled name.
minumumSizeHint = minimumSizeHint
def _get_key(self, event):
    """Convert a Qt key event into matplotlib's 'mod1+mod2+key' string.

    Returns None for auto-repeated events and for key codes that are
    neither special keys nor valid unicode code points.
    """
    if event.isAutoRepeat():
        return None

    event_key = event.key()
    event_mods = int(event.modifiers())  # actually a bitmask

    # get names of the pressed modifier keys
    # bit twiddling to pick out modifier keys from event_mods bitmask,
    # if event_key is a MODIFIER, it should not be duplicated in mods
    mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
            if event_key != qt_key and (event_mods & mod_key) == mod_key]
    try:
        # for certain keys (enter, left, backspace, etc) use a word for the
        # key, rather than unicode
        key = SPECIAL_KEYS[event_key]
    except KeyError:
        # unicode defines code points up to 0x0010ffff
        # QT will use Key_Codes larger than that for keyboard keys that are
        # are not unicode characters (like multimedia keys)
        # skip these
        # if you really want them, you should add them to SPECIAL_KEYS
        MAX_UNICODE = 0x10ffff
        if event_key > MAX_UNICODE:
            return None
        # NOTE(review): unichr is Python 2 only; on Python 3 this would
        # need chr (or six.unichr) -- confirm the supported interpreter.
        key = unichr(event_key)
        # qt delivers capitalized letters. fix capitalization
        # note that capslock is ignored
        if 'shift' in mods:
            mods.remove('shift')
        else:
            key = key.lower()
    mods.reverse()
    return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
    """
    Creates a new backend-specific subclass of
    :class:`backend_bases.Timer`. This is useful for getting
    periodic events through the backend's native event
    loop. Implemented only for backends with GUIs.

    optional arguments:

    *interval*
        Timer interval in milliseconds

    *callbacks*
        Sequence of (func, args, kwargs) where func(*args, **kwargs)
        will be executed by the timer every *interval*.
    """
    # All arguments are forwarded verbatim to the Qt-specific timer.
    return TimerQT(*args, **kwargs)
def flush_events(self):
    """Process pending Qt events so the GUI stays responsive."""
    global qApp
    qApp.processEvents()
def start_event_loop(self, timeout):
    # Delegate to the backend-independent default implementation.
    FigureCanvasBase.start_event_loop_default(self, timeout)
# Inherit the documentation of the default implementation.
start_event_loop.__doc__ = \
    FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
    # Delegate to the backend-independent default implementation.
    FigureCanvasBase.stop_event_loop_default(self)
# Inherit the documentation of the default implementation.
stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
def draw_idle(self):
    'update drawing area only if idle'
    # `d` snapshots whether a redraw was still armed; `_idle` is cleared
    # immediately so overlapping calls do not queue duplicate redraws.
    d = self._idle
    self._idle = False

    def idle_draw(*args):
        try:
            self.draw()
        finally:
            # Re-arm only after the draw finishes, even if it raised.
            self._idle = True
    if d:
        # Run the draw on the next pass through the Qt event loop.
        QtCore.QTimer.singleShot(0, idle_draw)
class MainWindow(QtWidgets.QMainWindow):
    """Main window that announces its closing through a Qt signal."""

    # Emitted just before the window closes so listeners (the canvas and
    # the figure manager) can clean up.
    closing = QtCore.Signal()

    def closeEvent(self, event):
        self.closing.emit()
        QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
    """
    Public attributes

    canvas : The FigureCanvas instance
    num : The Figure number
    toolbar : The qt.QToolBar
    window : The qt.QMainWindow
    """

    def __init__(self, canvas, num):
        if DEBUG:
            print('FigureManagerQT.%s' % fn_name())
        FigureManagerBase.__init__(self, canvas, num)
        self.canvas = canvas
        # MainWindow emits `closing` just before it goes away; wire it to
        # both the canvas close event and this manager's teardown.
        self.window = MainWindow()
        self.window.closing.connect(canvas.close_event)
        self.window.closing.connect(self._widgetclosed)

        self.window.setWindowTitle("Figure %d" % num)
        image = os.path.join(matplotlib.rcParams['datapath'],
                             'images', 'matplotlib.png')
        self.window.setWindowIcon(QtGui.QIcon(image))

        # Give the keyboard focus to the figure instead of the
        # manager; StrongFocus accepts both tab and click to focus and
        # will enable the canvas to process event w/o clicking.
        # ClickFocus only takes the focus is the window has been
        # clicked
        # on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
        # http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
        self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.canvas.setFocus()

        # Guard flag against re-entrant teardown; see _widgetclosed/destroy.
        self.window._destroying = False

        self.toolbar = self._get_toolbar(self.canvas, self.window)
        if self.toolbar is not None:
            self.window.addToolBar(self.toolbar)
            self.toolbar.message.connect(self._show_message)
            tbs_height = self.toolbar.sizeHint().height()
        else:
            tbs_height = 0

        # resize the main window so it will display the canvas with the
        # requested size:
        cs = canvas.sizeHint()
        sbs = self.window.statusBar().sizeHint()
        self._status_and_tool_height = tbs_height + sbs.height()
        height = cs.height() + self._status_and_tool_height
        self.window.resize(cs.width(), height)

        self.window.setCentralWidget(self.canvas)

        if matplotlib.is_interactive():
            self.window.show()

        def notify_axes_change(fig):
            # This will be called whenever the current axes is changed
            if self.toolbar is not None:
                self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)

    @QtCore.Slot()
    def _show_message(self, s):
        # Fixes a PySide segfault.
        self.window.statusBar().showMessage(s)

    def full_screen_toggle(self):
        # Toggle the window between full-screen and normal windowed mode.
        if self.window.isFullScreen():
            self.window.showNormal()
        else:
            self.window.showFullScreen()

    def _widgetclosed(self):
        # Re-entrancy guard: close may be signalled more than once.
        if self.window._destroying:
            return
        self.window._destroying = True
        try:
            Gcf.destroy(self.num)
        except AttributeError:
            pass
            # It seems that when the python session is killed,
            # Gcf can get destroyed before the Gcf.destroy
            # line is run, leading to a useless AttributeError.

    def _get_toolbar(self, canvas, parent):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        if matplotlib.rcParams['toolbar'] == 'toolbar2':
            toolbar = NavigationToolbar2QT(canvas, parent, False)
        else:
            toolbar = None
        return toolbar

    def resize(self, width, height):
        'set the canvas size in pixels'
        # Add back the toolbar/status-bar height so the *canvas* (not the
        # whole window) ends up at the requested size.
        self.window.resize(width, height + self._status_and_tool_height)

    def show(self):
        self.window.show()

    def destroy(self, *args):
        # check for qApp first, as PySide deletes it in its atexit handler
        if QtWidgets.QApplication.instance() is None:
            return
        if self.window._destroying:
            return
        self.window._destroying = True
        self.window.destroyed.connect(self._widgetclosed)
        if self.toolbar:
            self.toolbar.destroy()
        if DEBUG:
            print("destroy figure manager")
        self.window.close()

    def get_window_title(self):
        return str(self.window.windowTitle())

    def set_window_title(self, title):
        self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
    """Qt implementation of the standard matplotlib navigation toolbar."""

    # Emitted with status text (e.g. cursor coordinates) for display.
    message = QtCore.Signal(str)

    def __init__(self, canvas, parent, coordinates=True):
        """ coordinates: should we show the coordinates on the right? """
        self.canvas = canvas
        self.parent = parent
        self.coordinates = coordinates
        self._actions = {}
        """A mapping of toolitem method names to their QActions"""
        QtWidgets.QToolBar.__init__(self, parent)
        NavigationToolbar2.__init__(self, canvas)

    def _icon(self, name):
        # Load a toolbar icon from matplotlib's bundled image directory.
        return QtGui.QIcon(os.path.join(self.basedir, name))

    def _init_toolbar(self):
        self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')

        # A toolitem with text None marks a separator in the layout.
        for text, tooltip_text, image_file, callback in self.toolitems:
            if text is None:
                self.addSeparator()
            else:
                a = self.addAction(self._icon(image_file + '.png'),
                                   text, getattr(self, callback))
                self._actions[callback] = a
                # pan/zoom are modal, so their buttons are checkable.
                if callback in ['zoom', 'pan']:
                    a.setCheckable(True)
                if tooltip_text is not None:
                    a.setToolTip(tooltip_text)

        if figureoptions is not None:
            a = self.addAction(self._icon("qt4_editor_options.png"),
                               'Customize', self.edit_parameters)
            a.setToolTip('Edit curves line and axes parameters')

        self.buttons = {}

        # Add the x,y location widget at the right side of the toolbar
        # The stretch factor is 1 which means any resizing of the toolbar
        # will resize this label instead of the buttons.
        if self.coordinates:
            self.locLabel = QtWidgets.QLabel("", self)
            self.locLabel.setAlignment(
                QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
            self.locLabel.setSizePolicy(
                QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                      QtWidgets.QSizePolicy.Ignored))
            labelAction = self.addWidget(self.locLabel)
            labelAction.setVisible(True)

        # reference holder for subplots_adjust window
        self.adj_window = None

    # Only defined when the optional figure-options editor is importable.
    if figureoptions is not None:
        def edit_parameters(self):
            allaxes = self.canvas.figure.get_axes()
            if len(allaxes) == 1:
                axes = allaxes[0]
            else:
                # Several axes: ask the user which one to edit, labelling
                # each with its title/ylabel/label where available.
                titles = []
                for axes in allaxes:
                    title = axes.get_title()
                    ylabel = axes.get_ylabel()
                    label = axes.get_label()
                    if title:
                        fmt = "%(title)s"
                        if ylabel:
                            fmt += ": %(ylabel)s"
                        fmt += " (%(axes_repr)s)"
                    elif ylabel:
                        fmt = "%(axes_repr)s (%(ylabel)s)"
                    elif label:
                        fmt = "%(axes_repr)s (%(label)s)"
                    else:
                        fmt = "%(axes_repr)s"
                    titles.append(fmt % dict(title=title,
                                             ylabel=ylabel, label=label,
                                             axes_repr=repr(axes)))
                item, ok = QtWidgets.QInputDialog.getItem(
                    self.parent, 'Customize', 'Select axes:', titles, 0, False)
                if ok:
                    axes = allaxes[titles.index(six.text_type(item))]
                else:
                    return

            figureoptions.figure_edit(axes, self)

    def _update_buttons_checked(self):
        # sync button checkstates to match active mode
        self._actions['pan'].setChecked(self._active == 'PAN')
        self._actions['zoom'].setChecked(self._active == 'ZOOM')

    def pan(self, *args):
        super(NavigationToolbar2QT, self).pan(*args)
        self._update_buttons_checked()

    def zoom(self, *args):
        super(NavigationToolbar2QT, self).zoom(*args)
        self._update_buttons_checked()

    def dynamic_update(self):
        self.canvas.draw()

    def set_message(self, s):
        # Broadcast the message and mirror it in the coordinate label.
        self.message.emit(s)
        if self.coordinates:
            self.locLabel.setText(s.replace(', ', '\n'))

    def set_cursor(self, cursor):
        if DEBUG:
            print('Set cursor', cursor)
        self.canvas.setCursor(cursord[cursor])

    def draw_rubberband(self, event, x0, y0, x1, y1):
        # Convert from matplotlib (bottom-left origin) to Qt (top-left).
        height = self.canvas.figure.bbox.height
        y1 = height - y1
        y0 = height - y0

        w = abs(x1 - x0)
        h = abs(y1 - y0)

        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
        self.canvas.drawRectangle(rect)

    def configure_subplots(self):
        image = os.path.join(matplotlib.rcParams['datapath'],
                             'images', 'matplotlib.png')
        dia = SubplotToolQt(self.canvas.figure, self.parent)
        dia.setWindowIcon(QtGui.QIcon(image))
        dia.exec_()

    def save_figure(self, *args):
        filetypes = self.canvas.get_supported_filetypes_grouped()
        sorted_filetypes = list(six.iteritems(filetypes))
        sorted_filetypes.sort()
        default_filetype = self.canvas.get_default_filetype()

        startpath = matplotlib.rcParams.get('savefig.directory', '')
        startpath = os.path.expanduser(startpath)
        start = os.path.join(startpath, self.canvas.get_default_filename())
        filters = []
        selectedFilter = None
        for name, exts in sorted_filetypes:
            exts_list = " ".join(['*.%s' % ext for ext in exts])
            filter = '%s (%s)' % (name, exts_list)
            # Preselect the filter matching the canvas default file type.
            if default_filetype in exts:
                selectedFilter = filter
            filters.append(filter)
        filters = ';;'.join(filters)

        fname, filter = _getSaveFileName(self.parent,
                                         "Choose a filename to save to",
                                         start, filters, selectedFilter)
        if fname:
            if startpath == '':
                # explicitly missing key or empty str signals to use cwd
                matplotlib.rcParams['savefig.directory'] = startpath
            else:
                # save dir for next time
                savefig_dir = os.path.dirname(six.text_type(fname))
                matplotlib.rcParams['savefig.directory'] = savefig_dir
            try:
                self.canvas.print_figure(six.text_type(fname))
            except Exception as e:
                QtWidgets.QMessageBox.critical(
                    self, "Error saving file", str(e),
                    QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(SubplotTool, UiSubplotTool):
    """Dialog with sliders for interactively adjusting subplot parameters.

    Slider positions are integers in [0, 1000] mapping to subplot
    parameter values in [0.0, 1.0].
    """

    def __init__(self, targetfig, parent):
        UiSubplotTool.__init__(self, None)

        self.targetfig = targetfig
        self.parent = parent
        self.donebutton.clicked.connect(self.close)
        self.resetbutton.clicked.connect(self.reset)
        self.tightlayout.clicked.connect(self.functight)

        # constraints: keep left<right and bottom<top by tying each
        # slider's limit to its opposite slider's current value.
        self.sliderleft.valueChanged.connect(self.sliderright.setMinimum)
        self.sliderright.valueChanged.connect(self.sliderleft.setMaximum)
        self.sliderbottom.valueChanged.connect(self.slidertop.setMinimum)
        self.slidertop.valueChanged.connect(self.sliderbottom.setMaximum)

        self.defaults = {}
        for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
            # Remember the starting value so reset() can restore it.
            self.defaults[attr] = getattr(self.targetfig.subplotpars, attr)
            slider = getattr(self, 'slider' + attr)
            slider.setMinimum(0)
            slider.setMaximum(1000)
            slider.setSingleStep(5)
            slider.valueChanged.connect(getattr(self, 'func' + attr))

        self._setSliderPositions()

    def _setSliderPositions(self):
        # Push the current parameter values (0.0-1.0) onto the 0-1000 sliders.
        for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
            slider = getattr(self, 'slider' + attr)
            slider.setSliderPosition(int(self.defaults[attr] * 1000))

    def funcleft(self, val):
        # Nudge by one step to avoid left == right.
        if val == self.sliderright.value():
            val -= 1
        val /= 1000.
        self.targetfig.subplots_adjust(left=val)
        self.leftvalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcright(self, val):
        # Nudge by one step to avoid right == left.
        if val == self.sliderleft.value():
            val += 1
        val /= 1000.
        self.targetfig.subplots_adjust(right=val)
        self.rightvalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcbottom(self, val):
        # Nudge by one step to avoid bottom == top.
        if val == self.slidertop.value():
            val -= 1
        val /= 1000.
        self.targetfig.subplots_adjust(bottom=val)
        self.bottomvalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def functop(self, val):
        # Nudge by one step to avoid top == bottom.
        if val == self.sliderbottom.value():
            val += 1
        val /= 1000.
        self.targetfig.subplots_adjust(top=val)
        self.topvalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funcwspace(self, val):
        val /= 1000.
        self.targetfig.subplots_adjust(wspace=val)
        self.wspacevalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def funchspace(self, val):
        val /= 1000.
        self.targetfig.subplots_adjust(hspace=val)
        self.hspacevalue.setText("%.2f" % val)
        if self.drawon:
            self.targetfig.canvas.draw()

    def functight(self):
        # Apply tight_layout, then reflect the new values on the sliders.
        self.targetfig.tight_layout()
        self._setSliderPositions()
        self.targetfig.canvas.draw()

    def reset(self):
        # Restore the parameter values captured at dialog creation.
        self.targetfig.subplots_adjust(**self.defaults)
        self._setSliderPositions()
        self.targetfig.canvas.draw()
def error_msg_qt(msg, parent=None):
    """Show *msg* (a string or an iterable of objects) in a warning dialog."""
    if not is_string_like(msg):
        msg = ','.join(map(str, msg))

    # BUG FIX: the Ok button flag lives on QtWidgets.QMessageBox in the
    # Qt5 module layout used throughout this file; QtGui.QMessageBox does
    # not exist there and raised AttributeError.
    QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
    """Handle uncaught exceptions

    It does not catch SystemExit
    """
    msg = ''
    # get the filename attribute if available (for IOError)
    if getattr(value, 'filename', None) is not None:
        msg = value.filename + ': '
    strerror = getattr(value, 'strerror', None)
    if strerror is not None:
        msg += strerror
    else:
        msg += str(value)

    if msg:
        error_msg_qt(msg)
# Canonical names picked up by matplotlib's backend-loading machinery.
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
| lgpl-3.0 |
linkedin/naarad | src/naarad/metrics/jmeter_metric.py | 1 | 15687 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import gc
import logging
import os
from xml.etree import ElementTree
import numpy
import heapq
from naarad.metrics.metric import Metric
from naarad.graphing.plot_data import PlotData as PD
import naarad.utils
import naarad.naarad_imports
from naarad.naarad_constants import important_sub_metrics_import
logger = logging.getLogger('naarad.metrics.JmeterMetric')
class JmeterMetric(Metric):
    """Metric implementation that parses Jmeter XML (jtl) output files."""

    def __init__(self, metric_type, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,
                 rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options):
        Metric.__init__(self, metric_type, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,
                        rule_strings, important_sub_metrics, anomaly_detection_metrics)
        # Human-friendly names for the jtl sample attributes and the
        # derived metrics naarad computes from them (used in plots).
        self.sub_metric_description = {
            'lb': 'Transaction Name',
            'lt': 'Time to First byte',
            'ts': 'Timestamp',
            'tn': 'Transaction Name (Parent)',
            's': 'Status',
            'ResponseTime': 'Response Time',
            'rc': 'Response Code',
            'rm': 'Response Message',
            'dt': 'Data Type',
            'ResponseSize': 'Response Size',
            'qps': 'Successful Transactions per second',
            'ErrorsPerSecond': 'Errors per second',
            'DataThroughput': 'Data Throughput'
        }
        # Units shown on plot axes for each sub-metric.
        self.sub_metric_units = {
            'lt': 'ms',
            'ResponseTime': 'ms',
            'ResponseSize': 'bytes',
            'qps': 'qps',
            'DataThroughput': 'mbps',
            'ErrorsPerSecond': 'qps'
        }
        self.calculated_stats = {}
        self.aggregation_granularity = 'second'
        self.calculated_percentiles = {}
        self.summary_stats = defaultdict(dict)
        self.summary_html_content_enabled = True
        self.summary_charts = [self.label + '.Overall_Summary.div']
        if not self.important_sub_metrics:
            self.important_sub_metrics = important_sub_metrics_import['JMETER']
        if other_options:
            # BUG FIX (portability): dict.iteritems() exists only on
            # Python 2; items() behaves identically for this single
            # iteration and also works on Python 3.
            for (key, val) in other_options.items():
                setattr(self, key, val)
def get_csv(self, transaction_name, column):
    """Return the csv path for a transaction/column pair.

    Also records the csv -> 'transaction.column' mapping in
    self.csv_column_map as a side effect.
    """
    # Map short jtl attribute names onto the friendly sub-metric names.
    column_aliases = {'t': 'ResponseTime',
                      'by': 'ResponseSize',
                      'thr': 'DataThroughput',
                      'eqps': 'ErrorsPerSecond'}
    col = naarad.utils.sanitize_string(column)
    col = column_aliases.get(col, col)
    if transaction_name == '__overall_summary__':
        transaction_name = 'Overall_Summary'
    csv = os.path.join(self.resource_directory,
                       self.label + '.' + transaction_name + '.' + col + '.csv')
    self.csv_column_map[csv] = transaction_name + '.' + col
    return csv
def aggregate_count_over_time(self, metric_store, line_data, transaction_list, aggregate_timestamp):
    """
    Organize and store the count of data from the log line into the metric store by metric type, transaction, timestamp

    :param dict metric_store: The metric store used to store all the parsed jmeter log data
    :param dict line_data: dict with the extracted k:v from the log line
    :param list transaction_list: list of transaction to be used for storing the metrics from given line
    :param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
    :return: None
    """
    # Successful samples count towards qps, failures towards eqps; the
    # status is constant for a single log line, so pick the bucket once.
    bucket = 'qps' if line_data.get('s') == 'true' else 'eqps'
    for transaction in transaction_list:
        counts = metric_store[bucket][transaction]
        counts[aggregate_timestamp] = counts.get(aggregate_timestamp, 0) + 1
    return None
def aggregate_values_over_time(self, metric_store, line_data, transaction_list, metric_list, aggregate_timestamp):
    """
    Organize and store the data from the log line into the metric store by metric type, transaction, timestamp

    :param dict metric_store: The metric store used to store all the parsed jmeter log data
    (nested defaultdicts: metric -> transaction -> timestamp -> list of values)
    :param dict line_data: dict with the extracted k:v from the log line
    :param list transaction_list: list of transaction to be used for storing the metrics from given line
    :param list metric_list: list of metrics to extract from the log line
    :param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
    :return: None
    """
    for metric in metric_list:
        for transaction in transaction_list:
            # BUG FIX (portability): the original used the bare builtin
            # reduce(defaultdict.__getitem__, ...), which exists only on
            # Python 2. Chained indexing on the nested defaultdicts is
            # identical in behavior and portable.
            metric_store[metric][transaction][aggregate_timestamp].append(
                float(line_data.get(metric)))
    return None
def average_values_for_plot(self, metric_store, data, averaging_factor):
    """
    Create the time series for the various metrics, averaged over the aggregation period being used for plots

    :param dict metric_store: The metric store used to store all the parsed jmeter log data
    :param dict data: Dict with all the metric data to be output to csv
    :param float averaging_factor: averaging factor to be used for calculating the average per second metrics
    :return: None
    """
    for metric, transaction_store in metric_store.items():
        for transaction, time_store in transaction_store.items():
            for time_stamp, metric_data in sorted(time_store.items()):
                if metric in ['t', 'by']:
                    # Raw response times / sizes: emit the per-bucket mean.
                    data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(sum(map(float, metric_data)) / float(len(metric_data)))]))
                    if metric == 'by':
                        # Derive throughput in mbps from total bytes per bucket:
                        # bytes / (seconds * 1024 * 1024 / 8 bits-per-byte).
                        metric_store['thr'][transaction][time_stamp] = sum(map(float, metric_data)) / float(averaging_factor * 1024 * 1024 / 8.0)
                        data[self.get_csv(transaction, 'thr')].append(','.join([str(time_stamp), str(metric_store['thr'][transaction][time_stamp])]))
                elif metric in ['qps', 'eqps']:
                    # Counters: convert raw counts to a per-second rate.
                    data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(metric_data / float(averaging_factor))]))
    return None
def calculate_key_stats(self, metric_store):
    """
    Calculate key statistics for given data and store in the class variables calculated_stats and calculated_percentiles
    calculated_stats:
      'mean', 'std', 'median', 'min', 'max'
    calculated_percentiles:
      range(5,101,5), 99
    :param dict metric_store: The metric store used to store all the parsed jmeter log data
    :return: none
    """
    stats_to_calculate = ['mean', 'std', 'median', 'min', 'max']  # TODO: get input from user
    # BUG FIX (portability): range() returns a lazy object on Python 3,
    # which has no append(); materialize it as a list first.
    percentiles_to_calculate = list(range(5, 101, 5))  # TODO: get input from user
    percentiles_to_calculate.append(99)
    for transaction in metric_store['t'].keys():
        transaction_key = transaction + '.' + 'ResponseTime'
        # For ResponseTime and ResponseSize, each timestamp has a list of values associated with it.
        # Using heapq.merge to merge all the lists into a single list to be passed to numpy.
        self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
            naarad.utils.calculate_stats(list(heapq.merge(*metric_store['t'][transaction].values())),
                                         stats_to_calculate, percentiles_to_calculate)
        self.update_summary_stats(transaction_key)
        transaction_key = transaction + '.' + 'qps'
        if len(metric_store['qps'][transaction].values()) > 0:
            self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
                naarad.utils.calculate_stats(metric_store['qps'][transaction].values(),
                                             stats_to_calculate, percentiles_to_calculate)
            self.update_summary_stats(transaction_key)
        transaction_key = transaction + '.' + 'ResponseSize'
        self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
            naarad.utils.calculate_stats(list(heapq.merge(*metric_store['by'][transaction].values())),
                                         stats_to_calculate, percentiles_to_calculate)
        self.update_summary_stats(transaction_key)
        # Error counts only exist for transactions that had failures.
        if 'eqps' in metric_store.keys() and transaction in metric_store['eqps'].keys():
            transaction_key = transaction + '.' + 'ErrorsPerSecond'
            self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
                naarad.utils.calculate_stats(metric_store['eqps'][transaction].values(),
                                             stats_to_calculate, percentiles_to_calculate)
            self.update_summary_stats(transaction + '.' + 'ErrorsPerSecond')
        transaction_key = transaction + '.' + 'DataThroughput'
        self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
            naarad.utils.calculate_stats(metric_store['thr'][transaction].values(),
                                         stats_to_calculate, percentiles_to_calculate)
        self.update_summary_stats(transaction_key)
    return None
def parse(self):
    """
    Parse the Jmeter file and calculate key stats

    :return: status of the metric parse
    """
    # Every input file must exist and be non-empty before parsing any.
    file_status = True
    for infile in self.infile_list:
        file_status = file_status and naarad.utils.is_valid_file(infile)
        if not file_status:
            return False

    status = self.parse_xml_jtl(self.aggregation_granularity)
    # The parsed XML trees can be large; reclaim the memory eagerly.
    gc.collect()
    return status
def _sanitize_label(self, raw_label):
return raw_label.replace('/', '_').replace('?', '_')
def parse_xml_jtl(self, granularity):
    """
    Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics

    :param string granularity: The time period over which to aggregate and average the raw data. Valid values are 'hour', 'minute' or 'second'
    :return: status of the metric parse
    """
    data = defaultdict(list)
    # metric -> transaction -> timestamp -> list of raw values
    processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for input_file in self.infile_list:
        logger.info('Processing : %s', input_file)
        timestamp_format = None
        tree = ElementTree.parse(input_file)
        # jtl files record http requests as <httpSample> and other
        # samplers as <sample>; treat both uniformly.
        samples = tree.findall('./httpSample') + tree.findall('./sample')
        for sample in samples:
            # Detect the timestamp format once from the first parseable sample.
            if not timestamp_format or timestamp_format == 'unknown':
                timestamp_format = naarad.utils.detect_timestamp_format(sample.get('ts'))
            if timestamp_format == 'unknown':
                continue
            ts = naarad.utils.get_standardized_timestamp(sample.get('ts'), timestamp_format)
            if ts == -1:
                continue
            ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
            aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)
            # Each sample contributes to both its own transaction and the
            # synthetic 'Overall_Summary' transaction.
            self.aggregate_count_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], aggregate_timestamp)
            self.aggregate_values_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], ['t', 'by'], aggregate_timestamp)
        logger.info('Finished parsing : %s', input_file)
    logger.info('Processing metrics for output to csv')
    self.average_values_for_plot(processed_data, data, averaging_factor)
    logger.info('Writing time series csv')
    for csv in data.keys():
        self.csv_files.append(csv)
        with open(csv, 'w') as csvf:
            csvf.write('\n'.join(sorted(data[csv])))
    logger.info('Processing raw data for stats')
    self.calculate_key_stats(processed_data)
    return True
def calculate_stats(self):
    """Write the previously computed stats and percentiles out to csv files.

    Produces one summary stats csv, one percentile csv per sub-metric,
    and a stats csv restricted to the important sub-metrics.
    """
    stats_csv = self.get_stats_csv()
    imp_metric_stats_csv = self.get_important_sub_metrics_csv()
    csv_header = 'sub_metric,mean,std. deviation,median,min,max,90%,95%,99%\n'
    with open(stats_csv, 'w') as FH:
        FH.write(csv_header)
        for sub_metric in self.calculated_stats:
            percentile_data = self.calculated_percentiles[sub_metric]
            stats_data = self.calculated_stats[sub_metric]
            csv_data = ','.join([sub_metric, str(round(stats_data['mean'], 2)), str(round(stats_data['std'], 2)), str(round(stats_data['median'], 2)),
                                 str(round(stats_data['min'], 2)), str(round(stats_data['max'], 2)), str(round(percentile_data[90], 2)),
                                 str(round(percentile_data[95], 2)), str(round(percentile_data[99], 2))])
            FH.write(csv_data + '\n')
    self.stats_files.append(stats_csv)
    # One csv per sub-metric holding the full percentile distribution.
    for sub_metric in self.calculated_percentiles:
        percentiles_csv = self.get_csv(sub_metric, 'percentiles')
        percentile_data = self.calculated_percentiles[sub_metric]
        with open(percentiles_csv, 'w') as FH:
            for percentile in sorted(percentile_data):
                FH.write(str(percentile) + ',' + str(numpy.round_(percentile_data[percentile], 2)) + '\n')
        self.percentiles_files.append(percentiles_csv)
    # Separate csv limited to the user-facing "important" sub-metrics.
    with open(imp_metric_stats_csv, 'w') as FH_IMP:
        FH_IMP.write(csv_header)
        for sub_metric in self.important_sub_metrics:
            if sub_metric in self.calculated_stats.keys():
                percentile_data = self.calculated_percentiles[sub_metric]
                stats_data = self.calculated_stats[sub_metric]
                csv_data = ','.join([sub_metric, str(round(stats_data['mean'], 2)), str(round(stats_data['std'], 2)), str(round(stats_data['median'], 2)),
                                     str(round(stats_data['min'], 2)), str(round(stats_data['max'], 2)), str(round(percentile_data[90], 2)),
                                     str(round(percentile_data[95], 2)), str(round(percentile_data[99], 2))])
                FH_IMP.write(csv_data + '\n')
    self.important_stats_files.append(imp_metric_stats_csv)
def plot_timeseries(self, graphing_library='matplotlib'):
    """Plot the time-series csvs, grouping all series of a transaction on one chart.

    Falls back to the generic Metric implementation for non-matplotlib
    graphing libraries.

    :param str graphing_library: name of the plotting backend to use
    :return: True on success
    """
    if graphing_library != 'matplotlib':
        return Metric.plot_timeseries(self, graphing_library)
    else:
        logger.info('Using graphing_library {lib} for metric {name}'.format(lib=graphing_library, name=self.label))
        plot_data = {}
        # plot time series data for submetrics
        for out_csv in sorted(self.csv_files, reverse=True):
            csv_filename = os.path.basename(out_csv)
            # The last element is .csv, don't need that in the name of the chart
            column = csv_filename.split('.')[-2]
            transaction_name = ' '.join(csv_filename.split('.')[1:-2])
            plot = PD(input_csv=out_csv, csv_column=1, series_name=transaction_name,
                      y_label=self.sub_metric_description[column] + ' (' + self.sub_metric_units[column] + ')', precision=None, graph_height=500, graph_width=1200,
                      graph_type='line')
            # Group all series belonging to the same transaction together.
            if transaction_name in plot_data:
                plot_data[transaction_name].append(plot)
            else:
                plot_data[transaction_name] = [plot]
        for transaction in plot_data:
            graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data[transaction], self.resource_directory, self.resource_path,
                                                                                    self.label + '.' + transaction)
            if graphed:
                self.plot_files.append(div_file)
        return True
| apache-2.0 |
tensorflow/models | official/vision/detection/utils/object_detection/visualization_utils.py | 1 | 28994 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of functions that are used for visualization.
These functions often receive an image, perform some visualization on the image.
The functions do not return a value, instead they modify the image itself.
"""
import collections
import functools
from absl import logging
# Set headless-friendly backend.
import matplotlib
matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
import tensorflow as tf
from official.vision.detection.utils import box_utils
from official.vision.detection.utils.object_detection import shape_utils
# Pixel margins used when drawing figure titles.
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
# Palette of PIL color names; boxes are colored by class id modulo this list.
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def save_image_array_as_png(image, output_path):
  """Saves an image (represented as a numpy array) to PNG.

  Args:
    image: a numpy array with shape [height, width, 3].
    output_path: path to which image should be written.
  """
  rgb_image = Image.fromarray(np.uint8(image)).convert('RGB')
  # Write through tf.io.gfile so GCS / other filesystems are supported.
  with tf.io.gfile.GFile(output_path, 'w') as fid:
    rgb_image.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
  """Encodes a numpy array into a PNG string.

  Args:
    image: a numpy array with shape [height, width, 3].

  Returns:
    PNG encoded image string.
  """
  buffer = six.BytesIO()
  Image.fromarray(np.uint8(image)).save(buffer, format='PNG')
  encoded = buffer.getvalue()
  buffer.close()
  return encoded
def visualize_images_with_bounding_boxes(images, box_outputs, step,
                                         summary_writer):
  """Records subset of evaluation images with bounding boxes.

  Writes a 'bounding_box_summary' image summary for the given step; silently
  warns and returns when `images` is not a list.
  """
  if not isinstance(images, list):
    logging.warning(
        'visualize_images_with_bounding_boxes expects list of '
        'images but received type: %s and value: %s', type(images), images)
    return
  # All images are assumed to share the first image's spatial shape.
  first_shape = tf.shape(images[0])
  height = tf.cast(first_shape[0], tf.float32)
  width = tf.cast(first_shape[1], tf.float32)
  boxes_normalized = box_utils.normalize_boxes(box_outputs, [height, width])
  # Single color (yellow, fully opaque) for all drawn boxes.
  box_color = tf.constant([[1.0, 1.0, 0.0, 1.0]])
  annotated = tf.image.draw_bounding_boxes(
      tf.cast(images, tf.float32), boxes_normalized, box_color)
  with summary_writer.as_default():
    tf.summary.image('bounding_box_summary', annotated, step=step)
    summary_writer.flush()
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
  """Adds a bounding box to an image (numpy array), in place.

  Thin wrapper: converts the array to a PIL image, delegates the drawing to
  draw_bounding_box_on_image, then copies the pixels back into `image`.

  Args:
    image: a numpy array with shape [height, width, 3].
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list: list of strings to display in box (each to be shown on
      its own line).
    use_normalized_coordinates: If True (default), treat coordinates ymin,
      xmin, ymax, xmax as relative to the image. Otherwise treat coordinates
      as absolute.
  """
  pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_bounding_box_on_image(pil_image, ymin, xmin, ymax, xmax, color,
                             thickness, display_str_list,
                             use_normalized_coordinates)
  np.copyto(image, np.array(pil_image))
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=4,
                               display_str_list=(),
                               use_normalized_coordinates=True):
  """Adds a bounding box to an image.

  Bounding box coordinates can be specified in either absolute (pixel) or
  normalized coordinates by setting the use_normalized_coordinates argument.

  Each string in display_str_list is displayed on a separate line above the
  bounding box in black text on a rectangle filled with the input 'color'.
  If the top of the bounding box extends to the edge of the image, the strings
  are displayed below the bounding box.

  Args:
    image: a PIL.Image object.
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list: list of strings to display in box (each to be shown on
      its own line).
    use_normalized_coordinates: If True (default), treat coordinates ymin,
      xmin, ymax, xmax as relative to the image. Otherwise treat coordinates
      as absolute.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  if use_normalized_coordinates:
    (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                  ymin * im_height, ymax * im_height)
  else:
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
  draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
             (left, top)],
            width=thickness,
            fill=color)
  try:
    font = ImageFont.truetype('arial.ttf', 24)
  except IOError:
    font = ImageFont.load_default()
  # If the total height of the display strings added to the top of the bounding
  # box exceeds the top of the image, stack the strings below the bounding box
  # instead of above.
  display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
  # Each display_str has a top and bottom margin of 0.05x.
  total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
  if top > total_display_str_height:
    text_bottom = top
  else:
    text_bottom = bottom + total_display_str_height
  # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]:
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                    (left + text_width, text_bottom)],
                   fill=color)
    draw.text((left + margin, text_bottom - text_height - margin),
              display_str,
              fill='black',
              font=font)
    # Move up by the full label height: the text plus both margins. The
    # previous code subtracted `text_height - 2 * margin`, which made
    # consecutive stacked labels overlap by 4 * margin pixels.
    text_bottom -= text_height + 2 * margin
def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color='red',
                                       thickness=4,
                                       display_str_list_list=()):
  """Draws bounding boxes on image (numpy array), in place.

  Args:
    image: a numpy array object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: list of list of strings. a list of strings for
      each bounding box. The reason to pass a list of strings for a bounding
      box is that it might contain multiple labels.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  as_pil = Image.fromarray(image)
  draw_bounding_boxes_on_image(as_pil, boxes, color, thickness,
                               display_str_list_list)
  np.copyto(image, np.array(as_pil))
def draw_bounding_boxes_on_image(image,
                                 boxes,
                                 color='red',
                                 thickness=4,
                                 display_str_list_list=()):
  """Draws bounding boxes on image.

  Args:
    image: a PIL.Image object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: list of list of strings. a list of strings for
      each bounding box. The reason to pass a list of strings for a bounding
      box is that it might contain multiple labels.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  shape = boxes.shape
  # A shapeless (scalar) array carries no boxes at all.
  if not shape:
    return
  if len(shape) != 2 or shape[1] != 4:
    raise ValueError('Input must be of size [N, 4]')
  for idx in range(shape[0]):
    strs = display_str_list_list[idx] if display_str_list_list else ()
    ymin, xmin, ymax, xmax = boxes[idx]
    draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color,
                               thickness, strs)
def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
  """Draws detections (boxes only) on `image` via the shared helper."""
  kwargs['category_index'] = category_index
  return visualize_boxes_and_labels_on_image_array(image, boxes, classes,
                                                   scores, **kwargs)
def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
                               category_index, **kwargs):
  """Draws detections with instance masks on `image` via the shared helper."""
  kwargs.update(category_index=category_index, instance_masks=masks)
  return visualize_boxes_and_labels_on_image_array(image, boxes, classes,
                                                   scores, **kwargs)
def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
                                   category_index, **kwargs):
  """Draws detections with keypoints on `image` via the shared helper."""
  kwargs.update(category_index=category_index, keypoints=keypoints)
  return visualize_boxes_and_labels_on_image_array(image, boxes, classes,
                                                   scores, **kwargs)
def _visualize_boxes_and_masks_and_keypoints(image, boxes, classes, scores,
                                             masks, keypoints, category_index,
                                             **kwargs):
  """Draws detections with both masks and keypoints via the shared helper."""
  kwargs.update(category_index=category_index, instance_masks=masks,
                keypoints=keypoints)
  return visualize_boxes_and_labels_on_image_array(image, boxes, classes,
                                                   scores, **kwargs)
def _resize_original_image(image, image_shape):
  """Nearest-neighbor resizes a single image tensor to `image_shape` (uint8)."""
  batched = tf.expand_dims(image, 0)
  resized = tf.image.resize(
      batched, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  return tf.cast(tf.squeeze(resized, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
                                         boxes,
                                         classes,
                                         scores,
                                         category_index,
                                         original_image_spatial_shape=None,
                                         true_image_shape=None,
                                         instance_masks=None,
                                         keypoints=None,
                                         max_boxes_to_draw=20,
                                         min_score_thresh=0.2,
                                         use_normalized_coordinates=True):
  """Draws bounding boxes, masks, and keypoints on batch of image tensors.
  Args:
    images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
      channels will be ignored. If C = 1, then we convert the images to RGB
      images.
    boxes: [N, max_detections, 4] float32 tensor of detection boxes.
    classes: [N, max_detections] int tensor of detection classes. Note that
      classes are 1-indexed.
    scores: [N, max_detections] float32 tensor of detection scores.
    category_index: a dict that maps integer ids to category dicts. e.g.
      {1: {1: 'dog'}, 2: {2: 'cat'}, ...}
    original_image_spatial_shape: [N, 2] tensor containing the spatial size of
      the original image.
    true_image_shape: [N, 3] tensor containing the spatial size of unpadded
      original_image.
    instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
      instance masks.
    keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
      with keypoints.
    max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
    min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and kepoints are in
      normalized coordinates (as opposed to absolute coordiantes). Default is
      True.
  Returns:
    4D image tensor of type uint8, with boxes drawn on top.
  """
  # Additional channels are being ignored.
  if images.shape[3] > 3:
    images = images[:, :, :, 0:3]
  elif images.shape[3] == 1:
    images = tf.image.grayscale_to_rgb(images)
  # Keyword arguments forwarded to visualize_boxes_and_labels_on_image_array
  # for every image in the batch.
  visualization_keyword_args = {
      'use_normalized_coordinates': use_normalized_coordinates,
      'max_boxes_to_draw': max_boxes_to_draw,
      'min_score_thresh': min_score_thresh,
      'agnostic_mode': False,
      'line_thickness': 4
  }
  # When shapes are not given, fill placeholder -1 tensors so map_fn still
  # receives a uniform elems structure; the -1 entries are never used because
  # the corresponding branches in draw_boxes below are skipped.
  if true_image_shape is None:
    true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
  else:
    true_shapes = true_image_shape
  if original_image_spatial_shape is None:
    original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
  else:
    original_shapes = original_image_spatial_shape
  # Choose the per-image visualization function matching which optional
  # extras (masks and/or keypoints) were provided, and line up `elems`
  # with that function's positional signature.
  if instance_masks is not None and keypoints is None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_masks,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores,
        instance_masks
    ]
  elif instance_masks is None and keypoints is not None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_keypoints,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores, keypoints
    ]
  elif instance_masks is not None and keypoints is not None:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes_and_masks_and_keypoints,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [
        true_shapes, original_shapes, images, boxes, classes, scores,
        instance_masks, keypoints
    ]
  else:
    visualize_boxes_fn = functools.partial(
        _visualize_boxes,
        category_index=category_index,
        **visualization_keyword_args)
    elems = [true_shapes, original_shapes, images, boxes, classes, scores]
  def draw_boxes(image_and_detections):
    """Draws boxes on image."""
    true_shape = image_and_detections[0]
    original_shape = image_and_detections[1]
    if true_image_shape is not None:
      # Strip padding so boxes are drawn on the unpadded image.
      image = shape_utils.pad_or_clip_nd(image_and_detections[2],
                                         [true_shape[0], true_shape[1], 3])
    if original_image_spatial_shape is not None:
      # NOTE(review): `image` is only bound when true_image_shape was given;
      # if original_image_spatial_shape is passed without true_image_shape
      # this raises NameError — confirm callers always pass both or neither.
      image_and_detections[2] = _resize_original_image(image, original_shape)
    # py_func receives everything after the two shape entries, i.e. the image
    # followed by the detection tensors, matching visualize_boxes_fn.
    image_with_boxes = tf.compat.v1.py_func(visualize_boxes_fn,
                                            image_and_detections[2:], tf.uint8)
    return image_with_boxes
  images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
  return images
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
  """Draws keypoints on an image (numpy array), in place.

  Args:
    image: a numpy array with shape [height, width, 3].
    keypoints: a numpy array with shape [num_keypoints, 2].
    color: color to draw the keypoints with. Default is red.
    radius: keypoint radius. Default value is 2.
    use_normalized_coordinates: if True (default), treat keypoint values as
      relative to the image. Otherwise treat them as absolute.
  """
  rgb_image = Image.fromarray(np.uint8(image)).convert('RGB')
  draw_keypoints_on_image(rgb_image, keypoints, color, radius,
                          use_normalized_coordinates)
  np.copyto(image, np.array(rgb_image))
def draw_keypoints_on_image(image,
                            keypoints,
                            color='red',
                            radius=2,
                            use_normalized_coordinates=True):
  """Draws keypoints on an image.

  Args:
    image: a PIL.Image object.
    keypoints: a numpy array with shape [num_keypoints, 2], rows are (y, x).
    color: color to draw the keypoints with. Default is red.
    radius: keypoint radius. Default value is 2.
    use_normalized_coordinates: if True (default), treat keypoint values as
      relative to the image. Otherwise treat them as absolute.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  for y, x in keypoints:
    if use_normalized_coordinates:
      x = x * im_width
      y = y * im_height
    draw.ellipse([(x - radius, y - radius), (x + radius, y + radius)],
                 outline=color,
                 fill=color)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
  """Draws mask on an image, in place.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with values
      of either 0 or 1.
    color: color to draw the mask with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.4)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.uint8:
    raise ValueError('`mask` not of type np.uint8')
  if np.any(np.logical_and(mask != 1, mask != 0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  if image.shape[:2] != mask.shape:
    raise ValueError('The image has spatial dimensions %s but the mask has '
                     'dimensions %s' % (image.shape[:2], mask.shape))
  rgb = ImageColor.getrgb(color)
  base = Image.fromarray(image)
  # A solid image of the mask color, alpha-blended through the mask.
  solid = np.ones_like(mask)[:, :, None] * np.reshape(list(rgb), [1, 1, 3])
  color_layer = Image.fromarray(np.uint8(solid)).convert('RGBA')
  blend_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
  composited = Image.composite(color_layer, base, blend_mask)
  np.copyto(image, np.array(composited.convert('RGB')))
def visualize_boxes_and_labels_on_image_array(
    image,
    boxes,
    classes,
    scores,
    category_index,
    instance_masks=None,
    instance_boundaries=None,
    keypoints=None,
    use_normalized_coordinates=False,
    max_boxes_to_draw=20,
    min_score_thresh=.5,
    agnostic_mode=False,
    line_thickness=4,
    groundtruth_box_visualization_color='black',
    skip_scores=False,
    skip_labels=False):
  """Overlay labeled boxes on an image with formatted scores and label names.
  This function groups boxes that correspond to the same location
  and creates a display string for each detection and overlays these
  on the image. Note that this function modifies the image in place, and returns
  that same image.
  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    boxes: a numpy array of shape [N, 4]
    classes: a numpy array of shape [N]. Note that class indices are 1-based,
      and match the keys in the label map.
    scores: a numpy array of shape [N] or None. If scores=None, then this
      function assumes that the boxes to be plotted are groundtruth boxes and
      plot all boxes as black with no classes or scores.
    category_index: a dict containing category dictionaries (each holding
      category index `id` and category name `name`) keyed by category indices.
    instance_masks: a numpy array of shape [N, image_height, image_width] with
      values ranging between 0 and 1, can be None.
    instance_boundaries: a numpy array of shape [N, image_height, image_width]
      with values ranging between 0 and 1, can be None.
    keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
    use_normalized_coordinates: whether boxes is to be interpreted as normalized
      coordinates or not.
    max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
      boxes.
    min_score_thresh: minimum score threshold for a box to be visualized
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not. This mode will display scores but ignore
      classes.
    line_thickness: integer (default: 4) controlling line width of the boxes.
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
  Returns:
    uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
  """
  # Create a display string (and color) for every box location, group any boxes
  # that correspond to the same location.
  box_to_display_str_map = collections.defaultdict(list)
  box_to_color_map = collections.defaultdict(str)
  box_to_instance_masks_map = {}
  box_to_instance_boundaries_map = {}
  box_to_keypoints_map = collections.defaultdict(list)
  # A falsy max_boxes_to_draw (None or 0) means "draw everything".
  if not max_boxes_to_draw:
    max_boxes_to_draw = boxes.shape[0]
  for i in range(min(max_boxes_to_draw, boxes.shape[0])):
    if scores is None or scores[i] > min_score_thresh:
      # The box tuple doubles as the grouping key: detections with identical
      # coordinates share one drawn box and stack their display strings.
      box = tuple(boxes[i].tolist())
      if instance_masks is not None:
        box_to_instance_masks_map[box] = instance_masks[i]
      if instance_boundaries is not None:
        box_to_instance_boundaries_map[box] = instance_boundaries[i]
      if keypoints is not None:
        box_to_keypoints_map[box].extend(keypoints[i])
      if scores is None:
        # Groundtruth mode: plain boxes, no label/score text.
        box_to_color_map[box] = groundtruth_box_visualization_color
      else:
        display_str = ''
        if not skip_labels:
          if not agnostic_mode:
            if classes[i] in category_index.keys():
              class_name = category_index[classes[i]]['name']
            else:
              class_name = 'N/A'
            display_str = str(class_name)
        if not skip_scores:
          if not display_str:
            display_str = '{}%'.format(int(100 * scores[i]))
          else:
            display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
        box_to_display_str_map[box].append(display_str)
        if agnostic_mode:
          box_to_color_map[box] = 'DarkOrange'
        else:
          # Deterministic per-class color, cycling through STANDARD_COLORS.
          box_to_color_map[box] = STANDARD_COLORS[classes[i] %
                                                  len(STANDARD_COLORS)]
  # Draw all boxes onto image. Masks/boundaries go first so the box outline
  # and label text end up on top of them.
  for box, color in box_to_color_map.items():
    ymin, xmin, ymax, xmax = box
    if instance_masks is not None:
      draw_mask_on_image_array(
          image, box_to_instance_masks_map[box], color=color)
    if instance_boundaries is not None:
      draw_mask_on_image_array(
          image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
    draw_bounding_box_on_image_array(
        image,
        ymin,
        xmin,
        ymax,
        xmax,
        color=color,
        thickness=line_thickness,
        display_str_list=box_to_display_str_map[box],
        use_normalized_coordinates=use_normalized_coordinates)
    if keypoints is not None:
      draw_keypoints_on_image_array(
          image,
          box_to_keypoints_map[box],
          color=color,
          radius=line_thickness / 2,
          use_normalized_coordinates=use_normalized_coordinates)
  return image
def add_cdf_image_summary(values, name):
  """Adds a tf.summary.image for a CDF plot of the values.

  Normalizes `values` such that they sum to 1, plots the cumulative
  distribution function and creates a tf image summary.

  Args:
    values: a 1-D float32 tensor containing the values.
    name: name for the image summary.
  """
  def cdf_plot(values):
    """Numpy function to plot CDF."""
    normalized_values = values / np.sum(values)
    sorted_values = np.sort(normalized_values)
    cumulative_values = np.cumsum(sorted_values)
    fraction_of_examples = (
        np.arange(cumulative_values.size, dtype=np.float32) /
        cumulative_values.size)
    fig = plt.figure(frameon=False)
    # Integer subplot spec; the string form '111' is deprecated in Matplotlib.
    ax = fig.add_subplot(111)
    ax.plot(fraction_of_examples, cumulative_values)
    ax.set_ylabel('cumulative normalized values')
    ax.set_xlabel('fraction of examples')
    fig.canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    # np.fromstring on binary data is deprecated; frombuffer is the
    # supported (zero-copy) replacement.
    image = np.frombuffer(
        fig.canvas.tostring_rgb(),
        dtype='uint8').reshape(1, int(height), int(width), 3)
    # Close the figure so repeated summary calls do not leak figures.
    plt.close(fig)
    return image
  cdf_plot = tf.compat.v1.py_func(cdf_plot, [values], tf.uint8)
  tf.compat.v1.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
  """Adds a tf.summary.image for a histogram plot of the values.

  Plots the histogram of values and creates a tf image summary.

  Args:
    values: a 1-D float32 tensor containing the values.
    bins: bin edges which will be directly passed to np.histogram.
    name: name for the image summary.
  """
  def hist_plot(values, bins):
    """Numpy function to plot hist."""
    fig = plt.figure(frameon=False)
    # Integer subplot spec; the string form '111' is deprecated in Matplotlib.
    ax = fig.add_subplot(111)
    y, x = np.histogram(values, bins=bins)
    ax.plot(x[:-1], y)
    ax.set_ylabel('count')
    ax.set_xlabel('value')
    fig.canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    # np.fromstring on binary data is deprecated; frombuffer is the
    # supported (zero-copy) replacement.
    image = np.frombuffer(
        fig.canvas.tostring_rgb(),
        dtype='uint8').reshape(1, int(height), int(width), 3)
    # Close the figure so repeated summary calls do not leak figures.
    plt.close(fig)
    return image
  hist_plot = tf.compat.v1.py_func(hist_plot, [values, bins], tf.uint8)
  tf.compat.v1.summary.image(name, hist_plot)
| apache-2.0 |
SotolitoLabs/cockpit | bots/learn/cluster.py | 3 | 11778 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2017 Slavek Kabrda
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
# WARNING: As you change this code increment this version number so
# the machine learning model uses a new place to store the model
VERSION = 5
# This attempts to cluster log items related to similarity and then
# classify whether new test logs fit into those clusters. The clustering
# is unsupervised, and currently uses DBSCAN to accomplish this.
# The classification currently uses nearest neighbor techniques.
#
# We use distances to tell us whether two items are similar or not.
# These distances are currently calculated via normalized compression
# distance in ncd.py
# The threshhold of where we start to treat a cluster as a flakey if
# the number of tests that failed but were merged anyway is over:
FLAKE_THRESHOLD = 0.4
import gzip
import operator
import os
import pickle
import random
import sys
import time
import tempfile
import sklearn.cluster
import sklearn.metrics
import sklearn.neighbors
BASE = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(BASE, ".."))
from learn import ncd
from learn import extractor
# The name and version of the learning data
FILENAME = "tests-learn-{0}-{1}.model".format(VERSION, extractor.VERSION)
# Note that we use pickle protocol=4 to get over size limits
# Load a model from the given directory
# Return None of the model doesn't exist
def load(directory):
    """Load a previously saved model from `directory`.

    Returns the unpickled model, or None when no model file exists.
    """
    path = os.path.join(directory, FILENAME)
    if not os.path.exists(path):
        return None
    with gzip.open(path, 'rb') as fp:
        return pickle.load(fp)
# Write a model to the given directory
def save(directory, model):
    """Write `model` as a gzipped pickle into `directory` and return its path.

    Writes to a temporary file first and renames it into place, so readers
    never observe a half-written model.
    """
    path = os.path.join(directory, FILENAME)
    fd, tmp_name = tempfile.mkstemp(prefix=FILENAME, dir=directory)
    os.close(fd)
    # protocol=4 gets past the pickle size limits of older protocols.
    with gzip.open(tmp_name, 'wb') as fp:
        pickle.dump(model, fp, protocol=4)
    os.rename(tmp_name, path)
    return path
# A cluster of items with optional analysis of those items
# The items are not stored here, but their points in the
# array are.
class Cluster():
    """A cluster of log items with optional analysis of those items.

    The items themselves are not stored here; only their indexes
    ("points") into the model's features array are.
    """
    def __init__(self, label, points):
        self.label = label    # DBSCAN cluster label, or None for noise
        self.points = points  # indexes into the features array
    # Analyse the cluster, based on the points added in
    # the cluster. The points should be indexes into the
    # items array.
    def analyze(self, features):
        """Return a summary dict: size, merged fraction, dominant features."""
        num_merged = 0
        for point in self.points:
            if features[point][extractor.FEATURE_MERGED] == 1:
                num_merged += 1
        total = len(self.points)
        # Calculate the merged probability. Initialize to 0.0 first: the
        # previous code assigned `merged` only inside the loop / the
        # `if total:` branch, so an empty cluster raised NameError.
        merged = 0.0
        if total:
            merged = min(float(num_merged) / float(total), 1.0)
        # Probability that this cluster represents the given name
        return {
            "total": total,
            "merged": merged,
            "trackers": self.group_by(features, extractor.FEATURE_TRACKER, factor=extractor.TRACKER_SPARSE),
            "names": self.group_by(features, extractor.FEATURE_NAME),
            "contexts": self.group_by(features, extractor.FEATURE_CONTEXT)
        }
    # Figure out how often given values of a feature show up in a cluster
    def group_by(self, features, feature, limit=5, factor=1):
        """Return up to `limit` (value, probability) pairs, most common first."""
        values = { }
        total = 0
        for point in self.points:
            value = features[point][feature]
            if value:
                # If we have a factor, some of the features may be sparse
                # So account for the spareness in our probability estimates
                values[value] = values.get(value, 0) + factor
                total += factor
            else:
                total += 1
        listing = [ ]
        for value, count in values.items():
            probability = float(count) / float(total or 1)
            listing.append((value, min(probability, 1)))
        listing.sort(key=operator.itemgetter(1), reverse=True)
        return listing[0:limit]
    # Dump the selected cluster to disk. The features are the inputs
    # from the model that were used to build the cluster.
    def dump(self, directory, features, detail=None):
        """Append this cluster's analysis and member logs to a log file."""
        if self.label is None:
            label = "noise"
        else:
            label = "cluster-{0}".format(self.label)
        # Dump our stuff into the directory
        if not os.path.exists(directory):
            os.mkdir(directory)
        path = os.path.join(directory, "{0}-{1}.log".format(label, detail or len(self.points)))
        with open(path, "a") as fp:
            for row in self.analyze(features).items():
                fp.write("{0}: {1}\n".format(row[0], repr(row[1])))
            fp.write("\n\n")
            for point in self.points:
                url = features[point][extractor.FEATURE_URL]
                if url:
                    fp.write("{0}\n".format(url))
                fp.write(features[point][extractor.FEATURE_LOG])
                fp.write("\n\n")
# The clustering model. Uses unsupervised clustering to build clusters
# out of data extracted from test logs. See extractor.py for the code
# that extracts features from the logs.
#
# Also allows classification into the built clusters.
#
class Model():
    """Unsupervised clustering of test-log items (DBSCAN over NCD distances)
    plus nearest-neighbor classification of new items into those clusters.
    """
    eps = 0.3 # Maximum distance between two samples in neighborhood
    min_samples = 3 # Minimum number of samples in a cluster
    def __init__(self, verbose=False):
        self.clusters = { } # The actual clustered items
        self.verbose = verbose
        self.extractor = None  # set by train()
        self.features = None   # set by train()
    # Perform the unsupervised clustering
    def train(self, items):
        """Cluster `items` and fit the nearest-neighbor classifier.

        Populates self.extractor, self.features, self.clusters, self.noise
        and self.neighbors.
        """
        self.clusters = { }
        self.noise = [ ]
        items = list(items)
        if self.verbose:
            sys.stderr.write("{0}: Items to train\n".format(len(items)))
        # Extract the features we want to use for clustering from the items
        self.extractor = extractor.Extractor()
        self.features = self.extractor.fit_transform(items)
        jobs = os.cpu_count() or -1
        start = time.perf_counter()
        # Initialize the NCD code with our log feature. Currently only
        # one feature is used: the normalized log
        X = ncd.prepare(map(lambda features: features[extractor.FEATURE_LOG], self.features))
        # Calculate all the pairwise distances between the items in question
        # The scikit DBSCAN implementation does this anyway, poorly. So why not
        # do it ahead of time and parralelize it ... which we do here. Then we
        #
        # TODO: This takes forever and is an O(n^2) operation
        # There is significant room for improvement both here, and in the following
        # DBSCAN usage and implementation. Techniques such as feature/item selection
        # BIRCH, ball trees, or many other things could make this better/faster
        # NOTE(review): sklearn.metrics is never imported explicitly in this
        # file; this relies on `import sklearn.cluster` pulling it in
        # transitively — an explicit import would be safer.
        matrix = sklearn.metrics.pairwise.pairwise_distances(X, metric=ncd.metric, n_jobs=jobs)
        if self.verbose:
            sys.stderr.write("{0}: Computed distances in {1} seconds on {2} cores\n".format(
                int((len(self.features) * len(self.features)) / 2),
                int(time.perf_counter() - start), jobs
            ))
        # Actually perform the clustering. This is fast compared to above
        # NOTE(review): len(...) / 10 yields a float; DBSCAN's min_samples is
        # documented as an int — confirm whether int() was intended here.
        min_samples = min(self.min_samples, len(self.features) / 10)
        dbs = sklearn.cluster.DBSCAN(metric='precomputed', eps=self.eps, min_samples=min_samples)
        dbs.fit(matrix)
        labels = dbs.labels_
        # Create clusters of all the items
        clusters = { }
        noise = [ ]
        for i, label in enumerate(labels):
            if label == -1:
                # DBSCAN labels outliers -1; we keep them in a noise cluster
                noise.append(i)
            else:
                if label not in clusters:
                    clusters[label] = [ ]
                clusters[label].append(i)
        self.clusters = { }
        for label, indexes in clusters.items():
            self.clusters[label] = Cluster(label, indexes)
        self.noise = Cluster(None, noise)
        # Print out a rough description of that
        if self.verbose:
            sys.stderr.write("{0}: Clusters ({1} items, {2} noise)\n".format(
                len(self.clusters.keys()),
                len(self.features) - len(noise),
                len(noise)
            ))
        # Setup our neighbors classifier for predict()
        self.neighbors = sklearn.neighbors.KNeighborsClassifier(metric='precomputed', weights='distance')
        self.neighbors.fit(matrix, labels)
    # Predict which clusters these items are a part of
    # The cluster labels are returned for each item, along with a probability
    def predict(self, items):
        """Return a (label, probability) pair for each item; label None = noise."""
        features = self.extractor.transform(items)
        # NOTE(review): x[0] presumably corresponds to extractor.FEATURE_LOG
        # (as used in train above) — confirm against the extractor's layout.
        Y = ncd.prepare(map(lambda x: x[0], self.features))
        X = ncd.prepare(map(lambda x: x[0], features))
        matrix = sklearn.metrics.pairwise.pairwise_distances(X, Y, metric=ncd.metric, n_jobs=-1)
        result = [ ]
        # TODO: The probability is currently bogus, we could use distance measurements to fill it in
        for label in self.neighbors.predict(matrix):
            if label == -1:
                result.append((None, 0.0))
            else:
                # TODO: The problem here is we don't classify noise properly, should use eps (above)
                result.append((label, 0.5))
        return result
    # Dump the cluster's models and noise to a directory
    def dump(self, directory):
        """Write every cluster (and the noise cluster) out as log files."""
        for label, cluster in self.clusters.items():
            cluster.dump(directory, self.features)
        self.noise.dump(directory, self.features)
# This is a helpful debugger to help diagnose data, and figure out if we're
# getting the above threshold and regular expressions right
if __name__ == '__main__':
    import data
    import argparse
    parser = argparse.ArgumentParser(description="Clusterize input data")
    parser.add_argument("--only", action="append", help="Only analyze these statuses")
    parser.add_argument("-v", "--verbose", action="store_true", help="Print verbose progress output")
    parser.add_argument("filename", help="Training data in JSONL gzip format")
    opts = parser.parse_args()
    # The kind of statuses to include
    if not opts.only:
        only = None
    else:
        only = lambda item: item.get("status") in opts.only
    # Load the actual data
    items = data.load(opts.filename, only=only, verbose=opts.verbose)
    # Split items into two sets with probability factor
    # (~factor of the items land in the first set, the rest in the second)
    def split(items, factor):
        a, b = [ ], [ ]
        for item in items:
            if random.random() < factor:
                a.append(item)
            else:
                b.append(item)
        return a, b
    # 60/40 train/predict split for eyeballing classification quality
    train, predict = split(items, 0.6)
    # Write to the current directory
    directory = "."
    model = Model(verbose=opts.verbose)
    model.train(train)
    results = model.predict(predict)
    # Dump our clusters and predicted results, using fake clusters
    model.dump(directory)
    # NOTE(review): the fake one-tuple feature row (log,) only works if
    # Cluster.dump/analyze index it at position 0 — confirm against the
    # extractor's FEATURE_* constants.
    for point, result in enumerate(results):
        Cluster(result[0], [ 0 ]).dump(directory, [ (predict[point]["log"], ) ], detail="predict")
| lgpl-2.1 |
ilo10/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
# Shuffle the iris dataset with a fixed seed so every test sees the same
# deterministic sample order.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# Sparse (CSR) view of the same data, to exercise the sparse code paths.
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
    """Reference pure-Python Passive-Aggressive implementation.

    Used below as ground truth to check PassiveAggressiveClassifier's
    weight updates (see test_classifier_correctness).
    """
    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        # NOTE(review): random_state is accepted but never used here;
        # samples are visited in their given order.
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
    def fit(self, X, y):
        """Run n_iter sequential passes of the PA update over (X, y)."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                # Hinge losses -> classification; epsilon-insensitive -> regression.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)
                sqnorm = np.dot(X[i], X[i])
                # PA-I update (clipped) for the plain losses; PA-II
                # (regularized) for the squared losses.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))
                # Step direction: label sign for classification, residual
                # sign for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)
                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += step
    def project(self, X):
        """Return the decision value(s) <X, w> + b."""
        return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
    # Dense and sparse inputs, with and without intercept, should all
    # reach a reasonable accuracy on the shuffled iris data.
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
                                              fit_intercept=fit_intercept,
                                              random_state=0)
            clf.fit(data, y)
            score = clf.score(data, y)
            assert_greater(score, 0.79)
def test_classifier_partial_fit():
    # Repeated partial_fit calls should reach the same accuracy bar as a
    # single fit with the equivalent number of iterations (see
    # test_classifier_accuracy).
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0,
                                          fit_intercept=True,
                                          random_state=0)
        for t in range(30):
            clf.partial_fit(data, y, classes)
        score = clf.score(data, y)
        assert_greater(score, 0.79)
def test_classifier_refit():
    # Classifier can be retrained on different labels and features.
    clf = PassiveAggressiveClassifier().fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
    # A second fit must rebuild classes_ from the new target values.
    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
    # The sklearn classifier must match the reference MyPassiveAggressive
    # updates on a binary (class 1 vs rest) iris task for both hinge
    # losses; shuffle=False keeps the per-sample update order identical.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    for loss in ("hinge", "squared_hinge"):
        clf1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        clf1.fit(X, y_bin)
        for data in (X, X_csr):
            clf2 = PassiveAggressiveClassifier(C=1.0,
                                               loss=loss,
                                               fit_intercept=True,
                                               n_iter=2, shuffle=False)
            clf2.fit(data, y_bin)
            assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
    # The classifier exposes no probability or transform API; attribute
    # access must raise instead of silently returning something.
    clf = PassiveAggressiveClassifier()
    for meth in ("predict_proba", "predict_log_proba", "transform"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
    # Regression on the +/-1 encoded target: MSE must fall below 1.7 for
    # dense and sparse input, with and without intercept.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
    # Incremental partial_fit should match the quality bar of a single
    # fit with n_iter=50 (see test_regressor_mse).
    y_bin = y.copy()
    y_bin[y != 1] = -1
    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0,
                                         fit_intercept=True,
                                         random_state=0)
        for t in range(50):
            reg.partial_fit(data, y_bin)
        pred = reg.predict(data)
        assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
    # The sklearn regressor must match the reference MyPassiveAggressive
    # updates for both epsilon-insensitive losses; shuffle=False keeps
    # the per-sample update order identical.
    y_bin = y.copy()
    y_bin[y != 1] = -1
    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        reg1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        reg1.fit(X, y_bin)
        for data in (X, X_csr):
            reg2 = PassiveAggressiveRegressor(C=1.0,
                                              loss=loss,
                                              fit_intercept=True,
                                              n_iter=2, shuffle=False)
            reg2.fit(data, y_bin)
            assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
    # The regressor exposes no transform API; attribute access must raise.
    reg = PassiveAggressiveRegressor()
    for meth in ("transform",):
        assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
nborwankar/opendatasci | notebooks/kmeans.py | 4 | 1967 | # supporting lib for kmeans clustering
# Nitin Borwankar
# Open Data Science Training
import numpy as np
from scipy.cluster.vq import kmeans,vq
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
def load_data(fName = '../datasets/UN4col.csv'):
    """Load whitespace-delimited numeric data from *fName*.

    Parameters
    ----------
    fName : str
        Path to the data file; defaults to the UN dataset used in the
        accompanying notebook.

    Returns
    -------
    numpy.ndarray
        Array with one row per line of the file.
    """
    # A context manager guarantees the handle is closed even if
    # np.loadtxt raises (the original left the file open on error).
    with open(fName) as fp:
        return np.loadtxt(fp)
def run_kmeans(X, n=10):
    """Run k-means for k = 1..n-1 and collect elbow-curve statistics.

    Returns the tuple (K, KM, centroids, D_k, cIdx, dist, avgWithinSS),
    where avgWithinSS[i] is the average distance of each point to its
    assigned centroid for K[i] clusters -- the y-values of the elbow
    curve used to choose K.
    """
    _K = range(1,n)
    # scipy.cluster.vq.kmeans
    _KM = [kmeans(X,k) for k in _K] # apply kmeans 1 to 10
    _centroids = [cent for (cent,var) in _KM] # cluster centroids
    # distance of every point to every centroid, then per-point nearest
    _D_k = [cdist(X, cent, 'euclidean') for cent in _centroids]
    _cIdx = [np.argmin(D,axis=1) for D in _D_k]
    _dist = [np.min(D,axis=1) for D in _D_k]
    # mean distance to the assigned centroid, for each candidate k
    _avgWithinSS = [sum(d)/X.shape[0] for d in _dist]
    return (_K, _KM, _centroids, _D_k, _cIdx, _dist, _avgWithinSS)
def plot_elbow_curve(kIdx, K, avgWithinSS):
    """Plot the k-means elbow curve and highlight the chosen K.

    *kIdx* is the index into *K* to mark with a red circle; *K* and
    *avgWithinSS* come from run_kmeans().  Returns (figure, axes).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(K, avgWithinSS, 'b*-')
    # mark the selected number of clusters
    ax.plot(K[kIdx], avgWithinSS[kIdx], marker='o', markersize=12,
        markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
    plt.grid(True)
    plt.xlabel('Number of clusters')
    plt.ylabel('Average within-cluster sum of squares')
    tt = plt.title('Elbow for KMeans clustering')
    return(fig,ax)
def plot_clusters(orig,pred,nx,ny,legend=True):
    """Scatter-plot the UN data colored by predicted cluster.

    *orig* is the data array, *pred* the cluster label per row, and
    *nx*/*ny* the column indices for the x and y axes.  Returns the
    three plot handles (p0, p1, p2).
    """
    data = orig
    import matplotlib.pyplot as plt
    ylabels = { 0:'Male life expectancy in yrs',1:'Female life expectancy in yrs',2:'Infant mortality, per 1000'}
    # plot data into three clusters based on value of c
    # NOTE(review): the cluster-id -> development-level legend below is
    # hard-coded and assumes a particular labelling from k-means --
    # confirm against the notebook before relying on the legend names.
    p0 = plt.plot(data[pred==0,nx],data[pred==0,ny],'ro',label='Underdeveloped')
    p2 = plt.plot(data[pred==2,nx],data[pred==2,ny],'go',label='Developing')
    p1 = plt.plot(data[pred==1,nx],data[pred==1,ny],'bo',label='Developed')
    lx = p1[0].axes.set_xlabel('Per Capita GDP in US$')
    ly = p1[0].axes.set_ylabel(ylabels[ny])
    tt= plt.title('UN countries Dataset, KMeans clustering with K=3')
    if legend:
        ll=plt.legend()
    return (p0,p1,p2)
| bsd-2-clause |
bsipocz/astroML | examples/datasets/plot_sdss_spectrum.py | 5 | 1247 | """
SDSS Spectrum Example
---------------------
This example shows how to fetch and plot a spectrum from the SDSS database
using the plate, MJD, and fiber numbers. The code below sends a query to
the SDSS server for the given plate, fiber, and mjd, downloads the spectrum,
and plots the result.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_spectrum
#------------------------------------------------------------
# Fetch single spectrum
plate = 1615
mjd = 53166
fiber = 513
spec = fetch_sdss_spectrum(plate, mjd, fiber)
#------------------------------------------------------------
# Plot the resulting spectrum
ax = plt.axes()
ax.plot(spec.wavelength(), spec.spectrum, '-k', label='spectrum')
ax.plot(spec.wavelength(), spec.error, '-', color='gray', label='error')
ax.legend(loc=4)
ax.set_title('Plate = %(plate)i, MJD = %(mjd)i, Fiber = %(fiber)i' % locals())
ax.text(0.05, 0.95, 'z = %.2f' % spec.z, size=16,
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$\lambda (\AA)$')
ax.set_ylabel('Flux')
ax.set_ylim(-10, 300)
plt.show()
| bsd-2-clause |
etkirsch/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
# Use integer division: float slice indices (n / 2) raise TypeError on
# Python 3.
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# Transform with the fitted CCA model; the original called
# plsca.transform here, so the CCA fit was never actually used.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
q1ang/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
# Raw string: "\g" is an invalid escape sequence in a normal string
# literal (SyntaxWarning/DeprecationWarning on modern Python); the
# rendered LaTeX gamma label is unchanged.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
LohithBlaze/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
cdd1969/pygwa | lib/functions/interpolate.py | 1 | 7114 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def createInterpolationRanges(df, columnName, interpolateMargin=100, log=False):
    u""" Function checks data for missing values. From the missing-data indices
        it will try to create so called 'missing data index regions' based on
        *interpolateMargin* parameter. Two NaNs belong to the same region when
        they are at most *interpolateMargin* indices apart; every region is
        then widened by *interpolateMargin* on each side, clipped to the
        column bounds. These index-regions are used further for faster
        interpolation.

        INPUT:
        ------------------------------------------------------------------------
            *df*                - pandas.DataFrame, our data
            *columnName*        - str, column name in passed DataFrame
            *interpolateMargin* - int, number of data-points to consider left and
                                  right from current NaN value.
            *log*               - bool, print progress information

        OUTPUT:
        ------------------------------------------------------------------------
            *regions*   - list of 2-length-lists, indices of the missing
                          data regions (i.e. [[1, 12], [45, 50], [60, 100]]
                        OR None (if no missing data in columnName)
        ------------------------------------------------------------------------
        Example 1 (single NaN):
            >>> s = [1, 2, 3, np.nan, 5, 4, 3, 2, 1]
            >>> df = pd.DataFrame(data=s, columns=['col1'])
            >>> print createInterpolationRanges(df,'col1',interpolateMargin=2)
            [[1, 5]]

        Example 2 (two distant NaNs give two regions):
            >>> s = [1, 2, 3, np.nan, 5, 4, 3, np.nan, 1]
            >>> df = pd.DataFrame(data=s, columns=['col1'])
            >>> print createInterpolationRanges(df,'col1',interpolateMargin=2)
            [[1, 5], [5, 8]]

        Example 3 (leading NaN and a merged group):
            >>> s = [np.nan, 5, 6, 7, 8, 7, 6, 5, 4, 3, np.nan, 1, np.nan, -1, np.nan, -1, 0, 1, 2]
            >>> df = pd.DataFrame(data=s, columns=['col1'])
            >>> print createInterpolationRanges(df, 'col1', interpolateMargin = 3)
            [[0, 3], [7, 17]]
    """
    N = len(df.index)
    validN = df[columnName].count()
    nanN = (N-validN)
    if log: print( 'createInterpolationRanges(): Column <{0}>: entries - {1}, NaNs - {2}'.format(columnName, N, nanN))
    if nanN == 0:
        # no NaN values detected. Interpolation is not needed. Return None
        if log: print( 'createInterpolationRanges(): Column *{0}* has no missing data. Nothing to interpolate. Aborting... '.format(columnName))
        return None
    # indeces of NaN values
    nanIndeces = np.where(df[columnName].isnull())[0]
    # Group NaN indices: a gap larger than interpolateMargin starts a new
    # region.  This also fixes the two defects of the original loop, which
    # returned no region at all for a single NaN (contradicting Example 1)
    # and merged a far-away trailing NaN into the previous region
    # (contradicting Example 2).
    regions = list()
    start = prev = nanIndeces[0]
    for v in nanIndeces[1:]:
        if v - prev > interpolateMargin:
            regions.append([start, prev])
            start = v
        prev = v
    regions.append([start, prev])
    # now add and substract interpolation margins, clipped to valid indices:
    iMin = 0
    iMax = N-1
    for r in regions:
        r[0] = max(iMin, int(r[0])-interpolateMargin)
        r[1] = min(iMax, int(r[1])+interpolateMargin)
    return regions
def applyInterpolationBasedOnRanges(df, columnName, ranges, suffix='_interpolated', **kwargs):
    u""" Interpolate the missing values of ``df[columnName]`` within *ranges*.

        A copy of the column is stored under ``columnName + suffix`` and the
        entries inside every ``[start, end]`` index pair of *ranges* are
        replaced by the output of the native ``pandas.DataFrame.interpolate()``
        run on that slice only (restricting work to the ranges keeps it fast;
        *ranges* should be generated with *createInterpolationRanges()*).

        INPUT:
        ------------------------------------------------------------------------------
            *df*            - pandas.DataFrame, our data
            *columnName*    - str, column name in passed DataFrame
            *ranges*        - list 2D, list of index regions (*ranges* should
                              be generated with *createInterpolationRanges()*)
            *suffix*        - str, *suffix* will be appended to the *columnName*
                              to create new data-column in *df*
            ***kwargs*      - are passed to pandas function *DataFrame.interpolate()*

        OUTPUT:
        ------------------------------------------------------------------------------
            Note: There is no output, because function MODIFIES passed dataframe.
                  The result will be appended there.
        ------------------------------------------------------------------------------
    """
    if ranges is None:
        # nothing to interpolate
        return
    target = columnName + suffix
    df[target] = df[columnName]
    for start, end in ranges:
        filled = df[columnName][start:end + 1].interpolate(**kwargs)
        df.loc[start:end, target] = filled
if __name__ == '__main__':
# define X, Y
x = np.arange(0, 100, 0.1)
y = np.sin(x)
# set some missing data
y[0] = np.nan
y[38] = np.nan
y[40:60] = np.nan
y[70:75] = np.nan
y[400:450] = np.nan
y[453] = np.nan
y[457] = np.nan
y[459] = np.nan
y[466] = np.nan
y[800:825] = np.nan
y[990:992] = np.nan
#create Pandas DataFrame
df = pd.DataFrame(data=y, columns=['one'])
# column names
cn = 'one'
cni = cn+'_interpolated'
# define number of points left and right from NaN value to consider for interpolation
interpolateMargin = 100
# find NaN regions - to increase performance
Regions = createInterpolationRanges(df, cn, interpolateMargin=interpolateMargin)
print( Regions)
# apply interpolation to NaN regions
applyInterpolationBasedOnRanges(df, cn, Regions, suffix='_interpolated', method='polynomial', order=15)
# check new column if it has NaN or not (if not - will be printed)
createInterpolationRanges(df, cni, interpolateMargin=interpolateMargin)
#plot
ax = plt.subplot(211)
df[cn].plot(ax=ax, marker='x', color='b')
criterion = df.index.isin(np.where(df[cn].isnull())[0])
x = df[criterion].index
y = df[cni][criterion].values
plt.scatter(x=x, y=y, marker='o', color='r', label='interpolated', s=100)
plt.legend(loc='best')
plt.ylim([-2, 2])
ax2 = plt.subplot(212)
df[cni].plot(ax=ax2, marker='x', color='g')
plt.ylim([-2, 2])
plt.legend(loc='best')
plt.show()
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/io/parser/na_values.py | 6 | 10526 | # -*- coding: utf-8 -*-
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
import numpy as np
from numpy import nan
import pandas.io.parsers as parsers
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
class NAvaluesTests(object):
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
    def test_detect_string_na(self):
        # Default NA sentinels ('NA', 'NaN', 'nan') must parse as missing
        # values while ordinary strings pass through unchanged.
        data = """A,B
foo,bar
NA,baz
NaN,nan
"""
        expected = np.array([['foo', 'bar'], [nan, 'baz'], [nan, nan]],
                            dtype=np.object_)
        df = self.read_csv(StringIO(data))
        tm.assert_numpy_array_equal(df.values, expected)
def test_non_string_na_values(self):
# see gh-3611: with an odd float format, we can't match
# the string '999.0' exactly but still need float matching
nice = """A,B
-999,1.2
2,-999
3,4.5
"""
ugly = """A,B
-999,1.200
2,-999.000
3,4.500
"""
na_values_param = [['-999.0', '-999'],
[-999, -999.0],
[-999.0, -999],
['-999.0'], ['-999'],
[-999.0], [-999]]
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan],
[3.0, 4.5]], columns=['A', 'B'])
for data in (nice, ugly):
for na_values in na_values_param:
out = self.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(out, expected)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
assert _NA_VALUES == parsers._NA_VALUES
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = np.array([[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]])
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_numpy_array_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_numpy_array_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_numpy_array_equal(df3.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
def test_na_values_keep_default(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# see gh-4318: passing na_values=None and
# keep_default_na=False yields 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_na_values_na_filter_override(self):
data = """\
A,B
1,A
nan,B
3,C
"""
expected = DataFrame([[1, 'A'], [np.nan, np.nan], [3, 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=True)
tm.assert_frame_equal(out, expected)
expected = DataFrame([['1', 'A'], ['nan', 'B'], ['3', 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=False)
tm.assert_frame_equal(out, expected)
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
assert result['Date'][1] == '2012-05-12'
assert result['UnitPrice'].isnull().all()
def test_na_values_scalar(self):
# see gh-12224
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[np.nan, 2.0], [2.0, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=1)
tm.assert_frame_equal(out, expected)
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names,
na_values={'a': 2, 'b': 1})
tm.assert_frame_equal(out, expected)
def test_na_values_dict_aliasing(self):
na_values = {'a': 2, 'b': 1}
na_values_copy = na_values.copy()
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(out, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(self):
# see gh-14203
data = 'a\nfoo\n1'
na_values = {0: 'foo'}
out = self.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({'a': [np.nan, 1]})
tm.assert_frame_equal(out, expected)
    def test_na_values_uint64(self):
        # see gh-14983
        # Values around 2**63 must be kept as strings (not overflow an
        # int64 column) when one of them is declared an NA value or the
        # column contains an empty field.
        na_values = [2**63]
        data = str(2**63) + '\n' + str(2**63 + 1)
        expected = DataFrame([str(2**63), str(2**63 + 1)])
        out = self.read_csv(StringIO(data), header=None, na_values=na_values)
        tm.assert_frame_equal(out, expected)
        data = str(2**63) + ',1' + '\n,2'
        expected = DataFrame([[str(2**63), 1], ['', 2]])
        out = self.read_csv(StringIO(data), header=None)
        tm.assert_frame_equal(out, expected)
def test_empty_na_values_no_default_with_index(self):
# see gh-15835
data = "a,1\nb,2"
expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
tm.assert_frame_equal(out, expected)
| mit |
eg-zhang/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
wkentaro/fcn | examples/apc2016/datasets/mit_benchmark.py | 1 | 3797 | import itertools
import os
import os.path as osp
import chainer
import numpy as np
import scipy.misc
from sklearn.model_selection import train_test_split
from base import APC2016DatasetBase
def ids_from_scene_dir(scene_dir, empty_scene_dir):
    """Yield one (empty, rgb, segm) file-path triple per frame of a scene.

    Frames are numbered 0, 1, 2, ...; iteration stops at the first frame
    whose RGB or segmentation image is missing on disk.  The empty-scene
    path is emitted without an existence check.
    """
    frame_index = 0
    while True:
        empty_file = osp.join(
            empty_scene_dir, 'frame-{:06}.color.png'.format(frame_index))
        rgb_file = osp.join(
            scene_dir, 'frame-{:06}.color.png'.format(frame_index))
        segm_file = osp.join(
            scene_dir, 'segm/frame-{:06}.segm.png'.format(frame_index))
        # Stop as soon as a frame is incomplete on disk.
        if not (osp.exists(rgb_file) and osp.exists(segm_file)):
            return
        yield (empty_file, rgb_file, segm_file)
        frame_index += 1
def bin_id_from_scene_dir(scene_dir):
    """Return the bin identifier of a captured scene.

    Reads ``cam.info.txt`` inside *scene_dir*: the first line names the
    camera location (text after ': ').  For a shelf scene the bin id is
    the last character of the second line; any other location is the
    tote.
    """
    # with-statement closes the file (the original leaked the handle)
    with open(osp.join(scene_dir, 'cam.info.txt')) as f:
        caminfo = f.read()
    lines = caminfo.splitlines()
    loc = lines[0].split(': ')[-1]
    if loc == 'shelf':
        # e.g. "... bin A" -> bin id is the trailing letter
        bin_id = lines[1][-1]
    else:
        bin_id = 'tote'
    return bin_id
class APC2016mit_benchmarkDataset(APC2016DatasetBase):
    """APC2016 MIT 'benchmark' dataset with a deterministic train/val split."""

    def __init__(self, data_type):
        # Only the two canonical splits are supported.
        assert data_type in ('train', 'val')
        self.dataset_dir = chainer.dataset.get_dataset_directory(
            'apc2016/benchmark')
        data_ids = self._get_ids()
        # Fixed random_state keeps the 75/25 split identical across runs.
        ids_train, ids_val = train_test_split(
            data_ids, test_size=0.25, random_state=1234)
        self._ids = ids_train if data_type == 'train' else ids_val

    def __len__(self):
        return len(self._ids)

    def _get_ids_from_loc_dir(self, env, loc_dir):
        """Collect frame ids for every scene under one location directory."""
        assert env in ('office', 'warehouse')
        loc = osp.basename(loc_dir)
        data_ids = []
        for entry in os.listdir(loc_dir):
            scene_dir = osp.join(loc_dir, entry)
            bin_id = bin_id_from_scene_dir(scene_dir)
            # Empty-bin reference images live in a parallel directory tree.
            empty_dir = osp.join(
                self.dataset_dir, env, 'empty', loc, 'scene-{}'.format(bin_id))
            data_ids.extend(ids_from_scene_dir(scene_dir, empty_dir))
        return data_ids

    def _get_ids(self):
        """Gather frame ids from both the office and warehouse captures."""
        data_ids = []
        # office
        contain_dir = osp.join(self.dataset_dir, 'office/test')
        for loc in ('shelf', 'tote'):
            data_ids.extend(self._get_ids_from_loc_dir(
                'office', osp.join(contain_dir, loc)))
        # warehouse
        contain_dir = osp.join(self.dataset_dir, 'warehouse')
        for sub in ('practice', 'competition'):
            sub_contain_dir = osp.join(contain_dir, sub)
            for loc in ('shelf', 'tote'):
                data_ids.extend(self._get_ids_from_loc_dir(
                    'warehouse', osp.join(sub_contain_dir, loc)))
        return data_ids

    def _load_from_id(self, data_id):
        """Load one (image, label) pair from a path triple."""
        empty_file, rgb_file, segm_file = data_id
        img = scipy.misc.imread(rgb_file, mode='RGB')
        img_empty = scipy.misc.imread(empty_file, mode='RGB')
        # Raw label values are stored scaled by 6
        # (e.g. 0 -> class 0 background, 54 -> class 9 dasani_bottle_water).
        lbl = scipy.misc.imread(segm_file, mode='L') / 6
        lbl = lbl.astype(np.int32)
        # Paste the annotated pixels onto the empty-bin background image.
        img_empty[lbl > 0] = img[lbl > 0]
        return img_empty, lbl

    def get_example(self, i):
        img, lbl = self._load_from_id(self._ids[i])
        return self.img_to_datum(img), lbl
if __name__ == '__main__':
    # Smoke test: build both splits, report their sizes, and step through
    # the validation set showing each example's visualization.
    import matplotlib.pyplot as plt
    import six
    dataset_train = APC2016mit_benchmarkDataset('train')
    dataset_val = APC2016mit_benchmarkDataset('val')
    print('train: %d, val: %d' % (len(dataset_train), len(dataset_val)))
    for i in six.moves.range(len(dataset_val)):
        # visualize_example presumably comes from APC2016DatasetBase
        # (not defined in this file) -- confirm against the base class.
        viz = dataset_val.visualize_example(i)
        plt.imshow(viz)
        plt.show()
| mit |
hdmetor/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measured for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
    """Create index and query data (memoized on disk via joblib)."""
    print('Generating random blob-ish data')
    data, _ = make_blobs(n_samples=n_samples + n_queries,
                         n_features=n_features, centers=100,
                         shuffle=True, random_state=random_state)
    # shuffle=True guarantees index and query vectors are drawn from the
    # same distribution (a mixture of 100 gaussians), so holding out the
    # tail as query vectors is statistically sound.
    return data[:n_samples], data[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
    """Run brute-force cosine kNN and report the mean per-query time.

    Returns
    -------
    neighbors : indices of the exact nearest neighbors of each query
    average_time : wall-clock seconds per query
    """
    print ('Building NearestNeighbors for %d samples in %d dimensions' %
           (X.shape[0], X.shape[1]))
    index = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    start = time()
    neighbors = index.kneighbors(queries, n_neighbors=n_neighbors,
                                 return_distance=False)
    elapsed = time() - start
    return neighbors, elapsed / n_queries
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
                  average_time_exact, **lshf_params):
    """Fit an LSHForest and compare it against exact brute-force search.

    Returns the (speed_up, accuracy) pair: accuracy is the mean fraction
    of approximate neighbors that appear in the exact result, speed_up
    is the ratio of exact to approximate per-query time.
    """
    print('Building LSHForest for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    lshf = LSHForest(**lshf_params)
    start = time()
    lshf.fit(X)
    lshf_build_time = time() - start
    print('Done in %0.3fs' % lshf_build_time)

    start = time()
    approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
                                       return_distance=False)
    average_time_approx = (time() - start) / n_queries

    # Overlap of approximate and exact neighbor sets, averaged per query.
    accuracy = sum(np.in1d(approx_neighbors[q], exact_neighbors[q]).mean()
                   for q in range(len(queries))) / n_queries
    speed_up = average_time_exact / average_time_approx

    print('Average time for lshf neighbor queries: %0.3fs' %
          average_time_approx)
    print('Average time for exact neighbor queries: %0.3fs' %
          average_time_exact)
    print('Average Accuracy : %0.2f' % accuracy)
    print('Speed up: %0.1fx' % speed_up)
    return speed_up, accuracy
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Initialize index sizes
    n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
    n_features = int(1e2)
    n_queries = 100
    n_neighbors = 10
    # Generate data once at the largest size: every run below queries the
    # same held-out vectors against a prefix of the same index data.
    X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
                                 random_state=0)
    params_list = [{'n_estimators': 3, 'n_candidates': 50},
                   {'n_estimators': 5, 'n_candidates': 70},
                   {'n_estimators': 10, 'n_candidates': 100}]
    accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
    speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
    for i, sample_size in enumerate(n_samples):
        print ('==========================================================')
        print ('Sample size: %i' % sample_size)
        print ('------------------------')
        # Brute-force neighbors are both the ground truth and the timing
        # baseline for every LSHForest parameter combination below.
        exact_neighbors, average_time_exact = calc_exact_neighbors(
            X_index[:sample_size], X_query, n_queries, n_neighbors)
        for j, params in enumerate(params_list):
            print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                   (params['n_estimators'], params['n_candidates']))
            speed_ups[i, j], accuracies[i, j] = calc_accuracy(
                X_index[:sample_size], X_query, n_queries, n_neighbors,
                exact_neighbors, average_time_exact, random_state=0, **params)
        print ('')
        print ('==========================================================')
    # Set labels for LSHForest parameters
    colors = ['c', 'm', 'y']
    p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
    p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
    p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
    labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
              ', n_candidates=' + str(params_list[0]['n_candidates']),
              'n_estimators=' + str(params_list[1]['n_estimators']) +
              ', n_candidates=' + str(params_list[1]['n_candidates']),
              'n_estimators=' + str(params_list[2]['n_estimators']) +
              ', n_candidates=' + str(params_list[2]['n_candidates'])]
    # Plot precision (one color per parameter combination)
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, accuracies[:, i], c=colors[i])
        plt.plot(n_samples, accuracies[:, i], c=colors[i])
    plt.ylim([0, 1.3])
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Precision@10")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Precision of first 10 neighbors with index size")
    # Plot speed up over brute force as the index grows
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
        plt.plot(n_samples, speed_ups[:, i], c=colors[i])
    plt.ylim(0, np.max(speed_ups))
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Speed up")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Relationship between Speed up and index size")
    plt.show()
| bsd-3-clause |
Yu-Group/scikit-learn-sandbox | benchmarks/deleteme/py_irf_benchmarks.py | 1 | 14042 | # iRF benchmarks
import numpy as np
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import os
import yaml
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from IPython.display import display, Image
from sklearn.datasets import load_breast_cancer
import sys
sys.path.insert(0, '../jupyter')
# Import our custom utilities
from imp import reload
from utils import irf_jupyter_utils
from utils import irf_utils
def check_path_exists(inp_path):
    """
    Check if the directory or filepath exists.

    Exits the process with a clear message if the path is missing;
    otherwise prints a confirmation and returns True.

    Parameters
    ----------
    inp_path: str
        path of a filename or directory whose existence
        is to be verified

    Returns
    -------
    bool
        True when the path exists (the original returned None despite
        its docstring promising a bool; callers ignoring the return
        value are unaffected).
    """
    if not os.path.exists(inp_path):
        sys.exit('Directory {} does not exist! Check you are running this'
                 ' from the benchmarks directory'.format(inp_path))
    print(inp_path, True, sep=': ')
    return True
#
# =============================================================================
# Run the RF benchmarks
# =============================================================================
def RF_benchmarks(features, responses,
                  n_trials=10,
                  train_split_propn=0.8,
                  n_estimators=20,
                  seed=2017):
    """
    Benchmark plain random forests over repeated fits.

    Fits ``n_trials`` RandomForestClassifier instances on one shared
    train/test split and records, per trial, the wall-clock fit time,
    the validation metrics and the feature importances.

    Parameters
    ----------
    n_trials : int, optional (default = 10)
        Number of times to run RF
    train_split_propn : float, optional (default = 0.8)
        Proportion of the dataset to include in the train split
    n_estimators : integer, optional (default = 20)
        The number of trees in the forest
    seed : integer, optional (default = 2017)
        random seed for reproducibility

    Returns
    -------
    rf_bm : dict
        'metrics_all' : per-trial metrics plus a 'time' entry,
        'metrics_summary' : [mean, std] of every metric across trials,
        'feature_importances' : per-trial feature importances
    """
    np.random.seed(seed)

    # one shared train/test split for every trial
    X_train, X_test, y_train, y_test = train_test_split(
        features, responses, train_size=train_split_propn)

    rf_time = np.array([])
    metrics_tmp = {}
    feature_importances = {}
    for trial in range(n_trials):
        # time the forest construction and fit
        start = time.time()
        forest = RandomForestClassifier(n_estimators=n_estimators)
        forest.fit(X=X_train, y=y_train)
        rf_time = np.append(rf_time, time.time() - start)

        metrics_tmp[trial] = irf_utils.get_validation_metrics(
            forest, y_test, X_test)
        feature_importances[trial] = forest.feature_importances_

    # collate per-trial metric dicts into lists keyed by metric name
    metrics_all = {key: [metrics_tmp[t][key] for t in range(n_trials)]
                   for key in metrics_tmp[0].keys()}
    metrics_all['time'] = rf_time

    # mean and standard deviation across trials for every metric
    metrics_summary = {key: [np.mean(vals, 0), np.std(vals, 0)]
                       for key, vals in metrics_all.items()}

    return {'metrics_all': metrics_all,
            'metrics_summary': metrics_summary,
            'feature_importances': feature_importances}
def iRF_benchmarks(features, responses, n_trials=10,
                   K=5,
                   train_split_propn=0.8,
                   n_estimators=20,
                   B=30,
                   propn_n_samples=.2,
                   bin_class_type=1,
                   M=20,
                   max_depth=5,
                   noisy_split=False,
                   num_splits=2,
                   n_estimators_bootstrap=5,
                   seed=2018):
    """
    Run iRF benchmarks.

    Runs iterative random forests ``n_trials`` times on one shared
    train/test split and records, per trial, the wall-clock time, the
    validation metrics and feature importances of the final (K-th)
    forest, and the interaction stability scores.

    Parameters
    ----------
    n_trials : int, optional (default = 10)
        Number of times to run iRF
    K : int, optional (default = 5)
        Number of iRF iterations; metrics come from the K-th forest
    train_split_propn : float, optional (default = 0.8)
        Proportion of the dataset used for training
    (the remaining keyword arguments are forwarded unchanged to
    irf_utils.run_iRF)

    Returns
    -------
    iRF_bm : dict
        'metrics_all' : per-trial metrics plus a 'time' entry,
        'metrics_summary' : [mean, std] of every metric across trials,
        'stability_all' : per-trial interaction stability scores,
        'feature_importances' : per-trial importances of the K-th forest
    """
    np.random.seed(seed)

    # one shared train/test split for every trial
    X_train, X_test, y_train, y_test = train_test_split(
        features, responses, train_size=train_split_propn)

    iRF_time = np.array([])
    metrics_tmp = {}
    feature_importances = {}
    stability_all = {}

    for i in range(n_trials):
        # run iRF and time it
        t0 = time.time()
        assert np.shape(X_train)[0] == np.shape(y_train)[0]
        assert np.shape(X_test)[0] == np.shape(y_test)[0]
        _, all_K_iter_rf_data, _, _, stability_score = irf_utils.run_iRF(
            X_train=X_train,
            X_test=X_test,
            y_train=y_train,
            y_test=y_test,
            K=K,
            n_estimators=n_estimators,
            B=B,
            random_state_classifier=None,
            propn_n_samples=propn_n_samples,
            bin_class_type=bin_class_type,
            M=M,
            max_depth=max_depth,
            noisy_split=noisy_split,
            num_splits=num_splits,
            n_estimators_bootstrap=n_estimators_bootstrap)
        iRF_time = np.append(iRF_time, time.time() - t0)

        # metrics and importances are taken from the last (K-th) forest
        rf_final = all_K_iter_rf_data['rf_iter' + str(K - 1)]['rf_obj']
        metrics_tmp[i] = irf_utils.get_validation_metrics(
            rf_final, y_test, X_test)
        feature_importances[i] = rf_final.feature_importances_
        stability_all[i] = stability_score

    # aggregate metrics across trials
    # BUG FIX: the metric-name template must come from trial 0 -- the
    # original indexed metrics_tmp[1], which raised KeyError whenever
    # n_trials == 1 (RF_benchmarks correctly uses trial 0)
    metrics_all = {}
    for k in metrics_tmp[0].keys():
        metrics_all[k] = [metrics_tmp[i][k] for i in range(n_trials)]
    metrics_all['time'] = iRF_time

    # mean and standard deviation across trials for every metric
    metrics_summary = {}
    for k in metrics_all.keys():
        metrics_summary[k] = \
            [np.mean(metrics_all[k], 0), np.std(metrics_all[k], 0)]

    iRF_bm = {'metrics_all': metrics_all,
              'metrics_summary': metrics_summary,
              'stability_all': stability_all,
              'feature_importances': feature_importances}
    return iRF_bm
def consolidate_bm_RF(features, responses, specs, seed = None):
    """
    Run RF_benchmarks over a grid of configurations.

    Exactly one entry of *specs* may be a list: that parameter is looped
    over while every other (scalar) entry is replicated per iteration.
    ``N_obs`` / ``N_features`` values smaller than the data shape
    trigger random subsampling of observations / features before each
    benchmark run.

    Returns
    -------
    rf_bm : dict
        Maps loop index -> result dict returned by RF_benchmarks.
    """
    np.random.seed(seed)

    # find the single looped parameter (the one whose value is a list)
    err_1param = 0
    loop_spec = None
    for k in specs.keys():
        if np.max(np.shape([specs[k]])) > 1:
            print(k)
            loop_spec = k
            err_1param += 1
    assert(err_1param <= 1)  # should only be one parameter being looped over

    if err_1param == 0:
        # nothing is looped over: run a single configuration
        loop_spec = 'n_trials'
        specs[loop_spec] = [specs[loop_spec]]
    # BUG FIX: the original also wrapped/replicated the looped list itself,
    # producing a list of lists and later passing whole lists where scalar
    # parameter values belong
    n_loops = len(specs[loop_spec])
    print(specs[loop_spec])
    # replicate every scalar parameter once per loop iteration
    for k in specs.keys():
        if k != loop_spec:
            specs[k] = [specs[k]] * n_loops
    print(specs)

    N = np.shape(features)[0]
    P = np.shape(features)[1]
    rf_bm = {}
    for i in range(n_loops):
        # subsample observations if requested
        # BUG FIX: per-iteration values must be indexed with [i]; the
        # original passed the whole specs list as np.random.choice's size
        if specs['N_obs'][i] != N:
            row_idx = np.random.choice(N, specs['N_obs'][i], replace=False)
            features_subset = features[row_idx, :]
            # BUG FIX: responses are subsampled by row only (the original
            # also column-indexed them with feature indices)
            responses_subset = responses[row_idx]
        else:
            features_subset = deepcopy(features)
            responses_subset = deepcopy(responses)
        # subsample features if requested, on top of any row subsample
        # BUG FIX: the original restarted from the full matrix here,
        # silently discarding the observation subsample above
        if specs['N_features'][i] != P:
            col_idx = np.random.choice(P, specs['N_features'][i],
                                       replace=False)
            features_subset = features_subset[:, col_idx]

        rf_bm[i] = RF_benchmarks(features_subset, responses_subset,
                                 n_trials=specs['n_trials'][i],
                                 train_split_propn=specs['train_split_propn'][i],
                                 n_estimators=specs['n_estimators'][i],
                                 seed=None)
    return rf_bm
def consolidate_bm_iRF(features, responses, specs, seed = None):
    """
    Run iRF_benchmarks over a grid of configurations.

    Exactly one entry of *specs* may be a list: that parameter is looped
    over while every other (scalar) entry is replicated per iteration.
    ``N_obs`` / ``N_features`` values smaller than the data shape
    trigger random subsampling of observations / features before each
    benchmark run.

    Returns
    -------
    iRF_bm : dict
        Maps loop index -> result dict returned by iRF_benchmarks.
    """
    # find the single looped parameter (the one whose value is a list)
    err_1param = 0
    loop_spec = None
    for k in specs.keys():
        if np.max(np.shape([specs[k]])) > 1:
            print(k)
            loop_spec = k
            err_1param += 1
    assert(err_1param <= 1)  # should only be one parameter being looped over

    if err_1param == 0:
        # nothing is looped over: run a single configuration
        loop_spec = 'n_trials'
        specs[loop_spec] = [specs[loop_spec]]
    # BUG FIX: the original also wrapped/replicated the looped list itself,
    # producing a list of lists and later passing whole lists where scalar
    # parameter values belong
    n_loops = len(specs[loop_spec])
    print(specs[loop_spec])
    # replicate every scalar parameter once per loop iteration
    for k in specs.keys():
        if k != loop_spec:
            specs[k] = [specs[k]] * n_loops
    print(specs)

    N = np.shape(features)[0]
    P = np.shape(features)[1]
    iRF_bm = {}
    for i in range(n_loops):
        print(i)
        # subsample observations if requested
        # BUG FIX: per-iteration values must be indexed with [i]; the
        # original passed the whole specs list as np.random.choice's size
        if specs['N_obs'][i] != N:
            row_idx = np.random.choice(N, specs['N_obs'][i], replace=False)
            features_subset = features[row_idx, :]
            # BUG FIX: responses are subsampled by row only (the original
            # also column-indexed them with feature indices)
            responses_subset = responses[row_idx]
        else:
            features_subset = deepcopy(features)
            responses_subset = deepcopy(responses)
        # subsample features if requested, on top of any row subsample
        # BUG FIX: the original restarted from the full matrix here,
        # silently discarding the observation subsample above
        if specs['N_features'][i] != P:
            col_idx = np.random.choice(P, specs['N_features'][i],
                                       replace=False)
            features_subset = features_subset[:, col_idx]

        iRF_bm[i] = iRF_benchmarks(features_subset, responses_subset,
                                   n_trials=specs['n_trials'][i],
                                   K=specs['n_iter'][i],
                                   train_split_propn=specs['train_split_propn'][i],
                                   n_estimators=specs['n_estimators'][i],
                                   B=specs['n_bootstraps'][i],
                                   propn_n_samples=specs['propn_n_samples'][i],
                                   bin_class_type=specs['bin_class_type'][i],
                                   M=specs['n_RIT'][i],
                                   max_depth=specs['max_depth'][i],
                                   noisy_split=specs['noisy_split'][i],
                                   num_splits=specs['num_splits'][i],
                                   n_estimators_bootstrap=specs['n_estimators_bootstrap'][i],
                                   seed=seed)
    return iRF_bm
def plot_bm(bm, specs, param, metric):
    """Plot the mean +/- std of *metric* against the looped *param*."""
    xs = specs[param]
    means = [bm[i]['metrics_summary'][metric][0] for i in range(len(xs))]
    stds = [bm[i]['metrics_summary'][metric][1] for i in range(len(xs))]
    plt.clf()
    # error bars show one standard deviation across trials
    plt.errorbar(xs, means, yerr=stds)
    plt.xlabel(param)
    plt.ylabel(metric)
    plt.show()
# =============================================================================
# Read in yaml file as a Python dictionary
# =============================================================================
def yaml_to_dict(inp_yaml):
    """ Helper function to read in a yaml file into
    Python as a dictionary

    Parameters
    ----------
    inp_yaml : str
        Path to the yaml file to be parsed into a Python dictionary
        (the original docstring described this as a yaml text string,
        but the value is opened as a file)

    Returns
    -------
    out : dict or None
        The parsed yaml contents, or None when parsing fails
    """
    # defined up front so a parse error cannot raise NameError on return
    # (the original left `out` unbound when yaml raised)
    out = None
    with open(inp_yaml, 'r') as stream:
        try:
            # safe_load avoids arbitrary object construction from untrusted
            # yaml; plain yaml.load without a Loader is deprecated since
            # PyYAML 5.1 and a TypeError in PyYAML 6.0
            out = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    return out
# =============================================================================
# Convert Python dictionary to yaml
# =============================================================================
def dict_to_yaml(inp_dict, out_yaml_dir, out_yaml_name):
    """ Helper function to write a Python dictionary out
    as a yaml file

    Parameters
    ----------
    inp_dict: dict
        The Python dictionary object to be output as a yaml file
    out_yaml_dir : str
        The output directory for the yaml file (created if missing)
    out_yaml_name : str
        The output filename, used verbatim -- note the file gets no
        '.yaml' extension appended; pass the full name you want
    """
    # create the destination directory on first use
    if not os.path.exists(out_yaml_dir):
        os.makedirs(out_yaml_dir)
    target = os.path.join(out_yaml_dir,
                          out_yaml_name)
    # block style (default_flow_style=False) keeps the file human-readable
    with open(target, 'w') as handle:
        yaml.dump(inp_dict, handle, default_flow_style=False)
| mit |
bitforks/freetype-py | examples/glyph-vector.py | 3 | 2915 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
    import numpy
    import matplotlib.pyplot as plt
    from matplotlib.path import Path
    import matplotlib.patches as patches
    # Load the glyph 'S' from Vera at 48pt (sizes are in 1/64 point units).
    face = Face('./Vera.ttf')
    face.set_char_size( 48*64 )
    face.load_char('S')
    slot = face.glyph
    outline = slot.outline
    points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
    x, y = points['x'], points['y']
    figure = plt.figure(figsize=(8,10))
    axis = figure.add_subplot(111)
    #axis.scatter(points['x'], points['y'], alpha=.25)
    start, end = 0, 0
    VERTS, CODES = [], []
    # Iterate over each contour
    # (outline.contours[i] holds the index of the LAST point of contour i,
    # as shown by the start:end+1 slicing and start = end+1 below)
    for i in range(len(outline.contours)):
        end = outline.contours[i]
        # Close the contour by repeating its first point/tag at the end.
        points = outline.points[start:end+1]
        points.append(points[0])
        tags = outline.tags[start:end+1]
        tags.append(tags[0])
        # Split the contour into segments at each on-curve point
        # (tag bit 0 set marks an on-curve point -- FreeType convention).
        segments = [ [points[0],], ]
        for j in range(1, len(points) ):
            segments[-1].append(points[j])
            if tags[j] & (1 << 0) and j < (len(points)-1):
                segments.append( [points[j],] )
        # Translate segments into a matplotlib Path: 2 points = line,
        # 3 points = one quadratic bezier, more = chain of beziers.
        verts = [points[0], ]
        codes = [Path.MOVETO,]
        for segment in segments:
            if len(segment) == 2:
                verts.extend(segment[1:])
                codes.extend([Path.LINETO])
            elif len(segment) == 3:
                verts.extend(segment[1:])
                codes.extend([Path.CURVE3, Path.CURVE3])
            else:
                # Consecutive off-curve control points imply an on-curve
                # point at their midpoint; insert those midpoints so every
                # bezier has an explicit end point.
                verts.append(segment[1])
                codes.append(Path.CURVE3)
                for i in range(1,len(segment)-2):
                    A,B = segment[i], segment[i+1]
                    C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
                    verts.extend([ C, B ])
                    codes.extend([ Path.CURVE3, Path.CURVE3])
                verts.append(segment[-1])
                codes.append(Path.CURVE3)
        VERTS.extend(verts)
        CODES.extend(codes)
        start = end+1
    # Draw glyph lines
    path = Path(VERTS, CODES)
    glyph = patches.PathPatch(path, facecolor='.75', lw=1)
    # Draw "control" lines
    # (rewriting CURVE3 to LINETO draws the control polygon, dashed)
    for i, code in enumerate(CODES):
        if code == Path.CURVE3:
            CODES[i] = Path.LINETO
    path = Path(VERTS, CODES)
    patch = patches.PathPatch(path, ec='.5', fill=False, ls='dashed', lw=1 )
    axis.add_patch(patch)
    axis.add_patch(glyph)
    axis.set_xlim(x.min()-100, x.max()+100)
    plt.xticks([])
    axis.set_ylim(y.min()-100, y.max()+100)
    plt.yticks([])
    plt.show()
| bsd-3-clause |
mpritham/prophet | docs/conf.py | 2 | 8901 | # -*- coding: utf-8 -*-
#
# Prophet documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 19 05:52:00 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
# Stub out heavy / unavailable dependencies so Sphinx autodoc can import
# the package without them being installed (e.g. on a docs-only build).
MOCK_MODULES = ['numpy', 'pandas', 'pandas.io',
                'prophet.utils.tradingcalendar']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon',
'alabaster',
]
napoleon_numpy_docstring = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Prophet'
copyright = u'2014, Michael Su'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
html_static_path = ['_static']
html_theme_options = {
'logo': 'img/logo.png',
'github_user': 'Emsu',
'github_repo': 'prophet',
'github_button': True,
'github_banner': True,
'show_powered_by': False,
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Prophetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Prophet.tex', u'Prophet Documentation',
u'Michael Su', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'prophet', u'Prophet Documentation',
[u'Michael Su'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Prophet', u'Prophet Documentation',
u'Michael Su', 'Prophet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
iut-ibk/DynaMind-Sewer | scripts/Sewer/clustering.py | 1 | 2985 | # -*- coding: utf-8 -*-
"""
@file
@author Christian Urich <christian.urich@gmail.com>
@version 1.0
@section LICENSE
This file is part of DynaMind
Copyright (C) 2012 Christian Urich
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from pydynamind import *
from pydmtoolbox import *
import networkx
import numpy, matplotlib
from scipy.cluster import hierarchy
from scipy.spatial import distance
class Clustering(Module):
    """DynaMind module: hierarchical clustering of a sewer network.

    Reads the CONDUIT edges of the "Sewer" system, builds an undirected
    graph of their end points, computes all-pairs shortest-path hop
    distances, draws an average-linkage dendrogram (saved to tree.png)
    and prints a Louvain community partition of the graph.
    """
    def __init__(self):
        Module.__init__(self)
        # Read-only access to the conduit edges of the sewer system.
        self.conduits = View("CONDUIT", EDGE, READ)
        viewvector = []
        viewvector.append(self.conduits)
        self.addData("Sewer", viewvector)
    def run(self):
        try:
            g = networkx.Graph()
            sewer = self.getData("Sewer")
            CostsTotal = 0    # NOTE(review): never used below
            LengthTot = 0     # NOTE(review): never used below
            names = sewer.getNamesOfComponentsInView(self.conduits)
            # Point names are mapped to consecutive integer node ids via
            # their position in this list (first occurrence wins).
            pointnamelist = []
            for nc in names:
                c = sewer.getEdge(nc)
                startNode = c.getStartpointName()
                endNode = c.getEndpointName()
                if startNode not in pointnamelist:
                    pointnamelist.append(startNode)
                if endNode not in pointnamelist:
                    pointnamelist.append(endNode)
                g.add_edge(pointnamelist.index(startNode), pointnamelist.index(endNode))
            # All-pairs shortest path lengths (hop counts) between graph nodes.
            path_length=networkx.all_pairs_shortest_path_length(g)
            n = len(g.nodes())
            distances=numpy.zeros((n,n))
            for u,p in path_length.iteritems():
                for v,d in p.iteritems():
                    # NOTE(review): node ids here are 0-based list indices, so
                    # the "-1" shift sends node 0 to row/column -1 (the last
                    # one) -- looks like an off-by-one; confirm intended indexing.
                    distances[int(u)-1][int(v)-1] = d
            # Condense the (assumed symmetric) distance matrix and cluster it.
            sd = distance.squareform(distances)
            hier = hierarchy.average(sd)
            hierarchy.dendrogram(hier)
            matplotlib.pylab.savefig("tree.png",format="png")
            # NOTE(review): 'community' (python-louvain) is not imported in this
            # file, so this line raises NameError and falls into except below.
            partition = community.best_partition(g)
            print partition
            for i in set(partition.values()):
                print "Community", i
                members = list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == i]
                print members
        except Exception, e:
            # Broad catch: errors are only printed, the module never fails hard.
            print e
            print "Unexpected error:"
| gpl-2.0 |
aminert/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
# Fit both estimators on the 150x4 iris data and project onto 2 components.
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
# Plot each projection in its own figure, one scatter colour per iris class.
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
    plt.figure(figsize=(8, 8))
    for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
        plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                    c=c, label=target_name)
    if "Incremental" in title:
        # Compare up to a sign flip: principal components are sign-ambiguous.
        err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
        plt.title(title + " of iris dataset\nMean absolute unsigned error "
                  "%.6f" % err)
    else:
        plt.title(title + " of iris dataset")
    plt.legend(loc="best")
    plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
    """A fitted dictionary has one atom per component across all features."""
    n_atoms = 5
    estimator = DictionaryLearning(n_atoms, random_state=0)
    estimator.fit(X)
    assert_true(estimator.components_.shape == (n_atoms, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
    """transform() must yield exactly transform_n_nonzero_coefs active atoms
    for both the 'lars' and 'omp' transform algorithms."""
    n_components = 4
    dico = DictionaryLearning(n_components, transform_algorithm='lars',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X[1])
    # Use assert_equal (not assert_true on a comparison) so a failure reports
    # the actual count -- consistent with the identical check below.
    assert_equal(len(np.flatnonzero(code)), 3)
    dico.set_params(transform_algorithm='omp')
    code = dico.transform(X[1])
    assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
Wonjuseo/Project101 | 2/2-2. FrozenLake2.py | 1 | 2022 | import gym
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.registration import register
import random as pr
import tensorflow as tf
def rargmax(vector):
    """Argmax with random tie-breaking.

    Returns the index of a maximal entry of *vector*; when several entries
    share the maximum, one of their indices is chosen uniformly at random.
    """
    top = np.max(vector)
    tied = np.flatnonzero(vector == top)
    return pr.choice(tied)
# Reward Update Q
# Algorithm
# For each s,a initialize table entry Q(s,a)<-0
# Observe current stat s
# Do foever:
# select an action a and execute it
# receive immediate reward
# observe the new state
# update the table entry for Q(s,a)
# update the state
# Register a deterministic 4x4 FrozenLake variant (no slipping).
register(
    id='FrozenLake-v3',
    entry_point ='gym.envs.toy_text:FrozenLakeEnv',
    kwargs={'map_name':'4x4','is_slippery':False})
env = gym.make('FrozenLake-v3')
# Initialize Q-table with all zeros: one row per state, one column per action.
Q = np.zeros([env.observation_space.n,env.action_space.n])
# Discount factor
dis = .99
# Set learning parameters
num_episodes = 2000
# Create lists to contain total rewards and steps per episode
rList = []
for i in range(num_episodes):
    # Reset environment and get first new observation
    # Initialize state
    state = env.reset()
    rAll = 0
    done = False
    # Decaying E-greedy exploration rate.
    # NOTE(review): under Python 3, i/100 is float division, so the decay is
    # continuous; under Python 2 it would step down every 100 episodes --
    # confirm which behaviour is intended.
    e = 1. /((i/100)+1)
    while not done:
        # Determine actions
        # Exploit and Exploration : Decaying E-greedy
        if pr.random() < e:
            action = env.action_space.sample()
        else:
            action = rargmax(Q[state,:])
        # Add Random noise (alternative exploration strategy, disabled)
        # action = np.argmax(Q[state,:]+np.random.randn(1,env.action_space.n)/(i+1))
        # Get new state and reward from environment
        new_state, reward, done, _ = env.step(action)
        # Update Q-table with new knowledge using decay rate
        Q[state,action] = reward + dis * np.max(Q[new_state,:])
        # Update the state
        state = new_state
        # reward every episode
        rAll += reward
    rList.append(rAll)
# Show the result
print("Success rate:"+str(sum(rList)/num_episodes))
# Show the table
print(Q)
plt.bar(range(len(rList)),rList,color="blue")
plt.show()
| apache-2.0 |
sirfoga/hal | hal/data/matrix.py | 2 | 3170 | #!/usr/bin/env python
# coding: utf-8
"""Functions to deal with matrices"""
from sklearn.preprocessing import LabelEncoder
from hal.maths.utils import divide
class Matrix:
    """A 2-D table of values stored row-wise (list of rows)."""

    def __init__(self, matrix):
        self.matrix = matrix

    def precision(self):
        """Precision TP / (TP + FP), reading self.matrix as a confusion matrix.

        :return: precision score
        """
        tp = self.matrix[0][0]
        fp = self.matrix[1][0]
        return divide(1.0 * tp, tp + fp)

    def recall(self):
        """Recall TP / (TP + FN).

        :return: recall score
        """
        tp = self.matrix[0][0]
        fn = self.matrix[0][1]
        return divide(1.0 * tp, tp + fn)

    def true_neg_rate(self):
        """Specificity TN / (TN + FP).

        :return: true negative rate
        """
        fp = self.matrix[1][0]
        tn = self.matrix[1][1]
        return divide(1.0 * tn, tn + fp)

    def accuracy(self):
        """(TP + TN) / (TP + TN + FP + FN).

        :return: accuracy score
        """
        tp = self.matrix[0][0]
        fp = self.matrix[1][0]
        fn = self.matrix[0][1]
        tn = self.matrix[1][1]
        correct = 1.0 * (tp + tn)
        total = tp + tn + fp + fn
        return divide(correct, total)

    def f1_score(self):
        """Harmonic mean of precision and recall.

        :return: F1 score
        """
        prec = self.precision()
        rec = self.recall()
        return divide(2.0, 1.0 / prec + 1.0 / rec)

    def get_as_list(self):
        """Flatten the matrix row by row.

        :return: single list containing every value
        """
        flattened = []
        for row in self.matrix:
            flattened.extend(row)
        return flattened

    def encode(self):
        """Label-encode all values in place, keeping the original shape.

        :return: the fitted LabelEncoder (needed later by decode())
        """
        encoder = LabelEncoder()
        encoded = encoder.fit_transform(self.get_as_list())
        n_cols = len(self.matrix[0])
        n_rows = len(self.matrix)
        # Re-slice the flat encoded sequence back into rows of n_cols values.
        self.matrix = [
            encoded[start: start + n_cols]
            for start in range(0, n_rows * n_cols, n_cols)
        ]
        return encoder

    def decode(self, encoder):
        """Invert a previous encode() in place using the same encoder.

        :param encoder: LabelEncoder returned by encode()
        """
        decoded = []
        for row in self.matrix:
            decoded.append(encoder.inverse_transform(row))
        self.matrix = decoded

    def get_column(self, index):
        """Extract one column.

        :param index: column position
        :return: list of the values in that column
        """
        column = []
        for row in self.matrix:
            column.append(row[index])
        return column

    @staticmethod
    def from_columns(columns):
        """Build a Matrix from raw columns.

        NOTE(review): despite the name, each input column becomes a *row*
        of the result -- no transposition is performed (this matches the
        original behaviour); confirm that callers expect this.

        :param columns: matrix divided into columns
        :return: Matrix built from the given data
        """
        rows = [list(column) for column in columns]
        return Matrix(rows)
| apache-2.0 |
low-sky/colira | bayes/hold/bayes_ratio_galrad.py | 1 | 6905 | #!/usr/bin/env python
import scipy.stats
import numpy as np
import astropy.io.fits as fits
import emcee
import matplotlib.pyplot as p
from matplotlib import rc
from astropy.table import Table, Column
rc('text',usetex=True)
execfile('logprob.py')
s = fits.getdata('colira.fits')
hdr = fits.getheader('colira.fits')
GalNames = np.unique(s['GALNAME'])
t = Table(names=('Name','theta','theta+','theta-','phi','phi+','phi-',\
'R21-','R21','R21+','R32-','R32','R32+','var',\
'R31-','R31','R31+','LowKey','HighKey'),\
dtypes=('S7','f8','f8','f8','f8','f8','f8','f8','f8','f8',\
'f8','f8','f8','f8','f8','f8','f8','f8','f8'))
for tag in t.keys():
if tag != 'Name':
t[tag].format = '{:.3f}'
nGal = len(GalNames)
quantile = 10
lower_percentiles = np.arange(quantile)*quantile
upper_percentiles = np.arange(quantile)*quantile+1e2/quantile
it = np.nditer(GalNames,flags=['f_index'])
key_variable = s['RGALNORM']
while not it.finished:
for radidx in np.arange(len(lower_percentiles)):
t.add_row()
ctr = -1
name = np.array_str(it.value)
subset = key_variable[np.where(s['GALNAME']==name)]
lower_score = scipy.stats.scoreatpercentile(subset,\
lower_percentiles[radidx])
upper_score = scipy.stats.scoreatpercentile(subset,\
upper_percentiles[radidx])
figname = name+'_'+np.array_str(lower_percentiles[radidx])
print(name,lower_score,upper_score)
idx = np.where((key_variable>=lower_score)&\
(key_variable<=upper_score)&\
(s['GALNAME']==name))
t['Name'][ctr] = name.upper()
t['LowKey'][ctr] = lower_score
t['HighKey'][ctr] = upper_score
sub = s[idx]
if len(sub)>1:
x = sub['CO10']
x_err = sub['CO10_ERR']
y = sub['CO21']
y_err = sub['CO21_ERR']
z = sub['CO32']
z_err = sub['CO32_ERR']
ndim, nwalkers = 4,50
p0 = np.zeros((nwalkers,4))
p0[:,0] = np.pi/4+np.random.randn(nwalkers)*np.pi/8
p0[:,1] = np.pi/4+np.random.randn(nwalkers)*np.pi/8
p0[:,2] = (np.random.random(nwalkers))*20
p0[:,3] = (np.random.randn(nwalkers))
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprob3d_xoff,
args=[x,y,z,x_err,y_err,z_err])
pos, prob, state = sampler.run_mcmc(p0, 200)
sampler.reset()
sampler.run_mcmc(pos, 1000)
p.figure(1)
p.subplot(241)
p.errorbar(y,z,xerr=y_err,yerr=z_err,fmt=None,marker=None,mew=0)
p.scatter(y,z,marker='.')
testx = np.linspace(np.nanmin(y),np.nanmax(y),10)
r32 = np.median(1/np.tan(sampler.flatchain[:,0])/np.sin(sampler.flatchain[:,1]))
p.plot(testx,r32*testx,color='r')
p.ylabel('CO(3-2)')
p.xlabel('CO(2-1)')
# p.subplot(241)
#Theta
# p.plot(sampler.flatchain[:,0])
# p.ylabel(r'$\theta$')
# p.subplot(242)
#phi
# p.plot(sampler.flatchain[:,1])
# p.ylabel(r'$\phi$')
p.subplot(243)
p.hist(1/np.tan(sampler.flatchain[:,0])/np.sin(sampler.flatchain[:,1]),\
range=[0,2],bins=100)
p.xlabel('$R_{32}$')
p.subplot(244)
p.hist(np.tan(sampler.flatchain[:,1]),range=[0,2],bins=100)
p.xlabel('$R_{21}$')
p.subplot(245)
p.errorbar(x,y,xerr=x_err,yerr=y_err,fmt=None,marker=None,mew=0)
p.scatter(x,y,marker='.')
p.xlabel('CO(1-0)')
p.ylabel('CO(2-1)')
testx = np.linspace(np.nanmin(x),np.nanmax(x),10)
xoff = np.median(sampler.flatchain[:,3])
p.plot(testx,np.tan(np.median(sampler.flatchain[:,1]))*(testx+xoff),color='r')
p.subplot(246)
p.errorbar(y,z,xerr=y_err,yerr=y_err,fmt=None,marker=None,mew=0)
p.scatter(y,z,marker='.')
p.xlabel('CO(2-1)')
p.ylabel('CO(3-2)')
testx = np.linspace(np.nanmin(y),np.nanmax(y),10)
p.plot(testx,testx/np.tan(np.median(sampler.flatchain[:,0]))/\
np.sin(np.median(sampler.flatchain[:,1])),color='r')
# p.subplot(247)
# p.plot(sampler.flatchain[:,2])
# p.xlabel(r'$V$')
p.subplot(248)
p.hexbin(np.tan(sampler.flatchain[:,1]),sampler.flatchain[:,3])
p.xlabel('$R_{21}$')
p.ylabel('Offset')
p.savefig(figname+'.pdf',format='pdf',orientation='portrait')
p.close()
t['theta'][ctr] = np.median(sampler.flatchain[:,0])
t['theta+'][ctr] = scipy.stats.scoreatpercentile(\
sampler.flatchain[:,0],85) - t['theta'][ctr]
t['theta-'][ctr] = t['theta'][ctr] - \
scipy.stats.scoreatpercentile(\
sampler.flatchain[:,0],15)
t['phi'][ctr] = np.median(sampler.flatchain[:,1])
t['phi+'][ctr] = scipy.stats.scoreatpercentile(\
sampler.flatchain[:,1],85) - t['phi'][ctr]
t['phi-'][ctr] = t['phi'][ctr] - \
scipy.stats.scoreatpercentile(\
sampler.flatchain[:,1],15)
r21 = np.tan(sampler.flatchain[:,1])
t['R21'][ctr] = np.median(r21)
t['R21+'][ctr] = scipy.stats.scoreatpercentile(r21,85)
t['R21-'][ctr] = scipy.stats.scoreatpercentile(r21,15)
r32 = 1/np.tan(sampler.flatchain[:,0])/np.sin(sampler.flatchain[:,1])
t['R32'][ctr] = np.median(r32)
t['R32+'][ctr] = scipy.stats.scoreatpercentile(r32,85)
t['R32-'][ctr] = scipy.stats.scoreatpercentile(r32,15)
r31 = 1/np.tan(sampler.flatchain[:,0])/np.cos(sampler.flatchain[:,1])
t['R31'][ctr] = np.median(r31)
t['R31+'][ctr] = scipy.stats.scoreatpercentile(r31,85)
t['R31-'][ctr] = t['R31'][ctr] =scipy.stats.scoreatpercentile(r31,15)
t['var'][ctr] = np.median(sampler.flatchain[:,2])
it.iternext()
t2 = Table(t)
t2.remove_columns(('theta','phi','theta+','phi+','theta-','phi-','var'))
emptystring = np.empty((len(t2)),dtype='string')
emptystring[:]=''
col = Column(name='blank',data=emptystring)
t2.add_column(col,index=4)
col = Column(name='blank2',data=emptystring)
t2.add_column(col,index=8)
t2.write('ratios.tex',format='latex')
| gpl-2.0 |
sounay/flaminggo-test | onadata/apps/viewer/tests/test_export_list.py | 5 | 8203 | import os
from django.core.urlresolvers import reverse
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.viewer.models.export import Export
from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.viewer.views import export_list
class TestExportList(TestBase):
def setUp(self):
super(TestExportList, self).setUp()
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'))
def test_unauthorised_users_cannot_export_form_data(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
# check that the 'New Export' button is not being rendered
self.assertNotIn(
'<input title="" data-original-title="" \
class="btn large btn-primary" \
value="New Export" type="submit">', response.content)
self.assertEqual(response.status_code, 200)
def test_csv_export_list(self):
kwargs = {'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
'export_type': Export.CSV_EXPORT}
# test csv
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_xls_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_kml_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.KML_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_gdoc_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.GDOC_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_csv_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_sav_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.SAV_ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_external_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT}
server = 'http://localhost:8080/xls/23fa4c38c0054748a984ffd89021a295'
data_value = 'template 1 |{0}'.format(server)
meta = MetaData.external_export(self.xform, data_value)
custom_params = {
'meta': meta.id,
}
url = reverse(export_list, kwargs=kwargs)
count = len(Export.objects.all())
response = self.client.get(url, custom_params)
self.assertEqual(response.status_code, 200)
count1 = len(Export.objects.all())
self.assertEquals(count+1, count1)
def test_external_export_list_no_template(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT}
url = reverse(export_list, kwargs=kwargs)
count = len(Export.objects.all())
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertEquals(response.content, u'No XLS Template set.')
count1 = len(Export.objects.all())
self.assertEquals(count, count1)
class TestDataExportURL(TestBase):
def setUp(self):
super(TestDataExportURL, self).setUp()
self._publish_transportation_form()
def _filename_from_disposition(self, content_disposition):
filename_pos = content_disposition.index('filename=')
self.assertTrue(filename_pos != -1)
return content_disposition[filename_pos + len('filename='):]
def test_csv_export_url(self):
self._submit_transport_instance()
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/csv')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.csv')
def test_csv_export_url_without_records(self):
# csv using the pandas path can throw a NoRecordsFound Exception -
# handle it gracefully
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_xls_export_url(self):
self._submit_transport_instance()
url = reverse('xls_export', kwargs={
'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'],
'application/vnd.openxmlformats')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.xlsx')
def test_csv_zip_export_url(self):
self._submit_transport_instance()
url = reverse('csv_zip_export', kwargs={
'username': self.user.username.upper(),
'id_string': self.xform.id_string.upper(),
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/zip')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.zip')
def test_sav_zip_export_url(self):
self._submit_transport_instance()
url = reverse('sav_zip_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/zip')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.zip')
| bsd-2-clause |
tomsilver/NAB | tests/integration/false_positive_test.py | 1 | 5890 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import pandas
import unittest
from nab.scorer import Scorer
from nab.test_helpers import generateTimestamps, generateWindows, generateLabels
class FalsePositiveTests(unittest.TestCase):
def _checkCounts(self, counts, tn, tp, fp, fn):
"""Ensure the metric counts are correct."""
self.assertEqual(counts['tn'], tn, "Incorrect tn count")
self.assertEqual(counts['tp'], tp, "Incorrect tp count")
self.assertEqual(counts['fp'], fp, "Incorrect fp count")
self.assertEqual(counts['fn'], fn, "Incorrect fn count")
def setUp(self):
self.costMatrix = {"tpWeight": 1.0,
"fnWeight": 1.0,
"fpWeight": 1.0,
"tnWeight": 1.0}
def testFalsePositiveMeansNegativeScore(self):
"""
A false positive should make the score negative.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 1000
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
predictions[0] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertTrue(score < 0)
self._checkCounts(scorer.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
def testTwoFalsePositivesIsWorseThanOne(self):
"""
For two false positives A and B in a file, the score given A and B should be
more negative than the score given just A.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 1000
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
predictions[0] = 1
scorer1 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
predictions[1] = 1
scorer2 = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertTrue(score2 < score1)
self._checkCounts(scorer1.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
self._checkCounts(scorer2.counts, length-windowSize*numWindows-2, 0, 2,
windowSize*numWindows)
def testOneFalsePositiveNoWindow(self):
"""
When there is no window (i.e. no anomaly), a false positive should still
result in a negative score, specifically negative the FP weight.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 1000
numWindows = 0
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
predictions[0] = 1
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertTrue(score == -self.costMatrix["fpWeight"])
self._checkCounts(scorer.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
def testEarlierFalsePositiveAfterWindowIsBetter(self):
"""For two false positives A and B, where A occurs earlier than B, the
score change due to A will be less than the score change due to B.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 10
numWindows = 1
windowSize = 2
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions1 = pandas.Series([0]*length)
predictions2 = pandas.Series([0]*length)
t1, t2 = windows[0]
index1 = timestamps[timestamps == t2].index[0] + 1
predictions1[index1] = 1
scorer1 = Scorer(timestamps, predictions1, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score1) = scorer1.getScore()
predictions2[index1+1] = 1
scorer2 = Scorer(timestamps, predictions2, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score2) = scorer2.getScore()
self.assertTrue(score1 > score2)
self._checkCounts(scorer1.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
self._checkCounts(scorer2.counts, length-windowSize*numWindows-1, 0, 1,
windowSize*numWindows)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
mjsauvinen/P4UL | pyAnalyze/richardsonProfiles.py | 1 | 3268 | #!/usr/bin/env python3
import sys
import numpy as np
import argparse
import matplotlib.pyplot as plt
from plotTools import addToPlot
from netcdfTools import netcdfDataset, readVariableFromDataset
from analysisTools import sensibleIds, groundOffset
from utilities import filesFromList
'''
Description: A script to compute gradient Richardson number (Ri) profiles
from horizontal velocity and potential temperature data stored in a NETCDF file.
The analysis is performed for all points along a z-direction.
In case of PALM-generated results (featuring staggered grid), the velocity data must first be
interpolated onto cell-centers (i.e. scalar grid) with groupVectorDataNetCdf.py script.
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
#==========================================================#
sepStr = ' # = # = # = # = # = # = # = # = '
parser = argparse.ArgumentParser()
parser.add_argument("fileKey", default=None,\
help="Search string for collecting files.")
parser.add_argument("-vn", "--vnames", type=str, nargs=2, default=['u','v'],\
help="Name of the horizontal velocity variables in the NETCDF file. Default=['u','v'] ")
parser.add_argument("-ptn", "--ptname", type=str, default='pt',\
help="Name of the potential temperature variable in the NETCDF file. Default='pt' ")
parser.add_argument("-c", "--coarse", type=int, default=1,\
help="Coarsening level. Int > 1.")
args = parser.parse_args()
#==========================================================#
# Rename ...
fileKey = args.fileKey
cl = abs(args.coarse)
vnames = args.vnames
ptname = args.ptname
#==========================================================#
# Obtain a list of files to include.
fileNos, fileList = filesFromList( fileKey+'*' )
# One figure each for Ri, dpt/dz and du/dz profiles.
fig = plt.figure(num=1, figsize=(12,10))
fig2= plt.figure(num=2, figsize=(12,10))
fig3= plt.figure(num=3, figsize=(12,10))
for fn in fileNos:
ds, varList, paramList = netcdfDataset(fileList[fn], verbose=False)
# NOTE(review): the -c/--coarse value is parsed into `cl` above, but the
# reads below hard-code cl=1 -- confirm whether coarsening was intended.
u1, veldict = readVariableFromDataset( vnames[0], ds, cl=1 )
u2, veldict = readVariableFromDataset( vnames[1], ds, cl=1 )
# Horizontal wind magnitude; release the components immediately.
umag = np.sqrt( u1**2 + u2**2 ); u1 = None; u2 = None
pt, ptdict = readVariableFromDataset( ptname, ds, cl=1 )
ptdict = None
# Reduce to a 1-D vertical profile; the averaged axes depend on how many
# dimensions the variable carries (4 or 2, per the dimension dict).
if( len(veldict.keys()) == 4 ):
umag = np.mean( umag, axis=(0,2,3) )
pt = np.mean( pt , axis=(0,2,3) )
if( len(veldict.keys()) == 2 ):
umag = np.mean( umag, axis=(0) )
pt = np.mean( pt , axis=(0) )
# Pick the vertical coordinate from whichever dimension name contains 'z'.
for dstr in veldict.keys():
if( 'z' in dstr ): z = veldict[dstr]
veldict = None
# Centered-on-midpoint finite differences of wind speed and pot. temp.
dudz = (umag[1:]-umag[:-1])/(z[1:]-z[:-1])
dptdz = (pt[1:]-pt[:-1])/(z[1:]-z[:-1])
zm = 0.5*(z[1:]+z[:-1])
# Gradient Richardson number: (g/<pt>) * dpt/dz / (du/dz)^2, with a 1e-9
# guard against division by zero and the result masked to zero where the
# shear is negligible (du/dz <= 1e-3).
Ri = (9.81/np.mean(pt))*(dptdz)/(dudz**2+1e-9) * ( dudz > 1e-3 ).astype(float)
# The [2:-4] slices trim near-boundary points from all three plots.
plotStr = ["Local Ri vs z ", "Ri" ,"z"]
fig = addToPlot(fig, Ri[2:-4], zm[2:-4],'{}'.format(fileList[fn]), plotStr, False )
plotStr = ["dpt/dz vs z ", "dpt/dz" ,"z"]
fig2 = addToPlot(fig2, dptdz[2:-4], zm[2:-4],'{}'.format(fileList[fn]), plotStr, False )
plotStr = ["du/dz vs z ", "du/dz" ,"z"]
fig3 = addToPlot(fig3, dudz[2:-4], zm[2:-4],'{}'.format(fileList[fn]), plotStr, False )
plt.legend(loc=0)
plt.show()
| mit |
ucbtrans/sumo-project | examples/timingPlan_simulation/Throughput/plots4pravin/Deceleration_4.5/Manual/tau_plots.py | 1 | 3677 | import sys
import optparse
import subprocess
import random
import pdb
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
import math
import numpy as np
import scipy.io
# Load tau counts (a*) and their timestamps (t*) for each combination of
# signal cycle length (1/2 min) and acceleration (1.0/1.5/2.6 m/s^2).
# Naming: a{cycle-min}_{accel*10}, t{cycle-min}_{accel*10}.
a2_10 = np.loadtxt('2min3RCT_taus_a1.0',dtype=int)
t2_10 = np.loadtxt('2min3RCT_taus_time_a1.0',dtype=int)
a2_15 = np.loadtxt('2min3RCT_taus_a1.5',dtype=int)
t2_15 = np.loadtxt('2min3RCT_taus_time_a1.5',dtype=int)
a2_26 = np.loadtxt('2min3RCT_taus_a2.6',dtype=int)
t2_26 = np.loadtxt('2min3RCT_taus_time_a2.6',dtype=int)
a1_10 = np.loadtxt('1min3RCT_taus_a1.0',dtype=int)
t1_10 = np.loadtxt('1min3RCT_taus_time_a1.0',dtype=int)
a1_15 = np.loadtxt('1min3RCT_taus_a1.5',dtype=int)
t1_15 = np.loadtxt('1min3RCT_taus_time_a1.5',dtype=int)
a1_26 = np.loadtxt('1min3RCT_taus_a2.6',dtype=int)
t1_26 = np.loadtxt('1min3RCT_taus_time_a2.6',dtype=int)
# Steady-state reference line (constant 1440) plotted against the 2min/a=1.0
# time axis shifted by 1200; reused on every figure below.
# NOTE(review): units of the 1440 level and the 1200 offset are not stated
# in this file -- confirm against the simulation configuration.
ss = [1440]*len(t2_10)
ts = np.subtract(t2_10,1200)
# Summary of the maximum tau per run. These are Python 2 print statements;
# this script targets Python 2.
print '2 min cycle -----------'
print 'Max for a = 1.0: ' + str(max(a2_10))
print 'Max for a = 1.5: ' + str(max(a2_15))
print 'Max for a = 2.6: ' + str(max(a2_26))
print '1 min cycle -----------'
print 'Max for a = 1.0: ' + str(max(a1_10))
print 'Max for a = 1.5: ' + str(max(a1_15))
print 'Max for a = 2.6: ' + str(max(a1_26))
# compare all the 2min cycles with different accelerations
plt.figure(1)
m1, = plt.plot(np.subtract(t2_10,1200),a2_10,label=r'$a=1.0 \: m/s^2$',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_15,1200),a2_15,label=r'$a=1.5 \: m/s^2$',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
m3, = plt.plot(np.subtract(t2_26,1200),a2_26,label=r'$a=2.6 \: m/s^2$',linestyle='-',color='b',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,m3,ms],loc='best')
# compare all the 1min cycles with different accelerations
plt.figure(2)
m1, = plt.plot(np.subtract(t1_10,1200),a1_10,label=r'$a=1.0 \: m/s^2$',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t1_15,1200),a1_15,label=r'$a=1.5 \: m/s^2$',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
m3, = plt.plot(np.subtract(t1_26,1200),a1_26,label=r'$a=2.6 \: m/s^2$',linestyle='-',color='b',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,m3,ms],loc='best')
# compare the two diff cycles but same acceleration
plt.figure(3)
m1, = plt.plot(np.subtract(t1_10,1200),a1_10,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_10,1200),a2_10,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,ms],loc='best')
plt.figure(4)
m1, = plt.plot(np.subtract(t1_15,1200),a1_15,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_15,1200),a2_15,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,ms],loc='best')
plt.figure(5)
m1, = plt.plot(np.subtract(t1_26,1200),a1_26,label='1min cycle',linestyle='-',color='r',linewidth=3,marker='o',markersize=7)
m2, = plt.plot(np.subtract(t2_26,1200),a2_26,label='2min cycle',linestyle='-',color='g',linewidth=3,marker='o',markersize=7)
ms, = plt.plot(ts,ss,label='Steady State',linestyle='--',color='k',linewidth=2)
plt.legend(handles=[m1,m2,ms],loc='best')
plt.show()
| bsd-2-clause |
argriffing/cvxpy | doc/sphinxext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
"""Render a parsed numpydoc docstring as Sphinx/reST source lines.

Each ``_str_*`` helper returns a list of output lines for one docstring
section; ``__str__`` assembles them in the canonical order.
"""
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
# Prefix every line with `indent` spaces.
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# The early return deliberately suppresses signature output; the
# code below it is unreachable (kept from the upstream version).
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
# Render a Parameters/Returns/... section as a reST field list.
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
# The documented object: set by the ClassDoc/FunctionDoc subclasses
# as _cls or _f; None when rendering a bare docstring.
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Members that exist on the object go in the autosummary table;
# the rest fall back to a hand-built reST table.
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
# Column widths sized to the longest name/type entries.
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
# Wrap the parent's See Also output in a .. seealso:: directive,
# dropping the parent's own two header lines.
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
# NOTE: dict.iteritems() makes this method Python 2 only.
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
# NOTE(review): lexicographic string comparison of sphinx versions
# ("0.10" < "0.6"); kept as upstream wrote it.
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
# Promote matplotlib-based examples to a .. plot:: directive when
# plotting is enabled and the example is not already a plot.
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
# Assemble all sections in canonical numpydoc order.
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
# Sphinx-rendering wrapper around FunctionDoc for plain functions.
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
# Sphinx-rendering wrapper around ClassDoc for classes.
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
# NOTE(review): the func_doc argument is accepted but not forwarded --
# ClassDoc is always given func_doc=None; confirm this is intentional.
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
# Sphinx renderer for an arbitrary object: stores the object in _f
# (exposed via the _obj property) and renders the supplied doc text.
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the Sphinx docstring wrapper appropriate for *obj*.

    When *what* is not given, it is inferred from the object's kind
    ('class', 'module', 'function', or 'object'); the matching wrapper
    class is then constructed and returned.
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    # Dispatch on the resolved kind via early returns.
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Fallback: any other object; fetch the docstring if not supplied.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| gpl-3.0 |
cavestruz/L500analysis | plotting/profiles/T_Vr_evolution/Tnt_Vr_evolution/plot_Tnt_Vr_r200m.py | 1 | 2927 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.utils.constants import rbins, linear_rbins
from derived_field_functions import *
# Plot style: reversed afmhot colormap indexed by expansion factor aexp.
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
# Expansion factors (snapshots) to include.
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
# Profiles and halo properties requested from the cluster database.
profiles_list = ['T_mw', 'r_mid',
'vel_gas_rad_std', 'vel_gas_tan_std',
'vel_gas_rad_avg', 'vel_gas_tan_avg',
'Tnt_cm_per_s_2_r200m',
'Vr2_cm_per_s_2_r200m',
'R/R200m']
halo_properties_list=['r200m','M_total_200m']
# Axis labels (LaTeX); these strings also key the PlotAxes.axes dict below.
Tnt_Vr2_ratio=r"$\Xi=T_{nt}/V^2_{r}$"
fXz1=r"$\Xi/\Xi(z=1)$"
pa = PlotAxes(figname='Tnt_Vr2_ratio_200m',
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Tnt_Vr2_ratio,fXz1],
xlabel=r"$R/R_{200m}$",
ylog=[True,False],
xlim=(0.2,2),
ylims=[(1e-1,1e2),(0.4,1.6)])
TratioV2={}
plots=[TratioV2]
clkeys=['Tnt_Vr2_ratio_200m']
# First pass: compute the Tnt/Vr^2 ratio profile per snapshot and plot it.
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
Tnt = calculate_profiles_mean_variance(cldata['Tnt_cm_per_s_2_r200m'])
Vr2 = calculate_profiles_mean_variance(cldata['Vr2_cm_per_s_2_r200m'])
TratioV2[aexp] = get_profiles_division_mean_variance(
mean_profile1=Tnt['mean'], var_profile1=Tnt['var'],
mean_profile2=Vr2['mean'], var_profile2=Vr2['var'])
# Python 2 print statement -- this script targets Python 2.
print TratioV2[aexp]['mean']
pa.axes[Tnt_Vr2_ratio].plot( rbins, TratioV2[aexp]['mean'],
color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
# Second pass: fractional evolution relative to the aexp=0.5 snapshot.
for aexp in aexps :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=TratioV2[aexp]['mean'],
var_profile1=TratioV2[aexp]['var'],
mean_profile2=TratioV2[0.5]['mean'],
var_profile2=TratioV2[0.5]['var'],
)
pa.axes[fXz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls='-')
# NOTE(review): the next line is duplicated verbatim -- harmless but
# probably meant to configure the fXz1 axis instead.
pa.axes[Tnt_Vr2_ratio].tick_params(labelsize=12)
pa.axes[Tnt_Vr2_ratio].tick_params(labelsize=12)
pa.axes[fXz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Tnt_Vr2_ratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Tnt_Vr2_ratio)
pa.savefig()
| mit |
JackKelly/neuralnilm_prototype | scripts/e515.py | 2 | 6699 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
N_SEGMENTS = 3
MAX_TARGET_POWER = 300
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
independently_center_inputs=True,
subsample_target=1,
ignore_incomplete=True,
allow_incomplete=False,
include_all=False,
skip_probability=0.25,
offset_probability=0.9,
target_is_start_and_end_and_mean=True,
y_processing_func=lambda y: y / MAX_TARGET_POWER
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=Plotter(n_seq_to_plot=32)
plotter=StartEndMeanPlotter(n_seq_to_plot=16, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name, target_appliance, seq_length):
"""Build the network for experiment variant 'a'.

Clones the module-level source_dict/net_dict, fills in the
per-experiment fields, and returns a Net with a conv->dense stack.
The data source is stored in the module-level `source` so main() can
delete it after the run.
"""
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 16
# Output length after the source's processing (start/end/mean targets).
target_seq_length = source.output_shape_after_processing()[1]
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 256,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': target_seq_length,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
"""Run the configured experiments sequentially.

For each (suffix, appliance, seq_length) entry, builds the exp_* call
string via init_experiment and executes it, cleaning up the network
and data source between runs.
"""
APPLIANCES = [
('a', ['fridge freezer', 'fridge'], 512),
('b', "'coffee maker'", 512),
('c', "'dish washer'", 2000),
('d', "'hair dryer'", 256),
('e', "'kettle'", 256),
('f', "'oven'", 2000),
('g', "'toaster'", 256),
('h', "'light'", 2000),
('i', ['washer dryer', 'washing machine'], 800)
]
# Only the first appliance is run ([:1]); widen the slice to run more.
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
# Append the extra arguments to the generated "exp_a(...)" string.
func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
# SECURITY NOTE: eval() executes the string built above; safe only
# because init_experiment's output is generated locally.
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
# Free the (large) source and network before the next experiment.
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e515.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
yudingding6197/fin_script | debug/my_stock.py | 1 | 3092 | #!/usr/bin/env python
import asyncio
import pandas as pd
import tushare as ts
import requests
from collections import deque
from aiohttp import ClientSession
import json
import re
import sqlite3
# K-line endpoint template: first %s is the symbol (e.g. 'sh600000'),
# second %s is a random cache-busting token.
# FIX: the query string previously contained the mojibake '\u00b6m='
# (pilcrow) where '&param=' belongs -- an HTML-entity ('&para') encoding
# corruption that broke the request parameters.
ori_url = 'http://web.ifzq.gtimg.cn/appstock/app/fqkline/get?_var=kline_dayqfq2017&param=%s,day,2017-01-01,2018-12-31,640,qfq&r=%s'
# Number of HTTP responses processed (shared across coroutines).
count = 0
# Downloaded (json, code) pairs awaiting insertion into sqlite.
down_data = deque()
# Column order of the k-line rows returned by the endpoint.
KLINE_TT_COLS = ['date', 'open', 'close', 'high', 'low', 'volume']
conn = sqlite3.connect('stock.db')
cu = conn.cursor()
async def hello_aio(q_code):
"""Consumer coroutine: pop symbols from q_code, fetch their k-line data
and append the parsed (json, code) pair to the module-level down_data.

Several instances run concurrently on one event loop; the pop happens
before any await, so each symbol is taken by exactly one consumer.
"""
while len(q_code) > 0:
code = q_code.pop()
url = ori_url % (code, ts.stock.trading._random(17))
async with ClientSession() as session:
async with session.get(url)as response:
lines = await response.text()
global count
count += 1
# Responses shorter than ~100 chars carry no usable payload.
if len(lines) > 100:
# Payload arrives as 'kline_dayqfq2017=<json>'; keep the JSON part.
lines = lines.split('=')[1]
# Strip the ',{"nd...}' fragments that break json parsing.
reg = re.compile(r',{"nd.*?}')
# re.subn returns (new_string, n_subs); [0] takes the string.
lines = re.subn(reg, '', lines)
js = json.loads(lines[0])
down_data.append((js, code))
await asyncio.sleep(0.01)
async def progress(q_code, max_code):
    """Report download progress until the work queue drains.

    Polls every 0.1 s; prints the completed count roughly every 4%
    (max_code // 25 items) of progress.
    """
    done = max_code - len(q_code)
    step = max_code // 25
    threshold = step
    while done < max_code:
        done = max_code - len(q_code)
        if done > threshold:
            print(done, max_code)
            threshold += step
        await asyncio.sleep(0.1)
def get_history():
    """Download 2017-2018 daily k-line data for every listed stock and
    store it in per-symbol tables of the local sqlite database.

    Spawns 50 consumer coroutines (hello_aio) over a shared work queue plus
    one progress reporter, then bulk-inserts the downloaded rows. Reads and
    mutates the module-level `count`, `down_data`, `conn` and `cu`.
    """
    cor_num = 50
    myd = deque()
    global count
    # Queue one tushare-style symbol (e.g. 'sh600000') per listed stock.
    stock_list = ts.get_stock_basics()  ####################
    for stock_code in stock_list.index:
        myd.append(ts.stock.trading._code_to_symbol(stock_code))
    # cor_num concurrent consumers plus one progress reporter.
    task_list = [hello_aio(myd) for _ in range(cor_num)]
    task_list.append(progress(myd, len(myd)))
    import time
    # FIX: time.clock() was removed in Python 3.8; time.perf_counter() is
    # the documented replacement for interval timing.
    start = time.perf_counter()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(task_list))
    print(time.perf_counter() - start)
    print('count:{}, corout:{}'.format(count, cor_num))
    count = 0
    start = time.perf_counter()
    for js, code in down_data:
        # Table names cannot be bound as SQL parameters; `code` comes from
        # tushare's fixed symbol format, not user input.
        sql = '''create table if not exists {}(
                id integer primary key autoincrement,
                date date,
                open float,
                close float,
                high float,
                low float,
                volume float
                )'''.format(code)
        cu.execute(sql)
        sql = 'insert into {} values(NULL,?,?,?,?,?,?)'.format(code)
        # Adjusted ('qfqday') data when present, raw 'day' data otherwise.
        dataflag = 'qfqday' if 'qfqday' in js['data'][code] else 'day'
        cu.executemany(sql, js['data'][code][dataflag])
    conn.commit()
    print(time.perf_counter() - start)
########################################################
if __name__ == '__main__':
# Entry point: run the full download-and-store pass.
get_history()
| gpl-2.0 |
hoburg/gpkit | gpkit/tests/t_examples.py | 1 | 10586 | """Unit testing of tests in docs/source/examples"""
import unittest
import os
import pickle
import numpy as np
from gpkit import settings, Model, Variable
from gpkit.tests.helpers import generate_example_tests
from gpkit.small_scripts import mag
from gpkit.small_classes import Quantity
from gpkit.constraints.loose import Loose
from gpkit.exceptions import (UnknownInfeasible,
PrimalInfeasible, DualInfeasible, UnboundedGP)
def assert_logtol(first, second, logtol=1e-6):
    "Asserts that the logs of two arrays have a given abstol"
    log_first = np.log(mag(first))
    log_second = np.log(mag(second))
    np.testing.assert_allclose(log_first, log_second, atol=logtol, rtol=0)
# pylint: disable=too-many-public-methods
class TestExamples(unittest.TestCase):
"""
To test a new example, add a function called `test_$EXAMPLENAME`, where
$EXAMPLENAME is the name of your example in docs/source/examples without
the file extension.
This function should accept two arguments (e.g. 'self' and 'example').
The imported example script will be passed to the second: anything that
was a global variable (e.g, "sol") in the original script is available
as an attribute (e.g., "example.sol")
If you don't want to perform any checks on the example besides making
sure it runs, just put "pass" as the function's body, e.g.:
def test_dummy_example(self, example):
pass
But it's good practice to ensure the example's solution as well, e.g.:
def test_dummy_example(self, example):
self.assertAlmostEqual(example.sol["cost"], 3.121)
"""
# TODO: allow enabling plotting examples, make plots in correct folder...
# def test_plot_sweep1d(self, _):
# import matplotlib.pyplot as plt
# plt.close("all")
# Verifies bst sampling against the analytic solution on both sides of
# the sweep's corner point.
def test_autosweep(self, example):
from gpkit import ureg
bst1, tol1 = example.bst1, example.tol1
bst2, tol2 = example.bst2, example.tol2
l_ = np.linspace(1, 10, 100)
for bst in [bst1, example.bst1_loaded]:
sol1 = bst.sample_at(l_)
assert_logtol(sol1("l"), l_)
assert_logtol(sol1("A"), l_**2 + 1, tol1)
assert_logtol(sol1["cost"], (l_**2 + 1)**2, tol1)
self.assertEqual(Quantity(1.0, sol1("A").units),
Quantity(1.0, ureg.m)**2)
ndig = -int(np.log10(tol2))
self.assertAlmostEqual(bst2.cost_at("cost", 3), 1.0, ndig)
# before corner
A_bc = np.linspace(1, 3, 50)
sol_bc = bst2.sample_at(A_bc)
assert_logtol(sol_bc("A"), (A_bc/3)**0.5, tol2)
assert_logtol(sol_bc["cost"], A_bc/3, tol2)
# after corner
A_ac = np.linspace(3, 10, 50)
sol_ac = bst2.sample_at(A_ac)
assert_logtol(sol_ac("A"), (A_ac/3)**2, tol2)
assert_logtol(sol_ac["cost"], (A_ac/3)**4, tol2)
def test_checking_result_changes(self, example):
sol = example.sol
self.assertAlmostEqual(sol["cost"], 0.48, 2)
def test_evaluated_fixed_variables(self, example):
sol = example.sol
t_night = example.t_night
self.assertTrue((sol["variables"][t_night] == [16, 12, 8]).all())
def test_evaluated_free_variables(self, example):
x2 = example.x2
sol = example.sol
self.assertTrue(abs(sol(x2) - 4) <= 1e-4)
def test_external_constraint(self, example):
pass
def test_external_function(self, example):
external_code = example.external_code
self.assertEqual(external_code(0), 0)
def test_external_sp(self, example):
m = example.m
sol = m.localsolve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 0.707, places=3)
def test_freeing_fixed_variables(self, example):
x = example.x
y = Variable("y", 3)
m = Model(x, [x >= 1 + y, y >= 1])
sol = m.solve(verbosity=0)
self.assertTrue(abs(sol["cost"] - 4) <= 1e-4)
self.assertTrue(y in sol["constants"])
# Deleting the substitution frees y, lowering the optimum.
del m.substitutions["y"]
sol = m.solve(verbosity=0)
self.assertTrue(abs(sol["cost"] - 2) <= 1e-4)
self.assertTrue(y in sol["freevariables"])
def test_gettingstarted(self, example):
pass
def test_loose_constraintsets(self, example):
m = example.m
sol = m.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 2, 3)
def test_sub_multi_values(self, example):
x = example.x
y = example.y
z = example.z
p = example.p
self.assertTrue(all(p.sub({x: 1, "y": 2}) == 2*z))
self.assertTrue(all(
p.sub({x: 1, y: 2, "z": [1, 2]}) == z.sub({z: [2, 4]})
))
def test_substitutions(self, example):
x = example.x
p = example.p
self.assertTrue(p.sub({x: 3}) == 9)
self.assertTrue(p.sub({x.key: 3}) == 9)
self.assertTrue(p.sub({"x": 3}) == 9)
def test_tight_constraintsets(self, example):
m = example.m
sol = m.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 2, places=2)
def test_vectorization(self, example):
x = example.x
y = example.y
z = example.z
self.assertEqual(y.shape, (5, 3))
self.assertEqual(x.shape, (2, 5, 3))
self.assertEqual(z.shape, (7, 3))
def test_model_var_access(self, example):
model = example.PS
_ = model["E"]
with self.assertRaises(ValueError):
_ = model["m"] # multiple variables called m
# Also exercises solution pickling/unpickling round-trips.
def test_performance_modeling(self, example):
m = Model(example.M.cost, Loose(example.M), example.M.substitutions)
sol = m.solve(verbosity=0)
sol.table()
sol.save("solution.pkl")
sol.table()
sol_loaded = pickle.load(open("solution.pkl", "rb"))
sol_loaded.table()
sweepsol = m.sweep({example.AC.fuse.W: (50, 100, 150)}, verbosity=0)
sweepsol.table()
sweepsol.save("sweepsolution.pkl")
sweepsol.table()
sol_loaded = pickle.load(open("sweepsolution.pkl", "rb"))
sol_loaded.table()
def test_sp_to_gp_sweep(self, example):
sol = example.sol
cost = sol["cost"]
self.assertAlmostEqual(cost[0], 4628.21, places=2)
self.assertAlmostEqual(cost[1], 6226.60, places=2)
self.assertAlmostEqual(cost[2], 7362.77, places=2)
# The expected outcome depends on the installed solver.
def test_boundschecking(self, example): # pragma: no cover
if "mosek_cli" in settings["default_solver"]:
with self.assertRaises(UnknownInfeasible):
example.gp.solve(verbosity=0)
else:
example.gp.solve(verbosity=0) # mosek_conif and cvxopt solve it
def test_vectorize(self, example):
pass
def test_primal_infeasible_ex1(self, example):
primal_or_unknown = PrimalInfeasible
if "cvxopt" in settings["default_solver"]: # pragma: no cover
primal_or_unknown = UnknownInfeasible
with self.assertRaises(primal_or_unknown):
example.m.solve(verbosity=0)
def test_primal_infeasible_ex2(self, example):
primal_or_unknown = PrimalInfeasible
if "cvxopt" in settings["default_solver"]: # pragma: no cover
primal_or_unknown = UnknownInfeasible
with self.assertRaises(primal_or_unknown):
example.m.solve(verbosity=0)
def test_docstringparsing(self, example):
pass
# Exercises infeasibility diagnostics; expected exception types vary by
# solver backend.
def test_debug(self, example):
dual_or_primal = DualInfeasible
if "mosek_conif" == settings["default_solver"]: # pragma: no cover
dual_or_primal = PrimalInfeasible
with self.assertRaises(UnboundedGP):
example.m.gp()
with self.assertRaises(dual_or_primal):
gp = example.m.gp(checkbounds=False)
gp.solve(verbosity=0)
primal_or_unknown = PrimalInfeasible
if "cvxopt" == settings["default_solver"]: # pragma: no cover
primal_or_unknown = UnknownInfeasible
with self.assertRaises(primal_or_unknown):
example.m2.solve(verbosity=0)
with self.assertRaises(UnboundedGP):
example.m3.gp()
with self.assertRaises(DualInfeasible):
gp3 = example.m3.gp(checkbounds=False)
gp3.solve(verbosity=0)
def test_simple_sp(self, example):
pass
def test_simple_box(self, example):
pass
def test_x_greaterthan_1(self, example):
pass
def test_beam(self, example):
self.assertFalse(np.isnan(example.sol("w")).any())
def test_water_tank(self, example):
pass
def test_sin_approx_example(self, example):
pass
def test_simpleflight(self, example):
self.assertTrue(example.sol.almost_equal(example.sol_loaded))
for sol in [example.sol, example.sol_loaded]:
freevarcheck = {
"A": 8.46,
"C_D": 0.0206,
"C_f": 0.0036,
"C_L": 0.499,
"Re": 3.68e+06,
"S": 16.4,
"W": 7.34e+03,
"V": 38.2,
"W_w": 2.40e+03
}
# sensitivity values from p. 34 of W. Hoburg's thesis
senscheck = {
r"(\frac{S}{S_{wet}})": 0.4300,
"e": -0.4785,
"V_{min}": -0.3691,
"k": 0.4300,
r"\mu": 0.0860,
"(CDA0)": 0.0915,
"C_{L,max}": -0.1845,
r"\tau": -0.2903,
"N_{ult}": 0.2903,
"W_0": 1.0107,
r"\rho": -0.2275
}
# Free variables and sensitivities must agree to within 1%.
for key in freevarcheck:
sol_rat = mag(sol["variables"][key])/freevarcheck[key]
self.assertTrue(abs(1-sol_rat) < 1e-2)
for key in senscheck:
sol_rat = sol["sensitivities"]["variables"][key]/senscheck[key]
self.assertTrue(abs(1-sol_rat) < 1e-2)
def test_relaxation(self, example):
pass
def test_unbounded(self, example):
pass
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): no path separator before '../..' -- abspath still collapses
# the fused 'tests..' component to the same directory, but os.path.join
# would be clearer; confirm before changing.
EXAMPLE_DIR = os.path.abspath(FILE_DIR + '../../../docs/source/examples')
SOLVERS = settings["installed_solvers"]
if os.path.isdir(EXAMPLE_DIR):
# Generate one TestCase per (example script, installed solver).
TESTS = generate_example_tests(EXAMPLE_DIR, [TestExamples], SOLVERS)
else: # pragma: no cover
TESTS = []
if __name__ == "__main__": # pragma: no cover
# pylint:disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| mit |
adamrvfisher/TechnicalAnalysisLibrary | RMultipleTracker.py | 1 | 13087 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 09:04:55 2018
@author: AmatVictoriaCuramIII
"""
#R Multiple Documentation; Trade Tracking
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
##Empty sets used for optimization
Empty = []
somelist = []
#Empty Dataframes
Dataset = pd.DataFrame()
TradeSubIndex = pd.DataFrame()
Trades = pd.DataFrame()
##Timing statistics and iteration counter for optimization
#Start = t.time()
#Counter = 0
#start = t.time()
#Inputs - OHLC data
Ticker1 = 'SOYB'
Asset1 = YahooGrabber(Ticker1)
#Tasty OHLC; ***ATTN*** insert path for OHLC data
#Asset1 = pd.read_pickle('C:\\Users\\Tasty\\Desktop\\WorkingDirectory\\UVXY')
#In percentages
LongStopLoss = .05
LongProfitTake = .3
ShortStopLoss = .05
ShortProfitTake = .3
Commission = .01
Slippage = .01
#Time series trimmer for in/out sample data
#Asset1a = Asset1[-1250:] #Out
Asset1 = Asset1[:] #In
#
#Numbered subindex
Asset1['SubIndex'] = range(1,len(Asset1)+1)
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
#SMA window
window = 100
#SMA calculation
Asset1['MA'] = Asset1['Adj Close'].rolling(window=window, center=False).mean()
#Current Close Price to SMA ratio
Asset1['Price/MA'] = Asset1['Adj Close']/Asset1['MA'].shift(1)
#Fill nan
Asset1['MA'] = Asset1['MA'].fillna(0)
Asset1['Price/MA'] = Asset1['Price/MA'].fillna(0)
#Signal = Price to Moving Average
#if price is greater than the MA go long w/ brackets
#if price is less than the MA go short w/ brackets
Asset1['Signal'] = np.where(Asset1['Price/MA'] >= 1, 1, -1)
#if MA is still being computed, stay out of market
Asset1['Signal'] = np.where(Asset1['Price/MA'] == 0, 0, Asset1['Signal'])
#Find the first trade of the signal period, so we can document entry price
Asset1['OriginalSignal'] = 0
Asset1['OriginalSignal'].loc[Asset1['Signal'] != Asset1['Signal'].shift(1)] = Asset1['Signal']
numsignals = sum(abs(Asset1['OriginalSignal']))
#Declare column to record entry price
Asset1['EntryPrice'] = np.nan
#If it's the original signal, record entry price
Asset1['EntryPrice'].loc[(Asset1['OriginalSignal'] != 0)] = Asset1['Adj Close']
#Assess spread/unfavorable fills here!
#Asset1['EntryPriceSlippage'] = Asset1['EntryPrice']
#Long slippage
#Asset1['EntryPriceSlippage'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['Signal'] == 1)] = Asset1['EntryPrice'] * (1 + Slippage)
#Short slippage
#Asset1['EntryPriceSlippage'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['Signal'] == -1)] = Asset1['EntryPrice'] * (1 - Slippage)
#
#Run the entry price DOWN the column until new position is taken
#Asset1['EntryPriceSlippage'] = Asset1['EntryPriceSlippage'].ffill(inplace=False)
#Fill nan with 0 for entry price
#Asset1['EntryPriceSlippage'] = Asset1['EntryPriceSlippage'].fillna(0)
#Declare StopPrice column
Asset1['StopPrice'] = np.nan
#Long stop calculation
Asset1['StopPrice'].loc[(Asset1['EntryPrice'] != 0) & (
Asset1['OriginalSignal'] == 1)] = Asset1['EntryPrice'] * (1 - LongStopLoss)
#Short stop calculation
Asset1['StopPrice'].loc[(Asset1['EntryPrice'] != 0) & (
Asset1['OriginalSignal'] == -1)] = Asset1['EntryPrice'] * (1 + ShortStopLoss)
#Forward fill
Asset1['StopPrice'] = Asset1['StopPrice'].ffill(inplace=False)
Asset1['StopPrice'] = Asset1['StopPrice'].fillna(0)
#Declare ProfitPrice column
Asset1['ProfitPrice'] = np.nan
#Long stop calculation
Asset1['ProfitPrice'].loc[(Asset1['EntryPrice'] != 0) & (
Asset1['OriginalSignal'] == 1)] = Asset1['EntryPrice'] * (1 + LongProfitTake)
#Short stop calculation
Asset1['ProfitPrice'].loc[(Asset1['EntryPrice'] != 0) & (
Asset1['OriginalSignal'] == -1)] = Asset1['EntryPrice'] * (1 - ShortProfitTake)
#Forward fill
Asset1['ProfitPrice'] = Asset1['ProfitPrice'].ffill(inplace=False)
Asset1['ProfitPrice'] = Asset1['ProfitPrice'].fillna(0)
Asset1['Exit'] = 0
#This will be the final return stream. Generally I use a regime of
#(-1, or 0, or +1) multiplied by the next day's log return to get equity curve
Asset1['BracketReturns'] = 1
#Short Take Gain exit, 1 = yes, 0 = no
Asset1['STG'] = 0
#Short Take Gain exit, 1 = yes, 0 = no
Asset1['SSL'] = 0
#Short Stop Loss exit, 1 = yes, 0 = no
Asset1['LTG'] = 0
#Long Stop Loss exit, 1 = yes, 0 = no
Asset1['LSL'] = 0
#For initial exits
Asset1['OriginalSTG'] = 0
Asset1['OriginalSSL'] = 0
Asset1['OriginalLTG'] = 0
Asset1['OriginalLSL'] = 0
Asset1['GapSTG'] = 0
Asset1['GapSSL'] = 0
Asset1['GapLTG'] = 0
Asset1['GapLSL'] = 0
#1 = STG being hit starting the day after the signal. After initial hit, 1s
#will run down the column even though the trade should be closed
Asset1['STG'].loc[(Asset1['Signal'] == -1) & (
Asset1['OriginalSignal'] == 0) & (Asset1['Low'] < Asset1['ProfitPrice'])] = 1
#find initial exit
#Asset1['OriginalSTG'].loc[Asset1['STG'] != Asset1['STG'].shift(1)] = Asset1['STG']
Asset1['LTG'].loc[(Asset1['Signal'] == 1) & (
Asset1['OriginalSignal'] == 0) & (Asset1['High'] > Asset1['ProfitPrice'])] = 1
#Asset1['OriginalLTG'].loc[Asset1['LTG'] != Asset1['LTG'].shift(1)] = Asset1['LTG']
Asset1['SSL'].loc[(Asset1['Signal'] == -1) & (
Asset1['OriginalSignal'] == 0) & (Asset1['High'] > Asset1['StopPrice'])] = 1
#Asset1['OriginalSSL'].loc[Asset1['STG'] != Asset1['SSL'].shift(1)] = Asset1['SSL']
Asset1['LSL'].loc[(Asset1['Signal'] == 1) & (
Asset1['OriginalSignal'] == 0) & (Asset1['Low'] < Asset1['StopPrice'])] = 1
#Asset1['OriginalLSL'].loc[Asset1['LSL'] != Asset1['LSL'].shift(1)] = Asset1['LSL']
#Assess Gaps on days where trade closes
Asset1['GapSTG'].loc[(Asset1['STG'] == 1) & (
Asset1['Open'] < Asset1['ProfitPrice'])] = 1
Asset1['GapSSL'].loc[(Asset1['SSL'] == 1) & (
Asset1['Open'] > Asset1['StopPrice'])] = 1
Asset1['GapLTG'].loc[(Asset1['LTG'] == 1) & (
Asset1['Open'] > Asset1['ProfitPrice'])] = 1
Asset1['GapLSL'].loc[(Asset1['LSL'] == 1) & (
Asset1['Open'] < Asset1['StopPrice'])] = 1
#Days where StopPrice and ProfitPrice are both touched
Asset1['LongDD'] = np.where((Asset1['LTG'] + Asset1['LSL']) == 2, 1, 0)
Asset1['ShortDD'] = np.where((Asset1['STG'] + Asset1['SSL']) == 2, 1, 0)
Asset1['DoubleDay'] = Asset1['LongDD'] + Asset1['ShortDD']
#Exit on DoubleDays - 1 & 2; LTG - 3; LSL - 4; STG - 5, SSL - 6.
#Preference given to stoploss on 'expensive' days
Asset1['Exit'].loc[(Asset1['LTG'] == 1)] = 1 #exit as gain
Asset1['Exit'].loc[(Asset1['STG'] == 1)] = 2 #exit as gain
Asset1['Exit'].loc[(Asset1['GapSTG'] == 1)] = 3 #exit as gain
Asset1['Exit'].loc[(Asset1['GapLTG'] == 1)] = 4 #exit as gain
Asset1['Exit'].loc[(Asset1['LSL'] == 1)] = 5 #exit as loss
Asset1['Exit'].loc[(Asset1['SSL'] == 1)] = 6 #exit as loss
#Bug fix: the next two lines used '==' (a no-op comparison whose result was
#discarded) instead of '=', so double days were never coded as 7/8 and fell
#through to whichever earlier code had been assigned.
Asset1['Exit'].loc[(Asset1['LongDD'] == 1)] = 7 #exit long position at loss
Asset1['Exit'].loc[(Asset1['ShortDD'] == 1)] = 8 #exit as short position at loss
Asset1['Exit'].loc[(Asset1['GapSSL'] == 1)] = 9 #exit as loss
Asset1['Exit'].loc[(Asset1['GapLSL'] == 1)] = 10 #exit as loss
#Create individual trade subsets for examination
TradeSubIndex = Asset1['SubIndex'].loc[(Asset1['OriginalSignal'] != 0)]
TradeDates = pd.DataFrame()
try:
for i in range(0, len(TradeSubIndex)):
TradeDates[i] = TradeSubIndex[i]-1,TradeSubIndex[i+1]
except IndexError:
pass
#quick reference matrix for exits
ExitReturns = pd.Series(index=range(0,10))
ExitReturns[0] = 0
ExitReturns[1] = 1 + LongProfitTake
ExitReturns[2] = 1 + ShortProfitTake
ExitReturns[3] = 0
ExitReturns[4] = 0
ExitReturns[5] = 1 - LongStopLoss
ExitReturns[6] = 1 - ShortStopLoss
ExitReturns[7] = 1 - LongStopLoss
ExitReturns[8] = 1 - ShortStopLoss
ExitReturns[9] = 0
ExitReturns[10] = 0
#Trade Analysis from 0th trade
for ii in TradeDates.columns:
    # slice this trade's window (entry bar - 1 .. next entry) out of the series
    TradeData = Asset1[TradeDates[ii][0]:TradeDates[ii][1]]
    #the 'next' function yields index position of first non 0 exit
    ExitTaken = TradeData['Exit'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
    SubIndexOfExit = TradeData['SubIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
    TradeDuration = len(TradeData) - 1
    TradeDirection = TradeData['Signal'][0]
    # bracket-based return for this exit code; may be overridden below for
    # signal-change (code 0) and gap (codes 3/4/9/10) exits
    TradeReturn = ExitReturns[ExitTaken]
    #If no stops are hit and there is a signal change, take P/L and switch position
    if ExitTaken == 0:
        SubIndexOfExit = TradeData['SubIndex'][-1]
        if TradeDirection == 1:
            TradeReturn = 1 + ((TradeData['Adj Close'][-1] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
        elif TradeDirection == -1:
            TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Adj Close'][-1])/TradeData['Adj Close'][0])
        else:
            pass
    #Assess Gaps: gap exits realise the open price instead of the bracket price
    #GAP STG
    if ExitTaken == 3:
        TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
    else:
        pass
    #GAP LTG
    if ExitTaken == 4:
        TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
    else:
        pass
    #GAP SSL
    if ExitTaken == 9:
        TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
    else:
        pass
    #GAP LSL
    if ExitTaken == 10:
        TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
    else:
        pass
    # collect this trade's summary row, then reset the scratch list
    Empty.append(ExitTaken)
    Empty.append(SubIndexOfExit)
    Empty.append(TradeDuration)
    Empty.append(TradeDirection)
    Empty.append(TradeReturn)
    Emptyseries = pd.Series(Empty)
    Dataset[ii] = Emptyseries.values
    Empty[:] = []
#
# Name the summary rows of the per-trade Dataset
Dataset = Dataset.rename(index={0: "ExitTaken", 1: "SubIndex", 2: "TradeDuration",
                                3: "TradeDirection", 4: "TradeReturn"})
Asset1['Brackets'] = 1
Asset1['SlippageCommissionBrackets'] = 1
# write each trade's return back onto the bar on which it exited
for d in Dataset:
    Asset1['SlippageCommissionBrackets'].loc[(Asset1['SubIndex'] == Dataset[d]['SubIndex'])] = Dataset[d]['TradeReturn'] - Slippage - Commission
for d in Dataset:
    Asset1['Brackets'].loc[(Asset1['SubIndex'] == Dataset[d]['SubIndex'])] = Dataset[d]['TradeReturn']
# summary statistics: a 'win' is any bracket return > 1, a loss any < 1
NumWinningTrades = len(Asset1['Brackets'][Asset1['Brackets'] > 1])
NumLosingTrades = len(Asset1['Brackets'][Asset1['Brackets'] < 1])
AvgWin = Asset1['Brackets'][Asset1['Brackets'] > 1].mean()
AvgLoss = Asset1['Brackets'][Asset1['Brackets'] < 1].mean()
# NOTE(review): this divides gross average returns (both > 0), not net
# win/loss magnitudes -- confirm that is the intended reward/risk measure
RewardRisk = AvgWin/AvgLoss
# NOTE(review): raises ZeroDivisionError when no trades closed; under
# Python 2 these are integer divisions and would truncate to 0
WinRate = NumWinningTrades / (NumWinningTrades + NumLosingTrades)
LossRate = NumLosingTrades / (NumWinningTrades + NumLosingTrades)
Expectancy = (WinRate * RewardRisk) - (LossRate)
# NOTE(review): .plot() returns a matplotlib Axes, so this column stores an
# Axes object broadcast to every row; the plot side effect is what matters
Asset1['Multiplier'] = Asset1['Brackets'].cumprod().plot()
print(Expectancy)
Asset1['Brackets'].plot()
#TradeData = Asset1[TradeDates[0][0]:TradeDates[0][1]]
##the 'next' function yields index position of first non 0 exit
#TradeData['ReIndex'] = range(0,len(TradeData))
#ExitTaken = TradeData['Exit'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
#SubIndexOfExit = TradeData['SubIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
#TradeDuration = TradeData['ReIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
#TradeDirection = TradeData['Signal'][0]
#TradeReturn = ExitReturns[ExitTaken]
#
##If no stops are hit and there is a signal change, take P/L and switch position
#if ExitTaken == 0:
# SubIndexOfExit = TradeData['SubIndex'][-1]
# if TradeDirection == 1:
# TradeReturn = 1 + ((TradeData['Adj Close'][-1] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
# elif TradeDirection == -1:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Adj Close'][-1])/TradeData['Adj Close'][0])
#else:
# pass
##Assess Gaps
##GAP STG
#if ExitTaken == 3:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
#else:
# pass
##GAP LTG
#if ExitTaken == 4:
# TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
#else:
# pass
##GAP SSL
#if ExitTaken == 9:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
#else:
# pass
##GAP LSL
#if ExitTaken == 10:
# TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
#else:
# pass
#Empty.append(ExitTaken)
#Empty.append(SubIndexOfExit)
#Empty.append(TradeDuration)
#Empty.append(TradeDirection)
#Empty.append(TradeReturn)
#Emptyseries = pd.Series(Empty)
##Dataset[ii] = Emptyseries.values
##Empty[:] = []
#print(Emptyseries) | apache-2.0 |
morgenst/PyAnalysisTools | PyAnalysisTools/AnalysisTools/MLHelper.py | 1 | 19393 | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import pickle
import sys
import numpy as np
import pandas as pd
import root_numpy
from builtins import object
from builtins import range
try:
from imblearn import over_sampling
except ImportError:
pass
from past.utils import old_div
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from sklearn.metrics import classification_report
import PyAnalysisTools.PlottingUtils.Formatting as fm
import PyAnalysisTools.PlottingUtils.PlottingTools as pt
import ROOT
from PyAnalysisTools.PlottingUtils import set_batch_mode
from PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig as pc
from PyAnalysisTools.base.ProcessConfig import find_process_config, parse_and_build_process_config
from PyAnalysisTools.base.FileHandle import FileHandle
from PyAnalysisTools.base import _logger, InvalidInputError
from PyAnalysisTools.base.OutputHandle import OutputFileHandle
from PyAnalysisTools.base.ShellUtils import make_dirs
import matplotlib
matplotlib.use('Agg') # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
def explode(df, lst_cols, fill_value=''):
    """Expand list-valued columns of *df* into one row per list element.

    Non-list columns are repeated for each element of the exploded lists;
    rows whose lists are empty are re-attached at the end, padded with
    *fill_value* in the list columns.

    :param df: input dataframe
    :param lst_cols: column name or list of column names holding lists
        (assumed to hold lists of equal per-row length across columns;
        only the first column's lengths are used)
    :param fill_value: padding value for rows whose lists are empty
    :return: exploded dataframe with the original column order
    """
    # make sure `lst_cols` is a list
    if lst_cols and not isinstance(lst_cols, list):
        lst_cols = [lst_cols]
    # all columns except `lst_cols`
    idx_cols = df.columns.difference(lst_cols)
    # calculate lengths of lists
    lens = df[lst_cols[0]].str.len()
    if (lens > 0).all():
        # ALL lists in cells aren't empty
        return pd.DataFrame({col: np.repeat(df[col].values, df[lst_cols[0]].str.len())
                             for col in idx_cols}).assign(
                                 **{col: np.concatenate(df[col].values) for col in lst_cols}).loc[:, df.columns]
    # at least one list in cells is empty: explode the non-empty part, then
    # re-attach the empty-list rows. pd.concat replaces DataFrame.append,
    # which silently returned a copy and was removed in pandas 2.x.
    exploded = pd.DataFrame({col: np.repeat(df[col].values, df[lst_cols[0]].str.len())
                             for col in idx_cols}).assign(
                                 **{col: np.concatenate(df[col].values) for col in lst_cols})
    return pd.concat([exploded, df.loc[lens == 0, idx_cols]]).fillna(fill_value).loc[:, df.columns]
def print_classification(model, X, y, Xeval, yeval, output_path=None):
    """
    Print sklearn classification reports for the training set (biased) and an
    independent evaluation set (unbiased); optionally write both to disk.

    :param model: trained classifier exposing predict_classes
        (NOTE(review): legacy Keras Sequential API -- removed in newer
        TF/Keras releases; confirm the model type before upgrading)
    :param X: training features
    :param y: training labels
    :param Xeval: evaluation features
    :param yeval: evaluation labels
    :param output_path: optional directory; reports are written to
        <output_path>/classification_reports/ when given
    :return: None
    """
    class_preds_train = model.predict_classes(X)
    class_preds_eval = model.predict_classes(Xeval)
    report_train = classification_report(class_preds_train, y)
    print('########## on same dataset (bias) ##########')
    print(report_train)
    report_unbiased = classification_report(class_preds_eval, yeval)
    print('\n\n########## on independent dataset (unbiased) ##########')
    print(report_unbiased)
    if output_path is not None:
        store = os.path.join(output_path, 'classification_reports')
        make_dirs(store)
        with open(os.path.join(store, 'training_set.txt'), 'w') as f:
            print(report_train, file=f)
        with open(os.path.join(store, 'test_set.txt'), 'w') as f:
            print(report_unbiased, file=f)
def plot_scoring(history, name, scorers, output_path):
    """
    Make scoring plots for each epoch, i.e. loss, accuracy etc
    :param history: training history (Keras History-like; exposes a
        .history dict with per-epoch values, incl. 'val_<scorer>' keys)
    :param name: output name
    :param scorers: scorings to be plotted
    :param output_path: output path
    :return: None (figures are saved as <output_path>/plots/<name>.png/.pdf)
    """
    # draw train and validation curves for every scorer on one shared figure
    for scorer in scorers:
        plt.plot(history.history[scorer])
        plt.plot(history.history['val_{:s}'.format(scorer)])
    # flattened legend: (scorer, "valid scorer") pairs in plotting order
    plt.legend([i for x in [(scorer, "valid {:s}".format(scorer)) for scorer in scorers] for i in x])
    plt.xlabel('epoch')
    plt.ylabel('scoring')
    store = os.path.join(output_path, 'plots')
    make_dirs(store)
    plt.savefig(os.path.join(store, '{:s}.png'.format(name)))
    plt.savefig(os.path.join(store, '{:s}.pdf'.format(name)))
    plt.close()
class MLTrainConfig(object):
    """Lightweight namespace holding the training configuration.

    Every keyword argument becomes an instance attribute; 'scaler' and
    'imbalance' default to None when not supplied.
    """
    def __init__(self, **kwargs):
        settings = {'scaler': None, 'imbalance': None}
        settings.update(kwargs)
        for name, value in settings.items():
            setattr(self, name, value)
# todo: Should be deprecated
class MLConfig(object):
    """
    Class containing configuration of an ML classifier attached as a
    friend-tree branch: score name, input variable set, selection and
    optional feature scaling.
    """
    def __init__(self, **kwargs):
        # 'scaler' and 'scale_algo' are optional; 'branch_name',
        # 'variable_list' and 'selection' are required (KeyError if absent)
        kwargs.setdefault('scaler', None)
        kwargs.setdefault('scale_algo', None)
        self.score_name = kwargs['branch_name']
        self.varset = kwargs['variable_list']
        self.scaler = kwargs['scaler']
        self.scale_algo = kwargs['scale_algo']
        self.selection = kwargs['selection']

    def __str__(self):
        """
        Overloaded str operator. Gets called if object is printed
        :return: formatted string with name and attributes
        :rtype: str
        """
        obj_str = "Attached ML branch {:s} was created with the following configuration \n".format(self.score_name)
        obj_str += 'variables: \n'
        for var in self.varset:
            obj_str += '\t {:s}\n'.format(var)
        if self.selection is not None:
            obj_str += 'selection: \n'
            for sel in self.selection:
                obj_str += '\t {:s}\n'.format(sel)
        else:
            obj_str += 'selection: None\n'
        if self.scaler is not None:
            # NOTE(review): no trailing newline here, unlike the other branches
            obj_str += 'scaler: {:s}'.format(self.scaler)
        else:
            obj_str += 'scaler: None\n'
        return obj_str

    def __eq__(self, other):
        """
        Comparison operator
        :param other: ML config object to compare to
        :type other: MLConfig
        :return: True/False
        :rtype: boolean
        """
        if isinstance(self, other.__class__):
            for k, v in list(self.__dict__.items()):
                if k not in other.__dict__:
                    return False
                # scalers are compared by their algorithm name only,
                # not by object identity
                if k == 'scaler':
                    if v is None and other.__dict__[k] is None:
                        continue
                    if v is None or other.__dict__[k] is None:
                        return False
                    if self.__dict__[k].scale_algo != other.__dict__[k].scale_algo:
                        return False
                    continue
                if self.__dict__[k] != other.__dict__[k]:
                    return False
            return True
        return False

    def __ne__(self, other):
        """
        Comparison operator (negative)
        :param other: ML config object to compare to
        :type other: MLConfig
        :return: True/False
        :rtype: boolean
        """
        return not self.__eq__(other)
class MLConfigHandle(object):
    """
    Handle to create and add ML configuration to summary file in friend directory
    """
    def __init__(self, **kwargs):
        # summary pickle lives in output_path (defaults to cwd)
        kwargs.setdefault('output_path', '.')
        self.config = MLConfig(**kwargs)
        self.output_path = kwargs['output_path']
        self.file_name = os.path.join(self.output_path, 'ml_config_summary.pkl')

    def dump_config(self):
        """Append this config to the summary pickle.

        A config with the same score name and identical settings is a
        silent no-op; the same name with *different* settings aborts.
        """
        data = {}
        # Bug fix: pickle files must be opened in binary mode ('rb'/'wb');
        # text mode raised on Python 3 and corrupted data on some platforms.
        if os.path.exists(self.file_name):
            with open(self.file_name, 'rb') as f:
                data = pickle.load(f)
        if self.config.score_name in data:
            if self.config == data[self.config.score_name]:
                return
            _logger.error('Score with name {:s} does already exist, but has different config. '
                          'Will give up adding it'.format(self.config.score_name))
            exit()
        data[self.config.score_name] = self.config
        with open(self.file_name, 'wb') as f:
            pickle.dump(data, f)
class DataScaler(object):
    """Feature-scaling helper that selects a sklearn scaler by algorithm name."""

    def __init__(self, algo="default"):
        # name of the scaling algorithm; see get_algos() for supported values
        self.scale_algo = algo
        # NOTE(review): never assigned elsewhere in this class; kept for
        # interface compatibility
        self.scaler = None

    def __eq__(self, other):
        """
        Comparison operator
        :param other: DataScaler config object to compare to
        :type other: DataScaler
        :return: True/False
        :rtype: boolean
        """
        if isinstance(self, other.__class__):
            return self.scale_algo == other.scale_algo
        return False

    def __format__(self, format_spec):
        # Bug fix: this used to return self.__unicode__(), which is not
        # defined anywhere and raised AttributeError on every format() call.
        # Represent the scaler by its algorithm name instead.
        return self.scale_algo

    @staticmethod
    def get_algos():
        """Return the list of supported scaling algorithm names."""
        return ["default", "standard", "min_max"]

    def apply_scaling(self, X, y, dump=None, scaler=None):
        """Scale features and label-encode targets.

        :param X: feature matrix
        :param y: labels; label-encoded when not None
        :param dump: optional path to pickle the fitted scaler to
        :param scaler: optional path of a previously pickled scaler to reuse
            (y then passes through untouched)
        :return: tuple of (scaled X, encoded y)
        """
        if scaler is not None:
            with open(scaler, 'rb') as fn:
                return pickle.load(fn).transform(X), y
        if dump is not None:
            make_dirs(os.path.dirname(dump))
        le = LabelEncoder()
        if y is not None:
            y = le.fit_transform(y)
        if self.scale_algo == "min_max":
            return self.apply_min_max_scaler(X, dump), y
        elif self.scale_algo == "standard":
            return self.apply_standard_scaler(X, dump), y
        elif self.scale_algo == "default":
            # "default" is an alias for min-max scaling
            return self.apply_min_max_scaler(X, dump), y
        else:
            _logger.error("Invalid scaling algorithm requested: {:s}".format(self.scale_algo))
            raise InvalidInputError()

    @staticmethod
    def apply_standard_scaler(X, dump=None):
        """Fit and apply a zero-mean / unit-variance scaler."""
        scaler = StandardScaler()
        return DataScaler.apply_scaler(scaler, X, dump)

    @staticmethod
    def apply_min_max_scaler(X, dump=None):
        """Fit and apply a [0, 1] min-max scaler."""
        scaler = MinMaxScaler(feature_range=(0, 1))
        return DataScaler.apply_scaler(scaler, X, dump)

    @staticmethod
    def apply_scaler(scaler, X, dump=None):
        """Fit *scaler* on X, optionally pickle it to *dump*, return transformed X."""
        scaler.fit(X)
        if dump is not None:
            _logger.debug('Store scaler to: {:s}'.format(dump))
            with open(dump, "wb") as fn:
                pickle.dump(scaler, fn)
        return scaler.transform(X)
class Root2NumpyConverter(object):
    """Convert ROOT trees into numpy arrays for a fixed set of branches."""

    def __init__(self, branches):
        """Store the branch names; a single name is wrapped into a list."""
        self.branches = branches if isinstance(branches, list) else [branches]

    def convert_to_array(self, tree, selection="", max_events=None):
        """Read the configured branches of *tree* into a structured array,
        optionally applying a selection and an event limit."""
        return root_numpy.tree2array(tree, branches=self.branches,
                                     selection=selection, start=0,
                                     stop=max_events)

    def merge(self, signals, bkgs):
        """Stack signal and background arrays into one dataset.

        :return: (data, labels) with label 1.0 for signal rows and 0.0
            for background rows, in that order.
        """
        sig = np.concatenate(signals)
        bkg = np.concatenate(bkgs)
        labels = np.append(np.ones(sig.shape[0]), np.zeros(bkg.shape[0]))
        return np.concatenate((sig, bkg)), labels
class TrainingReader(object):
    """Reader for ML training inputs: ROOT trees, numpy files or pandas/json."""

    def __init__(self, **kwargs):
        """Detect the input mode from the provided files and load the data.

        'input_file' (+ tree names) selects ROOT-tree mode or, for multiple
        .npy files, numpy mode; 'input_file_list' with .json files selects
        pandas mode.
        """
        def check_file_type(postfix):
            return all([i.endswith(postfix) for i in kwargs['input_file_list']])
        self.mode = ''
        self.numpy_input = False
        if 'input_file' in kwargs:
            if len(kwargs["input_file"]) > 1 and kwargs["input_file"][0].endswith(".npy"):
                self.numpy_input = True
                return
            self.input_file = FileHandle(file_name=kwargs["input_file"][0])
            self.signal_tree_names = kwargs["signal_tree_names"]
            self.bkg_tree_names = kwargs["bkg_tree_names"]
        if 'input_file_list' in kwargs:
            if check_file_type('.json'):
                self.mode = 'pandas'
                self.data = {}
                for fn in kwargs['input_file_list']:
                    with open(fn) as f:
                        self.data[fn.split('/')[-1]] = pd.read_json(f)

    def get_trees(self):
        """Return (signal train, bkg train, signal eval, bkg eval) tree lists."""
        signal_train_tree_names, bkg_train_tree_names, signal_eval_tree_names, bkg_eval_tree_names = \
            self.parse_tree_names()
        signal_train_trees = self.read_tree(signal_train_tree_names)
        signal_eval_trees = self.read_tree(signal_eval_tree_names)
        bkg_train_trees = self.read_tree(bkg_train_tree_names)
        bkg_eval_trees = self.read_tree(bkg_eval_tree_names)
        return signal_train_trees, bkg_train_trees, signal_eval_trees, bkg_eval_trees

    def read_tree(self, tree_names):
        """Fetch each named tree from the input file."""
        return [self.input_file.get_object_by_name(tn) for tn in tree_names]

    def parse_tree_names(self):
        """Expand regex patterns and prepend the train_/eval_ prefixes."""
        if any("re." in name for name in self.signal_tree_names):
            self.expand_tree_names(self.signal_tree_names)
        if any("re." in name for name in self.bkg_tree_names):
            self.expand_tree_names(self.bkg_tree_names)
        signal_train_tree_names = ["train_{:s}".format(signal_tree_name) for signal_tree_name in self.signal_tree_names]
        background_train_tree_names = ["train_{:s}".format(background_tree_name) for background_tree_name in
                                       self.bkg_tree_names]
        signal_eval_tree_names = ["eval_{:s}".format(signal_tree_name) for signal_tree_name in
                                  self.signal_tree_names]
        background_eval_tree_names = ["eval_{:s}".format(background_tree_name) for background_tree_name in
                                      self.bkg_tree_names]
        return signal_train_tree_names, background_train_tree_names, signal_eval_tree_names, background_eval_tree_names

    def expand_tree_names(self, tree_names):
        """Replace 're.'-prefixed patterns in *tree_names* (in place) by the
        matching tree names found in the input file."""
        expanded_tree_names = []
        tree_names_to_remove = []
        for tree_name in tree_names:
            if not tree_name.startswith("re."):
                continue
            pattern = "train_" + tree_name.replace("re.", "").replace("*", ".*")
            expanded_tree_names += list(set([str.replace(name, "train_", "")
                                             for name in [obj.GetName()
                                                          for obj in self.input_file.get_objects_by_pattern(pattern)]]))
            tree_names_to_remove.append(tree_name)
        for tree_name in tree_names_to_remove:
            tree_names.remove(tree_name)
        tree_names += expanded_tree_names

    def prepare_data(self, train_cfg, variable_list=None):
        """Build signal/background dataframes and labels (pandas mode only).

        :param train_cfg: config providing .signals and .backgrounds name lists
        :param variable_list: optional list-valued columns to explode into rows
        :return: (signal_df, bkg_df, labels) -- all None when not in pandas mode
        """
        signal_df, bkg_df, labels = None, None, None
        if self.mode == 'pandas':
            signal_dfs = [v for k, v in list(self.data.items())
                          if any(['_'+sname in k for sname in train_cfg.signals])]
            bkg_dfs = [v for k, v in list(self.data.items())
                       if any(['_' + sname in k for sname in train_cfg.backgrounds])]
            # Bug fix: the previous per-frame DataFrame.append calls discarded
            # their return value (append is not in-place), so only the FIRST
            # matching frame was ever used; append has also been removed in
            # pandas 2.x. pd.concat merges all matching frames.
            signal_df = pd.concat(signal_dfs)
            bkg_df = pd.concat(bkg_dfs)
            if variable_list:
                signal_df = explode(signal_df, lst_cols=variable_list)[variable_list]
                bkg_df = explode(bkg_df, lst_cols=variable_list)[variable_list]
            labels = np.concatenate([np.ones(len(signal_df)), np.zeros(len(bkg_df))])
        return signal_df, bkg_df, labels

    def pre_process_data(self, signal_df, bkg_df, labels, train_cfg, output_path):
        """Scale, optionally split and re-balance the training data.

        :return: (X_train, y_train, X_test, y_test); the test parts are None
            unless train_cfg.split == 'random'
        """
        # pd.concat replaces DataFrame.append (removed in pandas 2.x)
        X = pd.concat([signal_df, bkg_df])
        y = labels
        X_train = None
        y_train = None
        X_test = None
        y_test = None
        if train_cfg.scaler is not None:
            X, y = DataScaler(train_cfg.scaler).apply_scaling(X, y, dump=os.path.join(output_path, 'scalers',
                                                                                      'train_scaler.pkl'))
        if train_cfg.split == 'random':
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
        if train_cfg.imbalance == 'over_sampling':
            if sys.version_info[0] > 2:
                ros = over_sampling.RandomOverSampler(random_state=42)
                X_train, y_train = ros.fit_resample(X_train, y_train)
            else:
                _logger.error('Imbalance scaling requested which requires python3, but running in python2.')
        if X_train is None:
            assert y_train is None
            X_train = X
            y_train = y
        return X_train, y_train, X_test, y_test
class MLAnalyser(object):
    """Evaluate a trained classifier's score branch stored in ntuples:
    read per-process scores and build a ROC-style graph."""

    def __init__(self, **kwargs):
        kwargs.setdefault("batch", True)
        kwargs.setdefault("process_config_file", None)
        kwargs.setdefault("branch_name", None)
        kwargs.setdefault("tree_name", None)
        kwargs.setdefault("output_dir", '.')
        self.file_handles = [FileHandle(file_name=file_name, dataset_info=kwargs["cross_section_config"])
                             for file_name in kwargs["input_files"]]
        self.process_config_file = kwargs["process_config_file"]
        self.branch_name = kwargs["branch_name"]
        self.tree_name = kwargs["tree_name"]
        # converter reads only the classifier score branch
        self.converter = Root2NumpyConverter(self.branch_name)
        self.process_configs = parse_and_build_process_config(self.process_config_file)
        self.output_handle = OutputFileHandle(output_dir=kwargs["output_dir"])
        set_batch_mode(kwargs["batch"])

    def read_score(self, selection=None):
        """Read classifier scores for all inputs and split by process type.

        :param selection: optional TTree selection string
        :return: (signal scores, background scores) as numpy arrays
        """
        trees = {fh.process: fh.get_object_by_name(self.tree_name, "Nominal") for fh in self.file_handles}
        arrays = {process: self.converter.convert_to_array(tree, selection=selection) for process, tree in
                  list(trees.items())}
        signals = []
        backgrounds = []
        for process_name in list(trees.keys()):
            find_process_config(process_name, self.process_configs)
        for process, process_config in list(self.process_configs.items()):
            if not hasattr(process_config, "subprocesses"):
                continue
            for sub_process in process_config.subprocesses:
                if sub_process not in list(arrays.keys()):
                    continue
                # 'data' scores are counted as background here
                if process_config.type.lower() == "signal":
                    signals.append(arrays[sub_process])
                elif process_config.type.lower() == "background" or process_config.type.lower() == "data":
                    backgrounds.append(arrays[sub_process])
                else:
                    _logger.warn("Could not classify {:s}".format(sub_process))
        signal = np.concatenate(signals)
        background = np.concatenate(backgrounds)
        # NOTE(review): scores are shifted by +1 -- presumably mapping a
        # [-1, 1] classifier output onto positive values for the percentile
        # logic below; confirm against the training code
        return signal + 1., background + 1.

    def plot_roc(self, selection=None):
        """Build and store a signal-efficiency vs background-rejection graph."""
        signal, background = self.read_score(selection)
        # scan target signal efficiencies 100%, 90%, ..., 10%
        efficiencies = [100. - i * 10. for i in range(10)]
        for eff in efficiencies:
            _logger.debug("eff: {:.2f} bkg eff: {:.2f} rej: {:.2f}".format(eff,
                                                                           np.percentile(signal, eff),
                                                                           np.percentile(signal, 100. - eff)))
        cuts = [np.percentile(signal, 100. - eff) for eff in efficiencies]
        # NOTE(review): these are sums of score VALUES below the cut divided
        # by the total score sum, not event-count fractions -- verify this is
        # the intended efficiency definition
        signal_total = sum(signal)
        signal_eff = [np.sum(old_div(signal[signal < cut], signal_total)) for cut in cuts]
        bkg_total = sum(background)
        bkg_rej = [1. - np.sum(old_div(background[background < cut], bkg_total)) for cut in cuts]
        curve = ROOT.TGraph(len(efficiencies))
        curve.SetName("roc_curve")
        for b in range(len(efficiencies)):
            rej = bkg_rej[b]
            # guard against infinite rejection when no background passes
            if rej == np.inf:
                rej = 0
            curve.SetPoint(b, signal_eff[b], rej)
        plot_config = pc(name="roc_curve", xtitle="signal efficiency", ytitle="background rejection", draw="Line",
                         logy=True, watermark="Internal", lumi=1.)
        canvas = pt.plot_obj(curve, plot_config)
        fm.decorate_canvas(canvas, plot_config)
        self.output_handle.register_object(canvas)
        self.output_handle.write_and_close()
| mit |
depet/scikit-learn | examples/linear_model/plot_sgd_weighted_classes.py | 9 | 1431 | """
================================================
SGD: Separating hyperplane with weighted classes
================================================
Fit linear SVMs with and without class weighting.
Allows to handle problems with unbalanced classes.
"""
print(__doc__)

import numpy as np
import pylab as pl
from sklearn.linear_model import SGDClassifier

# create an imbalanced two-class sample: 1000 points around the origin and
# 100 points shifted to (2, 2)
np.random.seed(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * np.random.randn(n_samples_1, 2),
          0.5 * np.random.randn(n_samples_2, 2) + [2, 2]]
y = np.array([0] * (n_samples_1) + [1] * (n_samples_2), dtype=np.float64)

# shuffle samples so class order does not bias SGD updates
idx = np.arange(y.shape[0])
np.random.shuffle(idx)
X = X[idx]
y = y[idx]

# standardize features (zero mean, unit variance)
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std

# fit the model and get the separating hyperplane
clf = SGDClassifier(n_iter=100, alpha=0.01)
clf.fit(X, y)
w = clf.coef_.ravel()
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_ / w[1]

# get the separating hyperplane using weighted classes
# (minority class 1 weighted 10x)
wclf = SGDClassifier(n_iter=100, alpha=0.01, class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_.ravel()
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_ / ww[1]

# plot separating hyperplanes and samples
h0 = pl.plot(xx, yy, 'k-', label='no weights')
h1 = pl.plot(xx, wyy, 'k--', label='with weights')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.legend()

pl.axis('tight')
pl.show()
| bsd-3-clause |
TakayukiSakai/tensorflow | tensorflow/contrib/learn/python/learn/estimators/base.py | 1 | 18801 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import types
import numpy as np
import six
from six import string_types
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
def _write_with_backup(filename, content):
    """Write *content* to *filename*, keeping any existing file as '<name>.old'."""
    backup_name = filename + '.old'
    if gfile.Exists(filename):
        # preserve the previous version before overwriting
        gfile.Rename(filename, backup_name, overwrite=True)
    with gfile.Open(filename, 'w') as out:
        out.write(content)
def _copy_dir(dir_in, dir_out):
    """Recursively copy the directory tree rooted at *dir_in* into *dir_out*."""
    gfile.MakeDirs(dir_out)
    for entry in gfile.ListDirectory(dir_in):
        src = os.path.join(dir_in, entry)
        dst = os.path.join(dir_out, entry)
        if not gfile.IsDirectory(src):
            gfile.Copy(src, dst, overwrite=True)
            continue
        # descend into subdirectories
        gfile.MakeDirs(dst)
        _copy_dir(src, dst)
class TensorFlowEstimator(estimator.Estimator):
  """Base class for all TensorFlow estimators.

  Parameters:
    model_fn: Model function, that takes input `x`, `y` tensors and outputs
      prediction and loss tensors.
    n_classes: Number of classes in the target.
    batch_size: Mini batch size.
    steps: Number of steps to run over data.
    optimizer: Optimizer name (or class), for example "SGD", "Adam",
      "Adagrad".
    learning_rate: If this is constant float value, no decay function is used.
      Instead, a customized decay function can be passed that accepts
      global_step as parameter and returns a Tensor.
      e.g. exponential decay function:
      def exp_decay(global_step):
          return tf.train.exponential_decay(
              learning_rate=0.1, global_step,
              decay_steps=2, decay_rate=0.001)
    clip_gradients: Clip norm of the gradients to this value to stop
      gradient explosion.
    class_weight: None or list of n_classes floats. Weight associated with
      classes for loss computation. If not given, all classes are supposed to
      have weight one.
    continue_training: when continue_training is True, once initialized
      model will be continuely trained on every call of fit.
    config: RunConfig object that controls the configurations of the
      session, e.g. num_cores, gpu_memory_fraction, etc.
    verbose: Controls the verbosity, possible values:
      0: the algorithm and debug information is muted.
      1: trainer prints the progress.
      2: log device placement is printed.
  """

  def __init__(self,
               model_fn,
               n_classes,
               batch_size=32,
               steps=200,
               optimizer='Adagrad',
               learning_rate=0.1,
               clip_gradients=5.0,
               class_weight=None,
               continue_training=False,
               config=None,
               verbose=1):
    self.class_weight = class_weight
    self.learning_rate = learning_rate
    self.clip_gradients = clip_gradients
    # Validate string optimizer names eagerly so bad names fail at
    # construction rather than deep inside the first training step.
    if isinstance(optimizer, six.string_types):
      if optimizer not in layers.OPTIMIZER_CLS_NAMES:
        raise ValueError(
            'Optimizer name should be one of [%s], you provided %s.' %
            (', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))
    self.optimizer = optimizer
    # The user's model_fn is wrapped (see _get_model_fn) to inject the
    # class-weight constant, IS_TRAINING flag, and the optimize_loss train op.
    super(TensorFlowEstimator, self).__init__(
        model_fn=self._get_model_fn(model_fn),
        config=config)
    self.n_classes = n_classes
    self.batch_size = batch_size
    self.steps = steps
    self.verbose = verbose
    self.continue_training = continue_training
    self._data_feeder = None

  def fit(self, x, y, steps=None, monitors=None, logdir=None):
    """Neural network model from provided `model_fn` and training data.

    Note: called first time constructs the graph and initializers
    variables. Consecutives times it will continue training the same model.
    This logic follows partial_fit() interface in scikit-learn.
    To restart learning, create new estimator.

    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
        iterator that returns arrays of features. The training input
        samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of targets. The training target values
        (class labels in classification, real numbers in regression).
      steps: int, number of steps to train.
        If None or 0, train for `self.steps`.
      monitors: List of `BaseMonitor` objects to print training progress and
        invoke early stopping.
      logdir: the directory to save the log file that can be used for
        optional visualization.

    Returns:
      Returns self.
    """
    if logdir is not None:
      self._model_dir = logdir
    # The feeder is kept on the instance so consecutive fit() calls reuse it
    # (continues training per the partial_fit-like contract above).
    self._data_feeder = setup_train_data_feeder(
        x, y, n_classes=self.n_classes, batch_size=self.batch_size)
    self._train_model(input_fn=self._data_feeder.input_builder,
                      feed_fn=self._data_feeder.get_feed_dict_fn(),
                      steps=steps or self.steps,
                      monitors=monitors)
    return self

  def evaluate(self, x=None, y=None, input_fn=None, steps=None):
    """See base class."""
    feed_fn = None
    # If raw arrays are given, build a one-epoch feeder and use its
    # input/feed functions instead of any caller-provided input_fn.
    if x is not None:
      eval_data_feeder = setup_train_data_feeder(
          x, y, n_classes=self.n_classes, batch_size=self.batch_size, epochs=1)
      input_fn, feed_fn = (eval_data_feeder.input_builder,
                           eval_data_feeder.get_feed_dict_fn())
    return self._evaluate_model(
        input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps)

  def partial_fit(self, x, y):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.

    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
        iterator that returns arrays of features. The training input
        samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of targets. The training target values
        (class label in classification, real numbers in regression).

    Returns:
      Returns self.
    """
    # fit() already continues training on subsequent calls, so partial_fit
    # is a straight alias.
    return self.fit(x, y)

  def _predict(self, x, axis=-1, batch_size=None):
    # Shared inference helper for predict()/predict_proba().
    if self._graph is None:
      raise NotFittedError()
    # Use the batch size for fitting if the user did not specify one.
    if batch_size is None:
      batch_size = self.batch_size
    predict_data_feeder = setup_train_data_feeder(
        x, None, n_classes=None,
        batch_size=batch_size,
        shuffle=False, epochs=1)
    preds = self._infer_model(
        input_fn=predict_data_feeder.input_builder,
        feed_fn=predict_data_feeder.get_feed_dict_fn())
    # axis == -1 is the sentinel for "return raw model output" (used by
    # predict_proba); otherwise reduce to class indices via argmax.
    if self.n_classes > 1 and axis != -1:
      preds = preds.argmax(axis=axis)
    else:
      preds = preds  # no-op branch: raw output is returned unchanged
    return preds

  def predict(self, x, axis=1, batch_size=None):
    """Predict class or regression for `x`.

    For a classification model, the predicted class for each sample in `x` is
    returned. For a regression model, the predicted value based on `x` is
    returned.

    Args:
      x: array-like matrix, [n_samples, n_features...] or iterator.
      axis: Which axis to argmax for classification.
        By default axis 1 (next after batch) is used.
        Use 2 for sequence predictions.
      batch_size: If test set is too big, use batch size to split
        it into mini batches. By default the batch_size member
        variable is used.

    Returns:
      y: array of shape [n_samples]. The predicted classes or predicted
        value.
    """
    return self._predict(x, axis=axis, batch_size=batch_size)

  def predict_proba(self, x, batch_size=None):
    """Predict class probability of the input samples `x`.

    Args:
      x: array-like matrix, [n_samples, n_features...] or iterator.
      batch_size: If test set is too big, use batch size to split
        it into mini batches. By default the batch_size member variable is used.

    Returns:
      y: array of shape [n_samples, n_classes]. The predicted
        probabilities for each class.
    """
    # axis defaults to -1 in _predict, which skips the argmax and returns
    # the raw per-class outputs.
    return self._predict(x, batch_size=batch_size)

  def get_tensor(self, name):
    """Returns tensor by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Tensor.
    """
    if self._graph is None:
      raise NotFittedError
    return self._graph.get_tensor_by_name(name)

  def save(self, path):
    """Saves checkpoints and graph to given path.

    Args:
      path: Folder to save model to.
    """
    if self._graph is None:
      raise NotFittedError
    # Copy model dir into new path.
    _copy_dir(self.model_dir, path)
    # Save model definition: keep only non-callable, non-None params so the
    # result is JSON-serializable and can be fed back as constructor kwargs.
    all_params = self.get_params()
    params = {}
    for key, value in all_params.items():
      if not callable(value) and value is not None:
        params[key] = value
    params['class_name'] = type(self).__name__
    model_def = json.dumps(
        params,
        default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
    _write_with_backup(os.path.join(path, 'model.def'), model_def)

  def _restore(self, path):
    """Restores this estimator from given path.

    Note: will rebuild the graph and initialize all parameters,
    and will ignore provided model.

    Args:
      path: Path to checkpoints and other information.
    """
    # Subclasses are expected to implement the actual restore logic.
    raise NotImplementedError

  @classmethod
  def restore(cls, path, config=None):
    # pylint: disable=unused-argument
    """Restores model from give path.

    Args:
      path: Path to the checkpoints and other model information.
      config: RunConfig object that controls the configurations of the session,
        e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
        reconfigured.

    Returns:
      Estimator, object of the subclass of TensorFlowEstimator.

    Raises:
      ValueError: if `path` does not contain a model definition.
    """
    model_def_filename = os.path.join(path, 'model.def')
    if not os.path.exists(model_def_filename):
      raise ValueError("Restore folder doesn't contain model definition.")
    # list of parameters that are allowed to be reconfigured
    reconfigurable_params = ['_config']
    _config = config  # pylint: disable=unused-variable,invalid-name
    with gfile.Open(model_def_filename) as fmodel:
      model_def = json.loads(fmodel.read())
      # TensorFlow binding requires parameters to be strings not unicode.
      # Only issue in Python2.
      for key, value in model_def.items():
        if isinstance(value, string_types) and not isinstance(value, str):
          model_def[key] = str(value)
        # NOTE(review): locals() lookup relies on the `_config` local above
        # matching the reconfigurable parameter name -- fragile; verify
        # before adding new entries to reconfigurable_params.
        if key in reconfigurable_params:
          new_value = locals()[key]
          if new_value is not None:
            model_def[key] = new_value
    class_name = model_def.pop('class_name')
    if class_name == 'TensorFlowEstimator':
      custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
      # pylint: disable=protected-access
      custom_estimator._restore(path)
      return custom_estimator
    # To avoid cyclical dependencies, import inside the function instead of
    # the beginning of the file.
    # pylint: disable=g-import-not-at-top
    from tensorflow.contrib.learn.python.learn import estimators
    # Estimator must be one of the defined estimators in the __init__ file.
    result = getattr(estimators, class_name)(**model_def)
    # pylint: disable=protected-access
    result._restore(path)
    return result

  def _get_model_fn(self, model_fn):
    """Backward compatibility way of adding class weight and IS_TRAINING.

    TODO(ipolosukhin): Remove this function after new layers are available.
    Specifically:
     * dropout and batch norm should work via update ops.
     * class weights should be retrieved from weights column or hparams.

    Args:
      model_fn: Core model function.

    Returns:
      Model function.
    """
    def _model_fn(features, targets, mode):
      """Model function."""
      # Expose the training/inference mode to legacy layers via a graph
      # collection (dropout/batch-norm read 'IS_TRAINING').
      ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
      if self.class_weight is not None:
        # Made available to loss computation by its well-known tensor name.
        constant_op.constant(self.class_weight, name='class_weight')
      predictions, loss = model_fn(features, targets)
      # learning_rate may be a constant or a callable taking global_step
      # (e.g. a decay schedule) -- see the class docstring.
      if isinstance(self.learning_rate, types.FunctionType):
        learning_rate = self.learning_rate(contrib_framework.get_global_step())
      else:
        learning_rate = self.learning_rate
      # optimizer may likewise be a factory callable taking the learning rate.
      if isinstance(self.optimizer, types.FunctionType):
        optimizer = self.optimizer(learning_rate)
      else:
        optimizer = self.optimizer
      train_op = layers.optimize_loss(
          loss,
          contrib_framework.get_global_step(),
          learning_rate=learning_rate,
          optimizer=optimizer,
          clip_gradients=self.clip_gradients)
      return predictions, loss, train_op
    return _model_fn
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
  """TensorFlow Base Transformer class.

  Adapts a TensorFlowEstimator to the scikit-learn transformer interface
  (fit / transform / fit_transform).
  """

  def transform(self, x):
    """Transform `x` using trained transformer."""
    return (super(TensorFlowBaseTransformer, self).predict(
        x, axis=1, batch_size=None))

  def fit(self, x, y=None, monitor=None, logdir=None):
    """Fit a transformer.

    Args:
      x: training input samples (matrix, tensor or iterator of features).
      y: optional targets; transformers are typically unsupervised.
      monitor: optional list of `BaseMonitor` objects, forwarded to training.
      logdir: optional directory for training logs/visualization.

    Returns:
      Returns self.
    """
    # Bug fix: `monitor` and `logdir` used to be accepted but silently
    # discarded (hard-coded None was forwarded). Forward them to the parent.
    return (super(TensorFlowBaseTransformer, self).fit(
        x, y, monitors=monitor, logdir=logdir))

  def fit_transform(self, x, y=None, monitor=None, logdir=None):
    """Fit transformer and transform `x` using trained transformer."""
    return self.fit(x, y, monitor=monitor, logdir=logdir).transform(x)
class DeprecatedMixin(object):
  """This is mixin for deprecated TensorFlowYYY classes."""

  def __init__(self, *args, **kwargs):
    # Warn that the TensorFlowYYY name is deprecated in favor of plain YYY.
    this_class = type(self).__name__
    alternative_class = this_class[len('TensorFlow'):]
    logging.warning(
        '%s class is deprecated. Please consider using %s as an alternative.',
        this_class, alternative_class)
    # Handle deprecated arguments.
    # Note: double-underscore name is mangled to
    # _DeprecatedMixin__deprecated_n_classes, keeping it private to the mixin.
    self.__deprecated_n_classes = kwargs.get('n_classes', 0)
    if self.__deprecated_n_classes < 1 and 'n_classes' in kwargs:
      kwargs.pop('n_classes')
    self.batch_size = kwargs.pop('batch_size', 32)
    self.steps = kwargs.pop('steps', 200)
    if 'optimizer' in kwargs or 'learning_rate' in kwargs:
      self.learning_rate = kwargs.pop('learning_rate', 0.1)
      self.optimizer = kwargs.pop('optimizer', 'Adagrad')
      # Callable learning rates need the new Estimator API; reject here.
      if isinstance(self.learning_rate, types.FunctionType):
        raise ValueError('Function-like learning_rate are not supported '
                         'consider using custom Estimator.')
      else:
        # Only reached when learning_rate is a plain value (the `if` raises).
        learning_rate = self.learning_rate
      # Materialize the optimizer object from a factory callable or a
      # registered name so the new-style constructor receives an instance.
      if isinstance(self.optimizer, types.FunctionType):
        optimizer = self.optimizer(learning_rate)
      elif isinstance(self.optimizer, six.string_types):
        optimizer = layers.OPTIMIZER_CLS_NAMES[self.optimizer](learning_rate)
      else:
        optimizer = self.optimizer
      kwargs['optimizer'] = optimizer
    if 'class_weight' in kwargs:
      raise ValueError('Sorry we switched interface for providing class '
                       'weights. Please use weight column instead which '
                       'provides more granular control (per example).')
    # The remaining legacy kwargs are dropped with a warning.
    if 'clip_gradients' in kwargs:
      logging.warning('clip_gradients argument in %s is now ignored.' %
                      this_class)
      kwargs.pop('clip_gradients')
    if 'continue_training' in kwargs:
      logging.warning('continue_training argument in %s is now ignored.' %
                      this_class)
      kwargs.pop('continue_training')
    if 'verbose' in kwargs:
      logging.warning('verbose argument in %s is now ignored.' %
                      this_class)
      kwargs.pop('verbose')
    super(DeprecatedMixin, self).__init__(*args, **kwargs)

  def fit(self, x, y, steps=None, batch_size=None, monitors=None, logdir=None):
    # Legacy fit: fall back to the batch_size/steps captured in __init__.
    if logdir is not None:
      self._model_dir = logdir
    return super(DeprecatedMixin, self).fit(
        x=x, y=y, steps=steps or self.steps,
        batch_size=batch_size or self.batch_size, monitors=monitors)

  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              axis=1):
    """Predict class or regression for `x`."""
    if x is not None:
      # Raw array input: wrap it in a one-epoch, unshuffled feeder.
      predict_data_feeder = setup_train_data_feeder(
          x, None, n_classes=None,
          batch_size=batch_size or self.batch_size,
          shuffle=False, epochs=1)
      result = super(DeprecatedMixin, self)._infer_model(
          input_fn=predict_data_feeder.input_builder,
          feed_fn=predict_data_feeder.get_feed_dict_fn(),
          outputs=outputs)
    else:
      result = super(DeprecatedMixin, self)._infer_model(
          input_fn=input_fn, outputs=outputs)
    # For legacy multi-class estimators, reduce raw outputs to class indices.
    if self.__deprecated_n_classes > 1 and axis is not None:
      return np.argmax(result, axis)
    return result

  def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None):
    # axis=None skips the argmax above, returning per-class outputs.
    return self.predict(x=x, input_fn=input_fn, batch_size=batch_size,
                        outputs=outputs, axis=None)

  def save(self, path):
    """Saves checkpoints and graph to given path.

    Args:
      path: Folder to save model to.
    """
    # Copy model dir into new path.
    _copy_dir(self.model_dir, path)
| apache-2.0 |
calum-chamberlain/EQcorrscan | eqcorrscan/tests/install_test.py | 2 | 1067 | """Script to test if all dependencies are installed and running for the \
EQcorrscan package.
"""
import unittest
class TestImport(unittest.TestCase):
    def test_import(self):
        """Check that every hard dependency of EQcorrscan can be imported.

        Refactor: the four copy-pasted try/except blocks are replaced by a
        single data-driven loop over (module, reported name) pairs, keeping
        the printed messages and the final assertion identical.
        """
        import importlib
        import sys
        if sys.version_info.major == 2:
            sys.path.insert(0, '/usr/lib/pyshared/python2.7')
            # Insert path for travis
        # (module to import, name reported to the user on failure)
        dependencies = [
            ('numpy', 'numpy'),
            ('matplotlib.pyplot', 'matplotlib'),
            ('scipy', 'scipy'),
            ('obspy', 'obspy'),
        ]
        failures = 0
        for module_name, display_name in dependencies:
            try:
                importlib.import_module(module_name)
            except ImportError:
                print("You have not properly installed: " + display_name)
                failures += 1
        self.assertEqual(failures, 0)
# Allow running this test module directly: `python install_test.py`.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/tests/test_lda.py | 14 | 2947 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# Alternative 3-class labelling of the same points, used to check that LDA
# cannot perfectly separate a non-separable labelling (see test_lda_predict).
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]])
def test_lda_predict():
    """
    LDA classification.

    This checks that LDA implements fit and predict and returns
    correct values for a simple toy dataset.
    """
    clf = lda.LDA()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # Assure that it works with 1D data
    y_pred1 = clf.fit(X1, y).predict(X1)
    assert_array_equal(y_pred1, y)

    # Test probas estimates: probability of class 2 above 0.5 maps to
    # label 2, otherwise label 1 (hence the `+ 1`).
    y_proba_pred1 = clf.predict_proba(X1)
    assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
    # Log-probabilities must be consistent with the probabilities.
    y_log_proba_pred1 = clf.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)

    # Primarily test for commit 2f34950 -- "reuse" of priors
    y_pred3 = clf.fit(X, y3).predict(X)
    # LDA shouldn't be able to separate those
    assert_true(np.any(y_pred3 != y3))
def test_lda_transform():
    """LDA transform on the 2-class toy data should yield one component."""
    model = lda.LDA().fit(X, y)
    projected = model.transform(X)
    assert_equal(projected.shape[1], 1)
def test_lda_orthogonality():
    """Check that LDA orders its components by between-class separation
    and whitens the within-class covariance."""
    # arrange four classes with their means in a kite-shaped pattern
    # the longer distance should be transformed to the first component, and
    # the shorter distance to the second component.
    means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])

    # We construct perfectly symmetric distributions, so the LDA can estimate
    # precise means.
    scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
                        [0, 0, 0.1], [0, 0, -0.1]])

    # Replicate the scatter around every class mean.
    # (These X/y shadow the module-level fixtures within this test only.)
    X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
    y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])

    # Fit LDA and transform the means
    clf = lda.LDA().fit(X, y)
    means_transformed = clf.transform(means)

    # Unit vectors along the long (classes 0<->3) and short (1<->2) axes.
    d1 = means_transformed[3] - means_transformed[0]
    d2 = means_transformed[2] - means_transformed[1]
    d1 /= np.sqrt(np.sum(d1**2))
    d2 /= np.sqrt(np.sum(d2**2))

    # the transformed within-class covariance should be the identity matrix
    assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))

    # the means of classes 0 and 3 should lie on the first component
    assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)

    # the means of classes 1 and 2 should lie on the second component
    assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
| apache-2.0 |
sinhrks/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 24 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
# Apply a linear shear so the blobs are not axis-aligned and the decision
# surfaces become more interesting to plot.
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
# Fit and plot one figure per multi-class strategy.
for multi_class in ('multinomial', 'ovr'):
    clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
                             multi_class=multi_class).fit(X, y)

    # print the training scores
    print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))

    # create a mesh to plot in
    h = .02  # step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')

    # Plot also the training points
    colors = "bry"
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)

    # Plot the three one-against-all classifiers
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    coef = clf.coef_
    intercept = clf.intercept_

    def plot_hyperplane(c, color):
        # Draw the line coef[c] . (x0, x1) + intercept[c] == 0 across the
        # current axis limits.
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([xmin, xmax], [line(xmin), line(xmax)],
                 ls="--", color=color)

    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)

plt.show()
| bsd-3-clause |
xzh86/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################


def test_invalid_sample_without_replacement_algorithm():
    # An unknown `method` name must raise a ValueError.
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    """Run the shared check_* suite against every sampling method."""
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")

    for m in methods:
        # Closure over `m`; safe because the checks run within the same
        # iteration (before `m` is rebound).
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None):
            return sample_without_replacement(n_population, n_samples,
                                              method=m,
                                              random_state=random_state)

        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    """Boundary conditions for sampling without replacement."""
    # n_population < n_samples
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)

    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))

    # n_population >= n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))

    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    """For every 0 <= k <= N, the sample has length k, all items are
    distinct, and all items fall in [0, N).

    (Heavily inspired by test_random.py from python-core.)
    """
    population_size = 100
    for sample_size in range(population_size + 1):
        drawn = sample_without_replacement(population_size, sample_size)
        assert_equal(len(drawn), sample_size)
        distinct = np.unique(drawn)
        assert_equal(np.size(distinct), sample_size)
        assert_true(np.all(distinct < population_size))

    # Edge case: empty population, empty sample.
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible permutations
    n_population = 10

    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000

    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
        n_expected = combinations(n_population, n_samples, exact=True)

        # Dict used as an ordered set of observed combinations.
        output = {}
        for i in range(n_trials):
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None

            if len(output) == n_expected:
                break
        else:
            # for/else: reached only if the loop never `break`s, i.e. some
            # combination was never produced within n_trials draws.
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    """Draw per-column class samples and check the empirical frequencies
    approximate the requested (or implicit uniform) probabilities.

    (The misspelt local name `class_probabilites` is kept as-is.)
    """
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        # Empirical frequency of each class in column k.
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Implicit class probabilities
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        # minlength keeps zero-probability classes in the count vector.
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    """Invalid inputs to random_choice_csc must raise ValueError."""
    # the length of an array in classes and class_probabilites is mismatched
    classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (strings)
    classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (floats)
    classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # Given probabilities don't sum to 1
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
| bsd-3-clause |
maropu/spark | python/pyspark/pandas/frame.py | 1 | 427279 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict, defaultdict, namedtuple
from collections.abc import Mapping
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
import types
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import (
Any,
Optional,
List,
Tuple,
Union,
Generic,
TypeVar,
Iterable,
Iterator,
Dict,
Callable,
cast,
TYPE_CHECKING,
)
import datetime
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like, is_scalar
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
from pandas.io.formats.style import Styler # noqa: F401 (SPARK-34943)
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import StorageLevel
from pyspark import sql as spark
from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import ( # noqa: F401 (SPARK-34943)
BooleanType,
DataType,
DoubleType,
FloatType,
NumericType,
StringType,
StructField,
StructType,
ArrayType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas.accessors import PandasOnSparkFrameMethods
from pyspark.pandas.config import option_context, get_option
from pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods
from pyspark.pandas.utils import (
align_diff_frames,
column_labels_level,
combine_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
is_testing,
name_like_string,
same_anchor,
scol_for,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
validate_how,
verify_temp_column_name,
)
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.ml import corr
from pyspark.pandas.typedef import (
as_spark_type,
infer_return_type,
spark_type_to_pandas_dtype,
DataFrameType,
SeriesType,
Scalar,
ScalarType,
)
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
# These regular expression patterns are complied and defined here to avoid to compile the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas'
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
    r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$"
)
# Shared docstring template for the flexible binary arithmetic wrappers
# (DataFrame.add/radd/sub/mul/div/...); formatted with `desc`, `op_name`,
# `equiv` and `reverse` before being attached to each method.  Literal braces
# in the doctest examples are escaped as `{{`/`}}` for str.format.
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ps.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(df)
angles degrees
circle 0 720
triangle 6 360
rectangle 8 720
>>> df + df + df
angles degrees
circle 0 1080
triangle 9 540
rectangle 12 1080
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.floordiv(10)
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.rfloordiv(10) # doctest: +SKIP
angles degrees
circle inf 0.0
triangle 3.0 0.0
rectangle 2.0 0.0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
# Type variable carried by the (variadic-generic) DataFrame[T] annotation.
T = TypeVar("T")
def _create_tuple_for_frame_type(params):
    """
    Workaround to support a variadic generic in DataFrame.

    See https://github.com/python/typing/issues/193.
    The given type hints are always wrapped in a ``Tuple[...]`` to mimic a
    variadic generic; named hints are converted into fresh ``NameTypeHolder``
    subclasses carrying the column name and type as class attributes.
    """
    from pyspark.pandas.typedef import NameTypeHolder
    # Normalize ``zip`` input (e.g. ``zip(names, types)``) into a list of
    # slices so the branches below only deal with slices and plain types.
    if isinstance(params, zip):
        params = [slice(name, tpe) for name, tpe in params]
    # A single ``DataFrame["name": type]`` hint arrives as one slice.
    if isinstance(params, slice):
        params = (params,)
    # Case 1: a sized iterable of slices, i.e. DataFrame["a": int, "b": str].
    if (
        hasattr(params, "__len__")
        and isinstance(params, Iterable)
        and all(isinstance(param, slice) for param in params)
    ):
        for param in params:
            # A non-None ``step`` means a three-part slice such as
            # DataFrame["a":int:1], which is not a valid name/type pair.
            if isinstance(param.start, str) and param.step is not None:
                raise TypeError(
                    "Type hints should be specified as "
                    "DataFrame['name': type]; however, got %s" % param
                )
        name_classes = []
        for param in params:
            # One fresh NameTypeHolder subclass per column so each can carry
            # its own (name, tpe) pair as class attributes.
            new_class = type("NameType", (NameTypeHolder,), {})
            new_class.name = param.start
            # When the given argument is a numpy's dtype instance.
            new_class.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop
            name_classes.append(new_class)
        return Tuple[tuple(name_classes)]
    # Case 2: unnamed hints, i.e. DataFrame[int, str] or a single DataFrame[int].
    if not isinstance(params, Iterable):
        params = [params]
    new_params = []
    for param in params:
        if isinstance(param, ExtensionDtype):
            # pandas extension dtypes are instances, not types; wrap them in a
            # NameTypeHolder subclass so they can appear in a Tuple[...].
            new_class = type("NameType", (NameTypeHolder,), {})
            new_class.tpe = param
            new_params.append(new_class)
        else:
            # Unwrap numpy dtype instances to their underlying scalar type.
            new_params.append(param.type if isinstance(param, np.dtype) else param)
    return Tuple[tuple(new_params)]
if (3, 5) <= sys.version_info < (3, 7) and __name__ != "__main__":
    from typing import GenericMeta  # type: ignore
    # This is a workaround to support variadic generic in DataFrame in Python 3.5+.
    # See https://github.com/python/typing/issues/193
    # We wrap the input params by a tuple to mimic variadic generic.
    old_getitem = GenericMeta.__getitem__  # type: ignore
    # Monkey-patch GenericMeta so that subscripting a class flagged with
    # ``is_dataframe`` routes through _create_tuple_for_frame_type; every
    # other generic keeps the stock behavior.  Only needed on 3.5/3.6 where
    # __class_getitem__-style hooks are unavailable.
    def new_getitem(self, params):
        if hasattr(self, "is_dataframe"):
            return old_getitem(self, _create_tuple_for_frame_type(params))
        else:
            return old_getitem(self, params)
    GenericMeta.__getitem__ = new_getitem  # type: ignore
class DataFrame(Frame, Generic[T]):
"""
pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark
DataFrame internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \
or pandas-on-Spark Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas DataFrame, a Spark DataFrame, and a pandas-on-Spark Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ps.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from pandas DataFrame
>>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ps.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
    def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
        # Four construction paths, from most to least internal:
        #   1) an InternalFrame (internal use): adopt it as-is;
        #   2) a Spark DataFrame: wrap it (index_spark_columns=None, so a
        #      default index gets attached);
        #   3) a pandas-on-Spark Series: promote to a one-column frame and
        #      reuse that frame's InternalFrame;
        #   4) anything else: build (or take) a pandas DataFrame, then convert.
        # For paths 1-3 the remaining constructor arguments must stay at their
        # defaults; the asserts below enforce that contract.
        if isinstance(data, InternalFrame):
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            internal = data
        elif isinstance(data, spark.DataFrame):
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            internal = InternalFrame(spark_frame=data, index_spark_columns=None)
        elif isinstance(data, ps.Series):
            assert index is None
            assert columns is None
            assert dtype is None
            assert not copy
            data = data.to_frame()
            internal = data._internal
        else:
            if isinstance(data, pd.DataFrame):
                assert index is None
                assert columns is None
                assert dtype is None
                assert not copy
                pdf = data
            else:
                pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
            internal = InternalFrame.from_pandas(pdf)
        # object.__setattr__ bypasses DataFrame's own attribute handling
        # (presumably overridden for column assignment -- not visible here).
        object.__setattr__(self, "_internal_frame", internal)
    @property
    def _pssers(self):
        """Return a dict of column label -> Series which anchors `self`."""
        from pyspark.pandas.series import Series
        if not hasattr(self, "_psseries"):
            # First access: lazily build the cache, one Series per column label.
            object.__setattr__(
                self,
                "_psseries",
                {label: Series(data=self, index=label) for label in self._internal.column_labels},
            )
        else:
            psseries = self._psseries
            # The cache must track the current set of column labels exactly.
            assert len(self._internal.column_labels) == len(psseries), (
                len(self._internal.column_labels),
                len(psseries),
            )
            if any(self is not psser._psdf for psser in psseries.values()):
                # Refresh the dict to contain only Series anchoring `self`.
                self._psseries = {
                    label: psseries[label]
                    if self is psseries[label]._psdf
                    else Series(data=self, index=label)
                    for label in self._internal.column_labels
                }
        return self._psseries
    @property
    def _internal(self) -> InternalFrame:
        # Read-only view of the InternalFrame stored by __init__ /
        # _update_internal_frame.
        return self._internal_frame
    def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool = True):
        """
        Update InternalFrame with the given one.
        If the column_label is changed or the new InternalFrame is not the same `anchor`,
        disconnect the link to the Series and create a new one.
        If `requires_same_anchor` is `False`, checking whether or not the same anchor is ignored
        and force to update the InternalFrame, e.g., replacing the internal with the resolved_copy,
        updating the underlying Spark DataFrame which need to combine a different Spark DataFrame.
        :param internal: the new InternalFrame
        :param requires_same_anchor: whether checking the same anchor
        """
        from pyspark.pandas.series import Series
        if hasattr(self, "_psseries"):
            psseries = {}
            # Walk old and new column labels in parallel; zip_longest pads
            # with None where columns were added or removed.
            for old_label, new_label in zip_longest(
                self._internal.column_labels, internal.column_labels
            ):
                if old_label is not None:
                    psser = self._pssers[old_label]
                    renamed = old_label != new_label
                    not_same_anchor = requires_same_anchor and not same_anchor(internal, psser)
                    if renamed or not_same_anchor:
                        # Detach the existing Series: re-anchor it onto a
                        # one-column DataFrame holding its current data, so
                        # external references keep their old values.
                        psdf = DataFrame(self._internal.select_column(old_label))  # type: DataFrame
                        psser._update_anchor(psdf)
                        psser = None
                else:
                    psser = None
                if new_label is not None:
                    if psser is None:
                        # New (or just-detached) column: anchor a fresh Series.
                        psser = Series(data=self, index=new_label)
                    psseries[new_label] = psser
            self._psseries = psseries
        self._internal_frame = internal
        if hasattr(self, "_repr_pandas_cache"):
            # Cached repr is stale once the internal frame changes.
            del self._repr_pandas_cache
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
return 2 for DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', None],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
NaN 7 8
>>> df.ndim
2
"""
return 2
@property
def axes(self) -> List:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]
"""
return [self.index, self.columns]
    def _reduce_for_stat_function(
        self,
        sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
        name: str,
        axis: Optional[Union[int, str]] = None,
        numeric_only: bool = True,
        **kwargs: Any
    ) -> "Series":
        """
        Applies sfun to each column and returns a pd.Series where the number of rows equals the
        number of columns.
        Parameters
        ----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
            a 2-arg function that takes a Column and its DataType and returns a Column.
        name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'.
        numeric_only : bool, default True
            Include only float, int, boolean columns. False is not supported. This parameter
            is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter
            currently.
        """
        from inspect import signature
        from pyspark.pandas.series import Series, first_series
        axis = validate_axis(axis)
        if axis == 0:
            # Column-wise reduction: one Spark select producing a single row,
            # which is then transposed into the resulting Series.
            min_count = kwargs.get("min_count", 0)
            # Dummy (null) index column for the single-row result frame.
            exprs = [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
            new_column_labels = []
            # sfun may take (Column) or (Column, DataType); dispatch on arity.
            num_args = len(signature(sfun).parameters)
            for label in self._internal.column_labels:
                spark_column = self._internal.spark_column_for(label)
                spark_type = self._internal.spark_type_for(label)
                is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType))
                keep_column = not numeric_only or is_numeric_or_boolean
                if keep_column:
                    if num_args == 1:
                        # Only pass in the column if sfun accepts only one arg
                        scol = cast(Callable[[Column], Column], sfun)(spark_column)
                    else:  # must be 2
                        assert num_args == 2
                        # Pass in both the column and its data type if sfun accepts two args
                        scol = cast(Callable[[Column, DataType], Column], sfun)(
                            spark_column, spark_type
                        )
                    if min_count > 0:
                        # Emulate pandas' min_count: null out the aggregate
                        # when fewer than min_count values are non-null.
                        scol = F.when(
                            Frame._count_expr(spark_column, spark_type) >= min_count, scol
                        )
                    exprs.append(scol.alias(name_like_string(label)))
                    new_column_labels.append(label)
            if len(exprs) == 1:
                # No column survived the numeric_only filter: empty result.
                return Series([])
            sdf = self._internal.spark_frame.select(*exprs)
            # The data is expected to be small so it's fine to transpose/use default index.
            with ps.option_context("compute.max_rows", 1):
                internal = InternalFrame(
                    spark_frame=sdf,
                    index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
                    column_labels=new_column_labels,
                    column_label_names=self._internal.column_label_names,
                )
                return first_series(DataFrame(internal).transpose())
        else:
            # Row-wise reduction (axis=1).
            # Here we execute with the first 1000 to get the return type.
            # If the records were less than 1000, it uses pandas API directly for a shortcut.
            limit = get_option("compute.shortcut_limit")
            pdf = self.head(limit + 1)._to_internal_pandas()
            pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs)
            if len(pdf) <= limit:
                # The whole frame fit in the sample; the pandas result is exact.
                return Series(pser)
            # Otherwise run a pandas UDF over the full data, using the dtype
            # inferred from the sample as the UDF's declared return type.
            @pandas_udf(returnType=as_spark_type(pser.dtype.type))  # type: ignore
            def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
                return getattr(pd.concat(cols, axis=1), name)(
                    axis=axis, numeric_only=numeric_only, **kwargs
                )
            column_name = verify_temp_column_name(
                self._internal.spark_frame.select(self._internal.index_spark_columns),
                "__calculate_columns_axis__",
            )
            sdf = self._internal.spark_frame.select(
                self._internal.index_spark_columns
                + [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)]
            )
            internal = InternalFrame(
                spark_frame=sdf,
                index_spark_columns=[
                    scol_for(sdf, col) for col in self._internal.index_spark_column_names
                ],
                index_names=self._internal.index_names,
                index_fields=self._internal.index_fields,
            )
            return first_series(DataFrame(internal)).rename(pser.name)
    def _psser_for(self, label):
        """
        Create Series with a proper column label.
        The given label must be verified to exist in `InternalFrame.column_labels`.
        For example, in some method, self is like:
        >>> self = ps.range(3)
        `self._psser_for(label)` can be used with `InternalFrame.column_labels`:
        >>> self._psser_for(self._internal.column_labels[0])
        0    0
        1    1
        2    2
        Name: id, dtype: int64
        `self._psser_for(label)` must not be used directly with user inputs.
        In that case, `self[label]` should be used instead, which checks the label exists or not:
        >>> self['id']
        0    0
        1    1
        2    2
        Name: id, dtype: int64
        """
        # Looks the anchored Series up in (and lazily populates) _pssers;
        # raises KeyError for an unknown label rather than validating it.
        return self._pssers[label]
def _apply_series_op(self, op, should_resolve: bool = False):
applied = []
for label in self._internal.column_labels:
applied.append(op(self._psser_for(label)))
internal = self._internal.with_new_columns(applied)
if should_resolve:
internal = internal.resolved_copy
return DataFrame(internal)
    # Arithmetic Operators
    def _map_series_op(self, op, other):
        """Apply the Series operator named ``op`` ("add", "sub", ...)
        column-wise against ``other`` (a scalar or another DataFrame)."""
        from pyspark.pandas.base import IndexOpsMixin
        if not isinstance(other, DataFrame) and (
            isinstance(other, IndexOpsMixin) or is_sequence(other)
        ):
            # Series/Index/sequence operands are rejected (unlike pandas).
            raise TypeError(
                "%s with a sequence is currently not supported; "
                "however, got %s." % (op, type(other).__name__)
            )
        if isinstance(other, DataFrame):
            if self._internal.column_labels_level != other._internal.column_labels_level:
                raise ValueError("cannot join with no overlapping index names")
            if not same_anchor(self, other):
                # Different DataFrames
                def apply_op(psdf, this_column_labels, that_column_labels):
                    # Invoked by align_diff_frames on the combined frame;
                    # pairs up the matching column labels from each side.
                    for this_label, that_label in zip(this_column_labels, that_column_labels):
                        yield (
                            getattr(psdf._psser_for(this_label), op)(
                                psdf._psser_for(that_label)
                            ).rename(this_label),
                            this_label,
                        )
                return align_diff_frames(apply_op, self, other, fillna=True, how="full")
            else:
                # Same anchor: operate column by column.  Columns present in
                # only one frame become all-null columns of that column's type.
                applied = []
                column_labels = []
                for label in self._internal.column_labels:
                    if label in other._internal.column_labels:
                        applied.append(getattr(self._psser_for(label), op)(other._psser_for(label)))
                    else:
                        applied.append(
                            F.lit(None)
                            .cast(self._internal.spark_type_for(label))
                            .alias(name_like_string(label))
                        )
                    column_labels.append(label)
                for label in other._internal.column_labels:
                    if label not in column_labels:
                        applied.append(
                            F.lit(None)
                            .cast(other._internal.spark_type_for(label))
                            .alias(name_like_string(label))
                        )
                        column_labels.append(label)
                internal = self._internal.with_new_columns(applied, column_labels=column_labels)
                return DataFrame(internal)
        else:
            # Scalar (or other supported) operand: broadcast to every column.
            return self._apply_series_op(lambda psser: getattr(psser, op)(other))
    # Binary arithmetic dunders delegate column-wise via _map_series_op;
    # __abs__ / __neg__ apply the unary op to every column Series.
    def __add__(self, other) -> "DataFrame":
        return self._map_series_op("add", other)
    def __radd__(self, other) -> "DataFrame":
        return self._map_series_op("radd", other)
    def __div__(self, other) -> "DataFrame":
        return self._map_series_op("div", other)
    def __rdiv__(self, other) -> "DataFrame":
        return self._map_series_op("rdiv", other)
    def __truediv__(self, other) -> "DataFrame":
        return self._map_series_op("truediv", other)
    def __rtruediv__(self, other) -> "DataFrame":
        return self._map_series_op("rtruediv", other)
    def __mul__(self, other) -> "DataFrame":
        return self._map_series_op("mul", other)
    def __rmul__(self, other) -> "DataFrame":
        return self._map_series_op("rmul", other)
    def __sub__(self, other) -> "DataFrame":
        return self._map_series_op("sub", other)
    def __rsub__(self, other) -> "DataFrame":
        return self._map_series_op("rsub", other)
    def __pow__(self, other) -> "DataFrame":
        return self._map_series_op("pow", other)
    def __rpow__(self, other) -> "DataFrame":
        return self._map_series_op("rpow", other)
    def __mod__(self, other) -> "DataFrame":
        return self._map_series_op("mod", other)
    def __rmod__(self, other) -> "DataFrame":
        return self._map_series_op("rmod", other)
    def __floordiv__(self, other) -> "DataFrame":
        return self._map_series_op("floordiv", other)
    def __rfloordiv__(self, other) -> "DataFrame":
        return self._map_series_op("rfloordiv", other)
    def __abs__(self) -> "DataFrame":
        return self._apply_series_op(lambda psser: abs(psser))
    def __neg__(self) -> "DataFrame":
        return self._apply_series_op(lambda psser: -psser)
    def add(self, other) -> "DataFrame":
        return self + other
    # create accessor for plot
    plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)
    # create accessor for Spark related methods.
    spark = CachedAccessor("spark", SparkFrameMethods)
    # create accessor for pandas-on-Spark specific methods.
    pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkFrameMethods)
    # keep the name "koalas" for backward compatibility.
    koalas = CachedAccessor("koalas", PandasOnSparkFrameMethods)
    def hist(self, bins=10, **kwds):
        # Thin delegate to the plot accessor; the docstring is copied below.
        return self.plot.hist(bins, **kwds)
    hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
    def kde(self, bw_method=None, ind=None, **kwds):
        # Thin delegate to the plot accessor; the docstring is copied below.
        return self.plot.kde(bw_method, ind, **kwds)
    kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__
    # Flex arithmetic methods share the _flex_doc_FRAME docstring template.
    add.__doc__ = _flex_doc_FRAME.format(
        desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd"
    )
    # Flexible binary operators: each is a thin wrapper over the matching
    # Python operator (which dispatches through _map_series_op above), with
    # documentation generated from the shared _flex_doc_FRAME template.
    def radd(self, other) -> "DataFrame":
        return other + self
    radd.__doc__ = _flex_doc_FRAME.format(
        desc="Addition", op_name="+", equiv="other + dataframe", reverse="add"
    )
    def div(self, other) -> "DataFrame":
        return self / other
    div.__doc__ = _flex_doc_FRAME.format(
        desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv"
    )
    divide = div  # pandas-compatible alias
    def rdiv(self, other) -> "DataFrame":
        return other / self
    rdiv.__doc__ = _flex_doc_FRAME.format(
        desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div"
    )
    def truediv(self, other) -> "DataFrame":
        return self / other
    truediv.__doc__ = _flex_doc_FRAME.format(
        desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv"
    )
    def rtruediv(self, other) -> "DataFrame":
        return other / self
    rtruediv.__doc__ = _flex_doc_FRAME.format(
        desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv"
    )
    def mul(self, other) -> "DataFrame":
        return self * other
    mul.__doc__ = _flex_doc_FRAME.format(
        desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul"
    )
    multiply = mul  # pandas-compatible alias
    def rmul(self, other) -> "DataFrame":
        return other * self
    rmul.__doc__ = _flex_doc_FRAME.format(
        desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul"
    )
    def sub(self, other) -> "DataFrame":
        return self - other
    sub.__doc__ = _flex_doc_FRAME.format(
        desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub"
    )
    subtract = sub  # pandas-compatible alias
    def rsub(self, other) -> "DataFrame":
        return other - self
    rsub.__doc__ = _flex_doc_FRAME.format(
        desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub"
    )
    def mod(self, other) -> "DataFrame":
        return self % other
    mod.__doc__ = _flex_doc_FRAME.format(
        desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod"
    )
    def rmod(self, other) -> "DataFrame":
        return other % self
    rmod.__doc__ = _flex_doc_FRAME.format(
        desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod"
    )
    def pow(self, other) -> "DataFrame":
        return self ** other
    pow.__doc__ = _flex_doc_FRAME.format(
        desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow"
    )
    def rpow(self, other) -> "DataFrame":
        return other ** self
    rpow.__doc__ = _flex_doc_FRAME.format(
        desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow"
    )
    def floordiv(self, other) -> "DataFrame":
        return self // other
    floordiv.__doc__ = _flex_doc_FRAME.format(
        desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv"
    )
    def rfloordiv(self, other) -> "DataFrame":
        return other // self
    rfloordiv.__doc__ = _flex_doc_FRAME.format(
        desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv"
    )
    # Comparison Operators
    # Note: unlike plain Python objects, these return an element-wise
    # DataFrame of booleans rather than a single bool -- hence the
    # ``type: ignore`` on __eq__/__ne__, whose declared return type differs
    # from object's.
    def __eq__(self, other) -> "DataFrame":  # type: ignore
        return self._map_series_op("eq", other)
    def __ne__(self, other) -> "DataFrame":  # type: ignore
        return self._map_series_op("ne", other)
    def __lt__(self, other) -> "DataFrame":
        return self._map_series_op("lt", other)
    def __le__(self, other) -> "DataFrame":
        return self._map_series_op("le", other)
    def __ge__(self, other) -> "DataFrame":
        return self._map_series_op("ge", other)
    def __gt__(self, other) -> "DataFrame":
        return self._map_series_op("gt", other)
    def eq(self, other) -> "DataFrame":
        """
        Compare if the current value is equal to the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.eq(1)
               a      b
        a   True   True
        b  False  False
        c  False   True
        d  False  False
        """
        # Delegates to __eq__, i.e. _map_series_op("eq", other).
        return self == other
    equals = eq  # pandas-style alias
    def gt(self, other) -> "DataFrame":
        """
        Compare if the current value is greater than the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.gt(2)
               a      b
        a  False  False
        b  False  False
        c   True  False
        d   True  False
        """
        # Delegates to __gt__, i.e. _map_series_op("gt", other).
        return self > other
    def ge(self, other) -> "DataFrame":
        """
        Compare if the current value is greater than or equal to the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.ge(1)
              a      b
        a  True   True
        b  True  False
        c  True   True
        d  True  False
        """
        # Delegates to __ge__, i.e. _map_series_op("ge", other).
        return self >= other
    def lt(self, other) -> "DataFrame":
        """
        Compare if the current value is less than the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.lt(1)
               a      b
        a  False  False
        b  False  False
        c  False  False
        d  False  False
        """
        # Delegates to __lt__, i.e. _map_series_op("lt", other).
        return self < other
    def le(self, other) -> "DataFrame":
        """
        Compare if the current value is less than or equal to the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.le(2)
               a      b
        a   True   True
        b   True  False
        c  False   True
        d  False  False
        """
        # Delegates to __le__, i.e. _map_series_op("le", other).
        return self <= other
    def ne(self, other) -> "DataFrame":
        """
        Compare if the current value is not equal to the other.
        >>> df = ps.DataFrame({'a': [1, 2, 3, 4],
        ...                    'b': [1, np.nan, 1, np.nan]},
        ...                    index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
        >>> df.ne(1)
               a      b
        a  False  False
        b   True   True
        c   True  False
        d   True   True
        """
        # Delegates to __ne__, i.e. _map_series_op("ne", other).
        return self != other
def applymap(self, func) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
pandas-on-Spark uses return type hint and does not try to infer the type.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
You can omit the type hint and let pandas-on-Spark infer its type.
>>> df.applymap(lambda x: x ** 2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# TODO: We can implement shortcut theoretically since it creates new DataFrame
# anyway and we don't have to worry about operations on different DataFrames.
return self._apply_series_op(lambda psser: psser.apply(func))
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(
self, func: Union[List[str], Dict[Any, List[str]]]
) -> Union["Series", "DataFrame", "Index"]:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Invoke function on DataFrame.
DataFrame.transform : Only perform transforming type operations.
DataFrame.groupby : Perform operations over groups.
Series.aggregate : The equivalent function for Series.
Examples
--------
>>> df = ps.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index()
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index()
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
For multi-index columns:
>>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
>>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index()
X Y
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
>>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']})
>>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE
X
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from pyspark.pandas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([(column, func) for column in self.columns])
else:
raise ValueError(
"If the given function is a list, it "
"should only contains function names as strings."
)
if not isinstance(func, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or (isinstance(value, list) and all(isinstance(v, str) for v in value))
)
for key, value in func.items()
):
raise ValueError(
"aggs must be a dict mapping from column name to aggregate "
"functions (string or list of strings)."
)
with option_context("compute.default_index_type", "distributed"):
psdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame
# The codes below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
# Aggregated output is usually pretty much small.
return psdf.stack().droplevel(0)[list(func.keys())]
agg = aggregate
    def corr(self, method="pearson") -> Union["Series", "DataFrame", "Index"]:
        """
        Compute pairwise correlation of columns, excluding NA/null values.
        Parameters
        ----------
        method : {'pearson', 'spearman'}
            * pearson : standard correlation coefficient
            * spearman : Spearman rank correlation
        Returns
        -------
        y : DataFrame
        See Also
        --------
        Series.corr
        Examples
        --------
        >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df.corr('pearson')
                  dogs      cats
        dogs  1.000000 -0.851064
        cats -0.851064  1.000000
        >>> df.corr('spearman')
                  dogs      cats
        dogs  1.000000 -0.948683
        cats -0.948683  1.000000
        Notes
        -----
        There are behavior differences between pandas-on-Spark and pandas.
        * the `method` argument only accepts 'pearson', 'spearman'
        * the data should not contain NaNs. pandas-on-Spark will return an error.
        * pandas-on-Spark doesn't support the following argument(s).
          * `min_periods` argument is not supported
        """
        # ``corr`` here is a module-level helper (defined elsewhere in this
        # file); its pandas result is converted back into a pandas-on-Spark
        # frame with from_pandas.
        return ps.from_pandas(corr(self, method))
    def iteritems(self) -> Iterator:
        """
        Iterator over (column name, Series) pairs.
        Iterates over the DataFrame columns, returning a tuple with
        the column name and the content as a Series.
        Returns
        -------
        label : object
            The column names for the DataFrame being iterated over.
        content : Series
            The column entries belonging to each label, as a Series.
        Examples
        --------
        >>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],
        ...                    'population': [1864, 22000, 80000]},
        ...                   index=['panda', 'polar', 'koala'],
        ...                   columns=['species', 'population'])
        >>> df
              species  population
        panda    bear        1864
        polar    bear       22000
        koala marsupial     80000
        >>> for label, content in df.iteritems():
        ...    print('label:', label)
        ...    print('content:', content.to_string())
        ...
        label: species
        content: panda         bear
        polar         bear
        koala    marsupial
        label: population
        content: panda     1864
        polar    22000
        koala    80000
        """
        # Column labels are stored internally as tuples; one-element tuples
        # are unwrapped so single-level columns yield their plain name.
        return (
            (label if len(label) > 1 else label[0], self._psser_for(label))
            for label in self._internal.column_labels
        )
    def iterrows(self) -> Iterator:
        """
        Iterate over DataFrame rows as (index, Series) pairs.
        Yields
        ------
        index : label or tuple of label
            The index of the row. A tuple for a `MultiIndex`.
        data : pandas.Series
            The data of the row as a Series.
        it : generator
            A generator that iterates over the rows of the frame.
        Notes
        -----
        1. Because ``iterrows`` returns a Series for each row,
           it does **not** preserve dtypes across the rows (dtypes are
           preserved across columns for DataFrames). For example,
           >>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])
           >>> row = next(df.iterrows())[1]
           >>> row
           int      1.0
           float    1.5
           Name: 0, dtype: float64
           >>> print(row['int'].dtype)
           float64
           >>> print(df['int'].dtype)
           int64
           To preserve dtypes while iterating over the rows, it is better
           to use :meth:`itertuples` which returns namedtuples of the values
           and which is generally faster than ``iterrows``.
        2. You should **never modify** something you are iterating over.
           This is not guaranteed to work in all cases. Depending on the
           data types, the iterator returns a copy and not a view, and writing
           to it will have no effect.
        """
        columns = self.columns
        internal_index_columns = self._internal.index_spark_column_names
        internal_data_columns = self._internal.data_spark_column_names
        def extract_kv_from_spark_row(row):
            # Index value: a scalar for a single-level index, a tuple for a
            # MultiIndex; data values keep the internal column order.
            k = (
                row[internal_index_columns[0]]
                if len(internal_index_columns) == 1
                else tuple(row[c] for c in internal_index_columns)
            )
            v = [row[c] for c in internal_data_columns]
            return k, v
        # toLocalIterator streams rows to the driver instead of collecting
        # the whole frame at once.
        for k, v in map(
            extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
        ):
            s = pd.Series(v, index=columns, name=k)
            yield k, s
    def itertuples(self, index: bool = True, name: Optional[str] = "PandasOnSpark") -> Iterator:
        """
        Iterate over DataFrame rows as namedtuples.
        Parameters
        ----------
        index : bool, default True
            If True, return the index as the first element of the tuple.
        name : str or None, default "PandasOnSpark"
            The name of the returned namedtuples or None to return regular
            tuples.
        Returns
        -------
        iterator
            An object to iterate over namedtuples for each row in the
            DataFrame with the first field possibly being the index and
            following fields being the column values.
        See Also
        --------
        DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
            pairs.
        DataFrame.items : Iterate over (column name, Series) pairs.
        Notes
        -----
        The column names will be renamed to positional names if they are
        invalid Python identifiers, repeated, or start with an underscore.
        On python versions < 3.7 regular tuples are returned for DataFrames
        with a large number of columns (>254).
        Examples
        --------
        >>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
        ...                   index=['dog', 'hawk'])
        >>> df
              num_legs  num_wings
        dog          4          0
        hawk         2          2
        >>> for row in df.itertuples():
        ...     print(row)
        ...
        PandasOnSpark(Index='dog', num_legs=4, num_wings=0)
        PandasOnSpark(Index='hawk', num_legs=2, num_wings=2)
        By setting the `index` parameter to False we can remove the index
        as the first element of the tuple:
        >>> for row in df.itertuples(index=False):
        ...     print(row)
        ...
        PandasOnSpark(num_legs=4, num_wings=0)
        PandasOnSpark(num_legs=2, num_wings=2)
        With the `name` parameter set we set a custom name for the yielded
        namedtuples:
        >>> for row in df.itertuples(name='Animal'):
        ...     print(row)
        ...
        Animal(Index='dog', num_legs=4, num_wings=0)
        Animal(Index='hawk', num_legs=2, num_wings=2)
        """
        fields = list(self.columns)
        if index:
            fields.insert(0, "Index")
        index_spark_column_names = self._internal.index_spark_column_names
        data_spark_column_names = self._internal.data_spark_column_names
        def extract_kv_from_spark_row(row):
            # Index value: scalar for a single-level index, tuple for MultiIndex.
            k = (
                row[index_spark_column_names[0]]
                if len(index_spark_column_names) == 1
                else tuple(row[c] for c in index_spark_column_names)
            )
            v = [row[c] for c in data_spark_column_names]
            return k, v
        # namedtuple supports at most 255 fields before Python 3.7 (see the
        # Notes section above), so fall back to plain tuples in that case.
        can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255
        if name is not None and can_return_named_tuples:
            # rename=True replaces invalid/duplicate field names positionally.
            itertuple = namedtuple(name, fields, rename=True)  # type: ignore
            for k, v in map(
                extract_kv_from_spark_row,
                self._internal.resolved_copy.spark_frame.toLocalIterator(),
            ):
                yield itertuple._make(([k] if index else []) + list(v))
        else:
            for k, v in map(
                extract_kv_from_spark_row,
                self._internal.resolved_copy.spark_frame.toLocalIterator(),
            ):
                yield tuple(([k] if index else []) + list(v))
    def items(self) -> Iterator:
        """This is an alias of ``iteritems``."""
        # Both expose (column label, Series) pairs.
        return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs) -> None:
    """
    Copy object to the system clipboard.

    Write a text representation of object to the system clipboard.
    This can be pasted into Excel, for example.

    .. note:: This method should only be used if the resulting DataFrame is expected
        to be small, as all the data is loaded into the driver's memory.

    Parameters
    ----------
    excel : bool, default True
        - True, use the provided separator, writing in a csv format for
          allowing easy pasting into excel.
        - False, write a string representation of the object to the
          clipboard.
    sep : str, default ``'\\t'``
        Field delimiter.
    **kwargs
        These parameters will be passed to DataFrame.to_csv.

    Notes
    -----
    Requirements for your platform.

    - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
    - Windows : none
    - OS X : none

    See Also
    --------
    read_clipboard : Read text from clipboard.

    Examples
    --------
    Copy the contents of a DataFrame to the clipboard.

    >>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])  # doctest: +SKIP
    >>> df.to_clipboard(sep=',')  # doctest: +SKIP
    ... # Wrote the following to the system clipboard:
    ... # ,A,B,C
    ... # 0,1,2,3
    ... # 1,4,5,6

    We can omit the index by passing the keyword `index` and setting
    it to false.

    >>> df.to_clipboard(sep=',', index=False)  # doctest: +SKIP
    ... # Wrote the following to the system clipboard:
    ... # A,B,C
    ... # 1,2,3
    ... # 4,5,6

    This function also works for Series:

    >>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x')  # doctest: +SKIP
    >>> df.to_clipboard(sep=',')  # doctest: +SKIP
    ... # Wrote the following to the system clipboard:
    ... # 0, 1
    ... # 1, 2
    ... # 2, 3
    ... # 3, 4
    ... # 4, 5
    ... # 5, 6
    ... # 6, 7
    """
    # Make sure locals() call is at the top of the function so we don't capture local variables.
    args = locals()
    psdf = self
    # Collect the whole frame to the driver as pandas and delegate to
    # pandas.DataFrame.to_clipboard after validating the passed arguments.
    return validate_arguments_and_invoke_function(
        psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args
    )
def to_html(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    justify=None,
    max_rows=None,
    max_cols=None,
    show_dimensions=False,
    decimal=".",
    bold_rows=True,
    classes=None,
    escape=True,
    notebook=False,
    border=None,
    table_id=None,
    render_links=False,
) -> Optional[str]:
    """
    Render a DataFrame as an HTML table.

    .. note:: This method should only be used if the resulting pandas object is expected
        to be small, as all the data is loaded into the driver's memory. If the input
        is large, set max_rows parameter.

    Parameters
    ----------
    buf : StringIO-like, optional
        Buffer to write to.
    columns : sequence, optional, default None
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool, optional
        Write out the column names. If a list of strings is given, it
        is assumed to be aliases for the column names
    index : bool, optional, default True
        Whether to print index (row) labels.
    na_rep : str, optional, default 'NaN'
        String representation of NAN to use.
    formatters : list or dict of one-param. functions, optional
        Formatter functions to apply to columns' elements by position or
        name.
        The result of each function must be a unicode string.
        List must be of length equal to the number of columns.
    float_format : one-parameter function, optional, default None
        Formatter function to apply to columns' elements if they are
        floats. The result of this function must be a unicode string.
    sparsify : bool, optional, default True
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row.
    index_names : bool, optional, default True
        Prints the names of the indexes.
    justify : str, default None
        How to justify the column labels. If None uses the option from
        the print configuration (controlled by set_option), 'right' out
        of the box. Valid values are

        * left
        * right
        * center
        * justify
        * justify-all
        * start
        * end
        * inherit
        * match-parent
        * initial
        * unset.
    max_rows : int, optional
        Maximum number of rows to display in the console.
    max_cols : int, optional
        Maximum number of columns to display in the console.
    show_dimensions : bool, default False
        Display DataFrame dimensions (number of rows by number of columns).
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.
    bold_rows : bool, default True
        Make the row labels bold in the output.
    classes : str or list or tuple, default None
        CSS class(es) to apply to the resulting html table.
    escape : bool, default True
        Convert the characters <, >, and & to HTML-safe sequences.
    notebook : {True, False}, default False
        Whether the generated HTML is for IPython Notebook.
    border : int
        A ``border=border`` attribute is included in the opening
        `<table>` tag. Default ``pd.options.html.border``.
    table_id : str, optional
        A css id is included in the opening `<table>` tag if specified.
    render_links : bool, default False
        Convert URLs to HTML links (only works with pandas 0.24+).

    Returns
    -------
    str (or unicode, depending on data and options)
        String representation of the dataframe.

    See Also
    --------
    to_string : Convert DataFrame to a string.
    """
    # Make sure locals() call is at the top of the function so we don't capture local variables.
    args = locals()
    if max_rows is not None:
        # Truncate on the Spark side first so only `max_rows` rows are
        # collected to the driver.
        psdf = self.head(max_rows)
    else:
        psdf = self

    return validate_arguments_and_invoke_function(
        psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args
    )
def to_string(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    justify=None,
    max_rows=None,
    max_cols=None,
    show_dimensions=False,
    decimal=".",
    line_width=None,
) -> Optional[str]:
    """
    Render a DataFrame to a console-friendly tabular output.

    .. note:: This method should only be used if the resulting pandas object is expected
        to be small, as all the data is loaded into the driver's memory. If the input
        is large, set max_rows parameter.

    Parameters
    ----------
    buf : StringIO-like, optional
        Buffer to write to.
    columns : sequence, optional, default None
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool, optional
        Write out the column names. If a list of strings is given, it
        is assumed to be aliases for the column names
    index : bool, optional, default True
        Whether to print index (row) labels.
    na_rep : str, optional, default 'NaN'
        String representation of NAN to use.
    formatters : list or dict of one-param. functions, optional
        Formatter functions to apply to columns' elements by position or
        name.
        The result of each function must be a unicode string.
        List must be of length equal to the number of columns.
    float_format : one-parameter function, optional, default None
        Formatter function to apply to columns' elements if they are
        floats. The result of this function must be a unicode string.
    sparsify : bool, optional, default True
        Set to False for a DataFrame with a hierarchical index to print
        every multiindex key at each row.
    index_names : bool, optional, default True
        Prints the names of the indexes.
    justify : str, default None
        How to justify the column labels. If None uses the option from
        the print configuration (controlled by set_option), 'right' out
        of the box. Valid values are

        * left
        * right
        * center
        * justify
        * justify-all
        * start
        * end
        * inherit
        * match-parent
        * initial
        * unset.
    max_rows : int, optional
        Maximum number of rows to display in the console.
    max_cols : int, optional
        Maximum number of columns to display in the console.
    show_dimensions : bool, default False
        Display DataFrame dimensions (number of rows by number of columns).
    decimal : str, default '.'
        Character recognized as decimal separator, e.g. ',' in Europe.
    line_width : int, optional
        Width to wrap a line in characters.

    Returns
    -------
    str (or unicode, depending on data and options)
        String representation of the dataframe.

    See Also
    --------
    to_html : Convert DataFrame to HTML.

    Examples
    --------
    >>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
    >>> print(df.to_string())
       col1  col2
    0     1     4
    1     2     5
    2     3     6

    >>> print(df.to_string(max_rows=2))
       col1  col2
    0     1     4
    1     2     5
    """
    # Make sure locals() call is at the top of the function so we don't capture local variables.
    args = locals()
    if max_rows is not None:
        # Truncate on the Spark side first so only `max_rows` rows are
        # collected to the driver.
        psdf = self.head(max_rows)
    else:
        psdf = self

    return validate_arguments_and_invoke_function(
        psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args
    )
def to_dict(self, orient="dict", into=dict) -> Union[List, Mapping]:
    """
    Convert the DataFrame to a dictionary.

    The type of the key-value pairs can be customized with the parameters
    (see below).

    .. note:: This method should only be used if the resulting pandas DataFrame is expected
        to be small, as all the data is loaded into the driver's memory.

    Parameters
    ----------
    orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
        Determines the type of the values of the dictionary.

        - 'dict' (default) : dict like {column -> {index -> value}}
        - 'list' : dict like {column -> [values]}
        - 'series' : dict like {column -> Series(values)}
        - 'split' : dict like
          {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
        - 'records' : list like
          [{column -> value}, ... , {column -> value}]
        - 'index' : dict like {index -> {column -> value}}

        Abbreviations are allowed. `s` indicates `series` and `sp`
        indicates `split`.
    into : class, default dict
        The collections.abc.Mapping subclass used for all Mappings
        in the return value.  Can be the actual class or an empty
        instance of the mapping type you want.  If you want a
        collections.defaultdict, you must pass it initialized.

    Returns
    -------
    dict, list or collections.abc.Mapping
        Return a collections.abc.Mapping object representing the DataFrame.
        The resulting transformation depends on the `orient` parameter.

    Examples
    --------
    >>> df = ps.DataFrame({'col1': [1, 2],
    ...                    'col2': [0.5, 0.75]},
    ...                   index=['row1', 'row2'],
    ...                   columns=['col1', 'col2'])
    >>> df
          col1  col2
    row1     1  0.50
    row2     2  0.75

    >>> df_dict = df.to_dict()
    >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
    [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]

    You can specify the return orientation.

    >>> df_dict = df.to_dict('series')
    >>> sorted(df_dict.items())
    [('col1', row1    1
    row2    2
    Name: col1, dtype: int64), ('col2', row1    0.50
    row2    0.75
    Name: col2, dtype: float64)]

    >>> df_dict = df.to_dict('split')
    >>> sorted(df_dict.items())  # doctest: +ELLIPSIS
    [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]

    >>> df_dict = df.to_dict('records')
    >>> [sorted(values.items()) for values in df_dict]  # doctest: +ELLIPSIS
    [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]

    >>> df_dict = df.to_dict('index')
    >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
    [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]

    You can also specify the mapping type.

    >>> from collections import OrderedDict, defaultdict
    >>> df.to_dict(into=OrderedDict)
    OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])

    If you want a `defaultdict`, you need to initialize it:

    >>> dd = defaultdict(list)
    >>> df.to_dict('records', into=dd)  # doctest: +ELLIPSIS
    [defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
    """
    # Make sure locals() call is at the top of the function so we don't capture local variables.
    args = locals()
    psdf = self
    # Collect to pandas on the driver and delegate to pandas.DataFrame.to_dict.
    return validate_arguments_and_invoke_function(
        psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args
    )
def to_latex(
    self,
    buf=None,
    columns=None,
    col_space=None,
    header=True,
    index=True,
    na_rep="NaN",
    formatters=None,
    float_format=None,
    sparsify=None,
    index_names=True,
    bold_rows=False,
    column_format=None,
    longtable=None,
    escape=None,
    encoding=None,
    decimal=".",
    multicolumn=None,
    multicolumn_format=None,
    multirow=None,
) -> Optional[str]:
    r"""
    Render an object to a LaTeX tabular environment table.

    Render an object to a tabular environment table. You can splice this into a LaTeX
    document. Requires usepackage{booktabs}.

    .. note:: This method should only be used if the resulting pandas object is expected
        to be small, as all the data is loaded into the driver's memory. If the input
        is large, consider alternative formats.

    Parameters
    ----------
    buf : file descriptor or None
        Buffer to write to. If None, the output is returned as a string.
    columns : list of label, optional
        The subset of columns to write. Writes all columns by default.
    col_space : int, optional
        The minimum width of each column.
    header : bool or list of str, default True
        Write out the column names. If a list of strings is given, it is assumed to be aliases
        for the column names.
    index : bool, default True
        Write row names (index).
    na_rep : str, default ‘NaN’
        Missing data representation.
    formatters : list of functions or dict of {str: function}, optional
        Formatter functions to apply to columns’ elements by position or name. The result of
        each function must be a unicode string. List must be of length equal to the number of
        columns.
    float_format : str, optional
        Format string for floating point numbers.
    sparsify : bool, optional
        Set to False for a DataFrame with a hierarchical index to print every multiindex key at
        each row. By default, the value will be read from the config module.
    index_names : bool, default True
        Prints the names of the indexes.
    bold_rows : bool, default False
        Make the row labels bold in the output.
    column_format : str, optional
        The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
        default, ‘l’ will be used for all columns except columns of numbers, which default
        to ‘r’.
    longtable : bool, optional
        By default, the value will be read from the pandas config module. Use a longtable
        environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
        preamble.
    escape : bool, optional
        By default, the value will be read from the pandas config module. When set to False
        prevents from escaping latex special characters in column names.
    encoding : str, optional
        A string representing the encoding to use in the output file, defaults to ‘ascii’ on
        Python 2 and ‘utf-8’ on Python 3.
    decimal : str, default ‘.’
        Character recognized as decimal separator, e.g. ‘,’ in Europe.
    multicolumn : bool, default True
        Use multicolumn to enhance MultiIndex columns. The default will be read from the config
        module.
    multicolumn_format : str, default ‘l’
        The alignment for multicolumns, similar to column_format The default will be read from
        the config module.
    multirow : bool, default False
        Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
        LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
        rows, separating groups via clines. The default will be read from the pandas config
        module.

    Returns
    -------
    str or None
        If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.

    See Also
    --------
    DataFrame.to_string : Render a DataFrame to a console-friendly
        tabular output.
    DataFrame.to_html : Render a DataFrame as an HTML table.

    Examples
    --------
    >>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],
    ...                    'mask': ['red', 'purple'],
    ...                    'weapon': ['sai', 'bo staff']},
    ...                   columns=['name', 'mask', 'weapon'])
    >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
    \begin{tabular}{lll}
    \toprule
          name &    mask &    weapon \\
    \midrule
       Raphael &     red &       sai \\
     Donatello &  purple &  bo staff \\
    \bottomrule
    \end{tabular}
    <BLANKLINE>
    """
    # Make sure locals() call is at the top of the function so we don't capture local variables.
    args = locals()
    psdf = self
    # Collect to pandas on the driver and delegate to pandas.DataFrame.to_latex.
    return validate_arguments_and_invoke_function(
        psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args
    )
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
#  when creating arrays)
def transpose(self) -> "DataFrame":
    """
    Transpose index and columns.

    Reflect the DataFrame over its main diagonal by writing rows as columns
    and vice-versa. The property :attr:`.T` is an accessor to the method
    :meth:`transpose`.

    .. note:: This method is based on an expensive operation due to the nature
        of big data. Internally it needs to generate each row for each value, and
        then group twice - it is a huge operation. To prevent misusage, this method
        has the 'compute.max_rows' default limit of input length, and raises a ValueError.

            >>> from pyspark.pandas.config import option_context
            >>> with option_context('compute.max_rows', 1000):  # doctest: +NORMALIZE_WHITESPACE
            ...     ps.DataFrame({'a': range(1001)}).transpose()
            Traceback (most recent call last):
              ...
            ValueError: Current DataFrame has more than the given limit 1000 rows.
            Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
            to retrieve more than 1000 rows. Note that, before changing the
            'compute.max_rows', this operation is considerably expensive.

    Returns
    -------
    DataFrame
        The transposed DataFrame.

    Notes
    -----
    Transposing a DataFrame with mixed dtypes will result in a homogeneous
    DataFrame with the coerced dtype. For instance, if int and float have
    to be placed in same column, it becomes float. If type coercion is not
    possible, it fails.

    Also, note that the values in index should be unique because they become
    unique column names.

    In addition, if Spark 2.3 is used, the types should always be exactly same.

    Examples
    --------
    **Square DataFrame with homogeneous dtype**

    >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
    >>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])
    >>> df1
       col1  col2
    0     1     3
    1     2     4

    >>> df1_transposed = df1.T.sort_index()  # doctest: +SKIP
    >>> df1_transposed  # doctest: +SKIP
          0  1
    col1  1  2
    col2  3  4

    When the dtype is homogeneous in the original DataFrame, we get a
    transposed DataFrame with the same dtype:

    >>> df1.dtypes
    col1    int64
    col2    int64
    dtype: object
    >>> df1_transposed.dtypes  # doctest: +SKIP
    0    int64
    1    int64
    dtype: object

    **Non-square DataFrame with mixed dtypes**

    >>> d2 = {'score': [9.5, 8],
    ...       'kids': [0, 0],
    ...       'age': [12, 22]}
    >>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])
    >>> df2
       score  kids  age
    0    9.5     0   12
    1    8.0     0   22

    >>> df2_transposed = df2.T.sort_index()  # doctest: +SKIP
    >>> df2_transposed  # doctest: +SKIP
              0     1
    age    12.0  22.0
    kids    0.0   0.0
    score   9.5   8.0

    When the DataFrame has mixed dtypes, we get a transposed DataFrame with
    the coerced dtype:

    >>> df2.dtypes
    score    float64
    kids       int64
    age        int64
    dtype: object
    >>> df2_transposed.dtypes  # doctest: +SKIP
    0    float64
    1    float64
    dtype: object
    """
    max_compute_count = get_option("compute.max_rows")
    if max_compute_count is not None:
        # Fetch one row beyond the limit: if we get it, the frame is too large.
        pdf = self.head(max_compute_count + 1)._to_internal_pandas()
        if len(pdf) > max_compute_count:
            # NOTE: fixed grammar of this user-facing message
            # ("more then" -> "more than", duplicated "to retrieve" removed).
            raise ValueError(
                "Current DataFrame has more than the given limit {0} rows. "
                "Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
                "to retrieve more than {0} rows. Note that, before changing the "
                "'compute.max_rows', this operation is considerably expensive.".format(
                    max_compute_count
                )
            )
        # Small enough: transpose locally with pandas.
        return DataFrame(pdf.transpose())

    # Explode the data to be pairs.
    #
    # For instance, if the current input DataFrame is as below:
    #
    # +------+------+------+------+------+
    # |index1|index2|(a,x1)|(a,x2)|(b,x3)|
    # +------+------+------+------+------+
    # |    y1|    z1|     1|     0|     0|
    # |    y2|    z2|     0|    50|     0|
    # |    y3|    z3|     3|     2|     1|
    # +------+------+------+------+------+
    #
    # Output of `exploded_df` becomes as below:
    #
    # +-----------------+-----------------+-----------------+-----+
    # |            index|__index_level_0__|__index_level_1__|value|
    # +-----------------+-----------------+-----------------+-----+
    # |{"a":["y1","z1"]}|                a|               x1|    1|
    # |{"a":["y1","z1"]}|                a|               x2|    0|
    # |{"a":["y1","z1"]}|                b|               x3|    0|
    # |{"a":["y2","z2"]}|                a|               x1|    0|
    # |{"a":["y2","z2"]}|                a|               x2|   50|
    # |{"a":["y2","z2"]}|                b|               x3|    0|
    # |{"a":["y3","z3"]}|                a|               x1|    3|
    # |{"a":["y3","z3"]}|                a|               x2|    2|
    # |{"a":["y3","z3"]}|                b|               x3|    1|
    # +-----------------+-----------------+-----------------+-----+
    pairs = F.explode(
        F.array(
            *[
                F.struct(
                    *[
                        F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))
                        for i, col in enumerate(label)
                    ],
                    *[self._internal.spark_column_for(label).alias("value")],
                )
                for label in self._internal.column_labels
            ]
        )
    )

    exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select(
        [
            F.to_json(
                F.struct(
                    F.array(*[scol for scol in self._internal.index_spark_columns]).alias("a")
                )
            ).alias("index"),
            F.col("pairs.*"),
        ]
    )

    # After that, executes pivot with key and its index column.
    # Note that index column should contain unique values since column names
    # should be unique.
    internal_index_columns = [
        SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
    ]
    pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index")

    transposed_df = pivoted_df.agg(F.first(F.col("value")))

    new_data_columns = list(
        filter(lambda x: x not in internal_index_columns, transposed_df.columns)
    )

    # Recover the original index values (JSON-encoded above) as column labels.
    column_labels = [
        None if len(label) == 1 and label[0] is None else label
        for label in (tuple(json.loads(col)["a"]) for col in new_data_columns)
    ]

    internal = InternalFrame(
        spark_frame=transposed_df,
        index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns],
        index_names=self._internal.column_label_names,
        column_labels=column_labels,
        data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns],
        column_label_names=self._internal.index_names,
    )

    return DataFrame(internal)
# Property accessor so ``df.T`` behaves like pandas' transpose shorthand.
T = property(transpose)
def apply(self, func, axis=0, args=(), **kwds) -> Union["Series", "DataFrame", "Index"]:
    """
    Apply a function along an axis of the DataFrame.

    Objects passed to the function are Series objects whose index is
    either the DataFrame's index (``axis=0``) or the DataFrame's columns
    (``axis=1``).

    See also `Transform and apply a function
    <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.

    .. note:: when `axis` is 0 or 'index', the `func` is unable to access
        to the whole input series. pandas-on-Spark internally splits the input series into
        multiple batches and calls `func` with each batch multiple times. Therefore, operations
        such as global aggregations are impossible. See the example below.

        >>> # This case does not return the length of whole series but of the batch internally
        ... # used.
        ... def length(s) -> int:
        ...     return len(s)
        ...
        >>> df = ps.DataFrame({'A': range(1000)})
        >>> df.apply(length, axis=0)  # doctest: +SKIP
        0     83
        1     83
        2     83
        ...
        10    83
        11    83
        dtype: int32

    .. note:: this API executes the function once to infer the type which is
        potentially expensive, for instance, when the dataset is created after
        aggregations or sorting.

        To avoid this, specify the return type as `Series` or scalar value in ``func``,
        for instance, as below:

        >>> def square(s) -> ps.Series[np.int32]:
        ...     return s ** 2

        pandas-on-Spark uses return type hint and does not try to infer the type.

        In case when axis is 1, it requires to specify `DataFrame` or scalar value
        with type hints as below:

        >>> def plus_one(x) -> ps.DataFrame[float, float]:
        ...     return x + 1

        If the return type is specified as `DataFrame`, the output column names become
        `c0, c1, c2 ... cn`. These names are positionally mapped to the returned
        DataFrame in ``func``.

        To specify the column names, you can assign them in a pandas friendly style as below:

        >>> def plus_one(x) -> ps.DataFrame["a": float, "b": float]:
        ...     return x + 1

        >>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
        >>> def plus_one(x) -> ps.DataFrame[zip(pdf.dtypes, pdf.columns)]:
        ...     return x + 1

        However, this way switches the index type to default index type in the output
        because the type hint cannot express the index type at this moment. Use
        `reset_index()` to keep index as a workaround.

        When the given function has the return type annotated, the original index of the
        DataFrame will be lost and then a default index will be attached to the result.
        Please be careful about configuring the default index. See also `Default Index Type
        <https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.

    Parameters
    ----------
    func : function
        Function to apply to each column or row.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Axis along which the function is applied:

        * 0 or 'index': apply function to each column.
        * 1 or 'columns': apply function to each row.
    args : tuple
        Positional arguments to pass to `func` in addition to the
        array/series.
    **kwds
        Additional keyword arguments to pass as keywords arguments to
        `func`.

    Returns
    -------
    Series or DataFrame
        Result of applying ``func`` along the given axis of the
        DataFrame.

    See Also
    --------
    DataFrame.applymap : For elementwise operations.
    DataFrame.aggregate : Only perform aggregating type operations.
    DataFrame.transform : Only perform transforming type operations.
    Series.apply : The equivalent function for Series.

    Examples
    --------
    >>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
    >>> df
       A  B
    0  4  9
    1  4  9
    2  4  9

    Using a numpy universal function (in this case the same as
    ``np.sqrt(df)``):

    >>> def sqrt(x) -> ps.Series[float]:
    ...     return np.sqrt(x)
    ...
    >>> df.apply(sqrt, axis=0)
         A    B
    0  2.0  3.0
    1  2.0  3.0
    2  2.0  3.0

    You can omit the type hint and let pandas-on-Spark infer its type.

    >>> df.apply(np.sqrt, axis=0)
         A    B
    0  2.0  3.0
    1  2.0  3.0
    2  2.0  3.0

    When `axis` is 1 or 'columns', it applies the function for each row.

    >>> def summation(x) -> np.int64:
    ...     return np.sum(x)
    ...
    >>> df.apply(summation, axis=1)
    0    13
    1    13
    2    13
    dtype: int64

    Likewise, you can omit the type hint and let pandas-on-Spark infer its type.

    >>> df.apply(np.sum, axis=1)
    0    13
    1    13
    2    13
    dtype: int64

    >>> df.apply(max, axis=1)
    0    9
    1    9
    2    9
    dtype: int64

    Returning a list-like will result in a Series

    >>> df.apply(lambda x: [1, 2], axis=1)
    0    [1, 2]
    1    [1, 2]
    2    [1, 2]
    dtype: object

    In order to specify the types when `axis` is '1', it should use DataFrame[...]
    annotation. In this case, the column names are automatically generated.

    >>> def identify(x) -> ps.DataFrame['A': np.int64, 'B': np.int64]:
    ...     return x
    ...
    >>> df.apply(identify, axis=1)
       A  B
    0  4  9
    1  4  9
    2  4  9

    You can also specify extra arguments.

    >>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, np.int64]:
    ...     return a + b + c
    ...
    >>> df.apply(plus_two, axis=1, args=(1,), c=3)
       c0  c1
    0   8  13
    1   8  13
    2   8  13
    """
    from pyspark.pandas.groupby import GroupBy
    from pyspark.pandas.series import first_series

    if not isinstance(func, types.FunctionType):
        # Wrap non-plain-function callables (e.g. builtins, partials) in a lambda
        # so that inspect.getfullargspec below can introspect the annotations.
        assert callable(func), "the first argument should be a callable function."
        f = func
        func = lambda *args, **kwargs: f(*args, **kwargs)

    axis = validate_axis(axis)
    should_return_series = False
    spec = inspect.getfullargspec(func)
    return_sig = spec.annotations.get("return", None)
    # No return-type annotation -> the schema has to be inferred by sampling.
    should_infer_schema = return_sig is None

    def apply_func(pdf):
        # Runs pandas' apply on each batch; normalize a Series result to a
        # one-column frame so the output schema is uniform.
        pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds)
        if isinstance(pdf_or_pser, pd.Series):
            return pdf_or_pser.to_frame()
        else:
            return pdf_or_pser

    self_applied = DataFrame(self._internal.resolved_copy)  # type: "DataFrame"

    column_labels = None  # type: Optional[List[Tuple]]
    if should_infer_schema:
        # Here we execute with the first 1000 to get the return type.
        # If the records were less than 1000, it uses pandas API directly for a shortcut.
        limit = get_option("compute.shortcut_limit")
        pdf = self_applied.head(limit + 1)._to_internal_pandas()
        applied = pdf.apply(func, axis=axis, args=args, **kwds)
        psser_or_psdf = ps.from_pandas(applied)
        if len(pdf) <= limit:
            return psser_or_psdf

        psdf = psser_or_psdf
        if isinstance(psser_or_psdf, ps.Series):
            should_return_series = True
            psdf = psser_or_psdf._psdf

        index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields]
        data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]

        return_schema = StructType([field.struct_field for field in index_fields + data_fields])

        output_func = GroupBy._make_pandas_df_builder_func(
            self_applied, apply_func, return_schema, retain_index=True
        )
        sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
            lambda iterator: map(output_func, iterator), schema=return_schema
        )

        # If schema is inferred, we can restore indexes too.
        internal = psdf._internal.with_new_sdf(
            spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
        )
    else:
        # Return type was annotated: build the output schema from the hint
        # instead of sampling.
        return_type = infer_return_type(func)
        require_index_axis = isinstance(return_type, SeriesType)
        require_column_axis = isinstance(return_type, DataFrameType)

        if require_index_axis:
            if axis != 0:
                raise TypeError(
                    "The given function should specify a scalar or a series as its type "
                    "hints when axis is 0 or 'index'; however, the return type "
                    "was %s" % return_sig
                )
            dtype = cast(SeriesType, return_type).dtype
            spark_type = cast(SeriesType, return_type).spark_type
            data_fields = [
                InternalField(
                    dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)
                )
                for name in self_applied.columns
            ]
            return_schema = StructType([field.struct_field for field in data_fields])
        elif require_column_axis:
            if axis != 1:
                raise TypeError(
                    "The given function should specify a scalar or a frame as its type "
                    "hints when axis is 1 or 'column'; however, the return type "
                    "was %s" % return_sig
                )
            data_fields = cast(DataFrameType, return_type).fields
            return_schema = cast(DataFrameType, return_type).spark_type
        else:
            # any axis is fine.
            should_return_series = True
            spark_type = cast(ScalarType, return_type).spark_type
            dtype = cast(ScalarType, return_type).dtype
            data_fields = [
                InternalField(
                    dtype=dtype,
                    struct_field=StructField(
                        name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type
                    ),
                )
            ]
            return_schema = StructType([field.struct_field for field in data_fields])
            column_labels = [None]

        output_func = GroupBy._make_pandas_df_builder_func(
            self_applied, apply_func, return_schema, retain_index=False
        )
        sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
            lambda iterator: map(output_func, iterator), schema=return_schema
        )

        # Otherwise, it loses index.
        internal = InternalFrame(
            spark_frame=sdf,
            index_spark_columns=None,
            column_labels=column_labels,
            data_fields=data_fields,
        )

    result = DataFrame(internal)  # type: "DataFrame"
    if should_return_series:
        return first_series(result)
    else:
        return result
def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame":
    """
    Call ``func`` on self producing a Series with transformed values
    and that has the same length as its input.

    See also `Transform and apply a function
    <https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.

    .. note:: this API executes the function once to infer the type which is
        potentially expensive, for instance, when the dataset is created after
        aggregations or sorting.

        To avoid this, specify return type in ``func``, for instance, as below:

        >>> def square(x) -> ps.Series[np.int32]:
        ...     return x ** 2

        pandas-on-Spark uses return type hint and does not try to infer the type.

    .. note:: the series within ``func`` is actually multiple pandas series as the
        segments of the whole pandas-on-Spark series; therefore, the length of each series
        is not guaranteed. As an example, an aggregation against each series
        does work as a global aggregation but an aggregation of each segment. See
        below:

        >>> def func(x) -> ps.Series[np.int32]:
        ...     return x + sum(x)

    Parameters
    ----------
    func : function
        Function to use for transforming the data. It must work when pandas Series
        is passed.
    axis : int, default 0 or 'index'
        Can only be set to 0 at the moment.
    *args
        Positional arguments to pass to func.
    **kwargs
        Keyword arguments to pass to func.

    Returns
    -------
    DataFrame
        A DataFrame that must have the same length as self.

    Raises
    ------
    Exception : If the returned DataFrame has a different length than self.

    See Also
    --------
    DataFrame.aggregate : Only perform aggregating type operations.
    DataFrame.apply : Invoke function on DataFrame.
    Series.transform : The equivalent function for Series.

    Examples
    --------
    >>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
    >>> df
       A  B
    0  0  1
    1  1  2
    2  2  3

    >>> def square(x) -> ps.Series[np.int32]:
    ...     return x ** 2
    >>> df.transform(square)
       A  B
    0  0  1
    1  1  4
    2  4  9

    You can omit the type hint and let pandas-on-Spark infer its type.

    >>> df.transform(lambda x: x ** 2)
       A  B
    0  0  1
    1  1  4
    2  4  9

    For multi-index columns:

    >>> df.columns = [('X', 'A'), ('X', 'B')]
    >>> df.transform(square)  # doctest: +NORMALIZE_WHITESPACE
       X
       A  B
    0  0  1
    1  1  4
    2  4  9

    >>> (df * -1).transform(abs)  # doctest: +NORMALIZE_WHITESPACE
       X
       A  B
    0  0  1
    1  1  2
    2  2  3

    You can also specify extra arguments.

    >>> def calculation(x, y, z) -> ps.Series[int]:
    ...     return x ** y + z
    >>> df.transform(calculation, y=10, z=20)  # doctest: +NORMALIZE_WHITESPACE
          X
          A      B
    0    20     21
    1    21   1044
    2  1044  59069
    """
    if not isinstance(func, types.FunctionType):
        # Wrap non-plain-function callables in a lambda so the annotation
        # introspection below works uniformly.
        assert callable(func), "the first argument should be a callable function."
        f = func
        func = lambda *args, **kwargs: f(*args, **kwargs)

    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError('axis should be either 0 or "index" currently.')

    spec = inspect.getfullargspec(func)
    return_sig = spec.annotations.get("return", None)
    # No return-type annotation -> infer the result schema by sampling.
    should_infer_schema = return_sig is None

    if should_infer_schema:
        # Here we execute with the first 1000 to get the return type.
        # If the records were less than 1000, it uses pandas API directly for a shortcut.
        limit = get_option("compute.shortcut_limit")
        pdf = self.head(limit + 1)._to_internal_pandas()
        transformed = pdf.transform(func, axis, *args, **kwargs)
        psdf = DataFrame(transformed)  # type: "DataFrame"
        if len(pdf) <= limit:
            return psdf

        applied = []
        data_fields = []
        # Apply the function column by column, reusing the dtypes observed on
        # the sampled pandas result as the declared return types.
        for input_label, output_label in zip(
            self._internal.column_labels, psdf._internal.column_labels
        ):
            psser = self._psser_for(input_label)

            field = psdf._internal.field_for(output_label).normalize_spark_type()
            data_fields.append(field)

            return_schema = field.spark_type
            applied.append(
                psser.pandas_on_spark._transform_batch(
                    func=lambda c: func(c, *args, **kwargs),
                    return_type=SeriesType(field.dtype, return_schema),
                )
            )

        internal = self._internal.with_new_columns(applied, data_fields=data_fields)
        return DataFrame(internal)
    else:
        # Annotated return type: let transform_batch use the hint directly.
        return self._apply_series_op(
            lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)
        )
def pop(self, item) -> "DataFrame":
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._update_internal_frame(self.drop(item)._internal)
return result
    # TODO: add axis parameter can work when '1' or 'columns'
    def xs(self, key, axis=0, level=None) -> Union["DataFrame", "Series"]:
        """
        Return cross-section from the DataFrame.
        This method takes a `key` argument to select data at a particular
        level of a MultiIndex.
        Parameters
        ----------
        key : label or tuple of label
            Label contained in the index, or partially in a MultiIndex.
        axis : 0 or 'index', default 0
            Axis to retrieve cross-section on.
            currently only support 0 or 'index'
        level : object, defaults to first n levels (n=1 or len(key))
            In case of a key partially contained in a MultiIndex, indicate
            which levels are used. Levels can be referred by label or position.
        Returns
        -------
        DataFrame or Series
            Cross-section from the original DataFrame
            corresponding to the selected index levels.
        See Also
        --------
        DataFrame.loc : Access a group of rows and columns
            by label(s) or a boolean array.
        DataFrame.iloc : Purely integer-location based indexing
            for selection by position.
        Examples
        --------
        >>> d = {'num_legs': [4, 4, 2, 2],
        ...      'num_wings': [0, 0, 2, 2],
        ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
        ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
        ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
        >>> df = ps.DataFrame(data=d)
        >>> df = df.set_index(['class', 'animal', 'locomotion'])
        >>> df  # doctest: +NORMALIZE_WHITESPACE
                                   num_legs  num_wings
        class  animal  locomotion
        mammal cat     walks              4          0
               dog     walks              4          0
               bat     flies              2          2
        bird   penguin walks              2          2
        Get values at specified index
        >>> df.xs('mammal')  # doctest: +NORMALIZE_WHITESPACE
                           num_legs  num_wings
        animal locomotion
        cat    walks              4          0
        dog    walks              4          0
        bat    flies              2          2
        Get values at several indexes
        >>> df.xs(('mammal', 'dog'))  # doctest: +NORMALIZE_WHITESPACE
                    num_legs  num_wings
        locomotion
        walks              4          0
        >>> df.xs(('mammal', 'dog', 'walks'))  # doctest: +NORMALIZE_WHITESPACE
        num_legs     4
        num_wings    0
        Name: (mammal, dog, walks), dtype: int64
        Get values at specified index and level
        >>> df.xs('cat', level=1)  # doctest: +NORMALIZE_WHITESPACE
                           num_legs  num_wings
        class  locomotion
        mammal walks              4          0
        """
        # Local import — presumably to avoid a circular frame/series import; confirm.
        from pyspark.pandas.series import first_series

        if not is_name_like_value(key):
            raise TypeError("'key' should be a scalar value or tuple that contains scalar values")
        # A tuple key already addresses explicit levels; combining it with an
        # explicit `level` is rejected as a missing key.
        if level is not None and is_name_like_tuple(key):
            raise KeyError(key)
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')
        # Normalize a scalar key into a 1-tuple so everything below can treat
        # the key as one value per index level.
        if not is_name_like_tuple(key):
            key = (key,)
        if len(key) > self._internal.index_level:
            raise KeyError(
                "Key length ({}) exceeds index depth ({})".format(
                    len(key), self._internal.index_level
                )
            )
        if level is None:
            level = 0
        # One equality predicate per index level covered by the key (starting
        # at `level`), AND-ed together into a single Spark filter.
        rows = [
            self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level)
        ]
        internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows))
        if len(key) == self._internal.index_level:
            # The key pins every index level. head(2) is enough to distinguish
            # "no match" / "exactly one row" / "multiple rows".
            psdf = DataFrame(internal)  # type: DataFrame
            pdf = psdf.head(2)._to_internal_pandas()
            if len(pdf) == 0:
                raise KeyError(key)
            elif len(pdf) > 1:
                return psdf
            else:
                # Exactly one row: transpose the single-row frame so it can be
                # returned as a Series.
                return first_series(DataFrame(pdf.transpose()))
        else:
            # Partial key: drop the consumed index levels and return a frame
            # indexed by the remaining levels.
            index_spark_columns = (
                internal.index_spark_columns[:level]
                + internal.index_spark_columns[level + len(key) :]
            )
            index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
            index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]
            internal = internal.copy(
                index_spark_columns=index_spark_columns,
                index_names=index_names,
                index_fields=index_fields,
            ).resolved_copy
            return DataFrame(internal)
    def between_time(
        self,
        start_time: Union[datetime.time, str],
        end_time: Union[datetime.time, str],
        include_start: bool = True,
        include_end: bool = True,
        axis: Union[int, str] = 0,
    ) -> "DataFrame":
        """
        Select values between particular times of the day (example: 9:00-9:30 AM).
        By setting ``start_time`` to be later than ``end_time``,
        you can get the times that are *not* between the two times.
        Parameters
        ----------
        start_time : datetime.time or str
            Initial time as a time filter limit.
        end_time : datetime.time or str
            End time as a time filter limit.
        include_start : bool, default True
            Whether the start time needs to be included in the result.
        include_end : bool, default True
            Whether the end time needs to be included in the result.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine range time on index or columns value.
        Returns
        -------
        DataFrame
            Data from the original object filtered to the specified dates range.
        Raises
        ------
        TypeError
            If the index is not a :class:`DatetimeIndex`
        See Also
        --------
        at_time : Select values at a particular time of the day.
        first : Select initial periods of time series based on a date offset.
        last : Select final periods of time series based on a date offset.
        DatetimeIndex.indexer_between_time : Get just the index locations for
            values between particular times of the day.
        Examples
        --------
        >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
        >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
        >>> psdf
                             A
        2018-04-09 00:00:00  1
        2018-04-10 00:20:00  2
        2018-04-11 00:40:00  3
        2018-04-12 01:00:00  4
        >>> psdf.between_time('0:15', '0:45')
                             A
        2018-04-10 00:20:00  2
        2018-04-11 00:40:00  3
        You get the times that are *not* between two times by setting
        ``start_time`` later than ``end_time``:
        >>> psdf.between_time('0:45', '0:15')
                             A
        2018-04-09 00:00:00  1
        2018-04-12 01:00:00  4
        """
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError("between_time currently only works for axis=0")
        if not isinstance(self.index, ps.DatetimeIndex):
            raise TypeError("Index must be DatetimeIndex")
        # Work on a copy and give the index a collision-free temporary name so
        # that reset_index() inside the UDF yields a predictable first column.
        psdf = self.copy()
        psdf.index.name = verify_temp_column_name(psdf, "__index_name__")
        # The UDF returns the (reset) index as the first column, then the data.
        return_types = [psdf.index.dtype] + list(psdf.dtypes)
        def pandas_between_time(pdf) -> ps.DataFrame[return_types]:  # type: ignore
            # Delegate the actual time filtering to pandas per batch.
            return pdf.between_time(start_time, end_time, include_start, include_end).reset_index()
        # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a
        # default index, which will never be used. So use "distributed" index as a dummy to
        # avoid overhead.
        with option_context("compute.default_index_type", "distributed"):
            psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time)
        # Rebuild the original internal layout: the first output column becomes
        # the index again, the rest stay as data columns.
        return DataFrame(
            self._internal.copy(
                spark_frame=psdf._internal.spark_frame,
                index_spark_columns=psdf._internal.data_spark_columns[:1],
                index_fields=psdf._internal.data_fields[:1],
                data_spark_columns=psdf._internal.data_spark_columns[1:],
                data_fields=psdf._internal.data_fields[1:],
            )
        )
    # TODO: implement axis=1
    def at_time(
        self, time: Union[datetime.time, str], asof: bool = False, axis: Union[int, str] = 0
    ) -> "DataFrame":
        """
        Select values at particular time of day (example: 9:30AM).
        Parameters
        ----------
        time : datetime.time or str
        axis : {0 or 'index', 1 or 'columns'}, default 0
        Returns
        -------
        DataFrame
        Raises
        ------
        TypeError
            If the index is not a :class:`DatetimeIndex`
        See Also
        --------
        between_time : Select values between particular times of the day.
        DatetimeIndex.indexer_at_time : Get just the index locations for
            values at particular time of the day.
        Examples
        --------
        >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
        >>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
        >>> psdf
                             A
        2018-04-09 00:00:00  1
        2018-04-09 12:00:00  2
        2018-04-10 00:00:00  3
        2018-04-10 12:00:00  4
        >>> psdf.at_time('12:00')
                             A
        2018-04-09 12:00:00  2
        2018-04-10 12:00:00  4
        """
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError("at_time currently only works for axis=0")
        if not isinstance(self.index, ps.DatetimeIndex):
            raise TypeError("Index must be DatetimeIndex")
        # Work on a copy with a collision-free temporary index name so that
        # reset_index() inside the UDF yields a predictable first column.
        psdf = self.copy()
        psdf.index.name = verify_temp_column_name(psdf, "__index_name__")
        # The UDF returns the (reset) index as the first column, then the data.
        return_types = [psdf.index.dtype] + list(psdf.dtypes)
        # Version gate: pandas.DataFrame.at_time only accepts the `axis`
        # argument on newer pandas — presumably added in 0.24; confirm.
        if LooseVersion(pd.__version__) < LooseVersion("0.24"):
            def pandas_at_time(pdf) -> ps.DataFrame[return_types]:  # type: ignore
                return pdf.at_time(time, asof).reset_index()
        else:
            def pandas_at_time(pdf) -> ps.DataFrame[return_types]:  # type: ignore
                return pdf.at_time(time, asof, axis).reset_index()
        # apply_batch will remove the index of the pandas-on-Spark DataFrame and attach
        # a default index, which will never be used. So use "distributed" index as a dummy
        # to avoid overhead.
        with option_context("compute.default_index_type", "distributed"):
            psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time)
        # Rebuild the original internal layout: first output column becomes
        # the index again, the rest stay as data columns.
        return DataFrame(
            self._internal.copy(
                spark_frame=psdf._internal.spark_frame,
                index_spark_columns=psdf._internal.data_spark_columns[:1],
                index_fields=psdf._internal.data_fields[:1],
                data_spark_columns=psdf._internal.data_spark_columns[1:],
                data_fields=psdf._internal.data_fields[1:],
            )
        )
    def where(self, cond, other=np.nan) -> "DataFrame":
        """
        Replace values where the condition is False.
        Parameters
        ----------
        cond : boolean DataFrame
            Where cond is True, keep the original value. Where False,
            replace with corresponding value from other.
        other : scalar, DataFrame
            Entries where cond is False are replaced with corresponding value from other.
        Returns
        -------
        DataFrame
        Examples
        --------
        >>> from pyspark.pandas.config import set_option, reset_option
        >>> set_option("compute.ops_on_diff_frames", True)
        >>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
        >>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
        >>> df1
           A    B
        0  0  100
        1  1  200
        2  2  300
        3  3  400
        4  4  500
        >>> df2
           A    B
        0  0 -100
        1 -1 -200
        2 -2 -300
        3 -3 -400
        4 -4 -500
        >>> df1.where(df1 > 0).sort_index()
             A      B
        0  NaN  100.0
        1  1.0  200.0
        2  2.0  300.0
        3  3.0  400.0
        4  4.0  500.0
        >>> df1.where(df1 > 1, 10).sort_index()
            A    B
        0  10  100
        1  10  200
        2   2  300
        3   3  400
        4   4  500
        >>> df1.where(df1 > 1, df1 + 100).sort_index()
             A    B
        0  100  100
        1  101  200
        2    2  300
        3    3  400
        4    4  500
        >>> df1.where(df1 > 1, df2).sort_index()
           A    B
        0  0  100
        1 -1  200
        2  2  300
        3  3  400
        4  4  500
        When the column name of cond is different from self, it treats all values are False
        >>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0
        >>> cond
               C      D
        0   True  False
        1  False   True
        2  False  False
        3   True  False
        4  False   True
        >>> df1.where(cond).sort_index()
            A   B
        0 NaN NaN
        1 NaN NaN
        2 NaN NaN
        3 NaN NaN
        4 NaN NaN
        When the type of cond is Series, it just check boolean regardless of column name
        >>> cond = ps.Series([1, 2]) > 1
        >>> cond
        0    False
        1     True
        dtype: bool
        >>> df1.where(cond).sort_index()
             A      B
        0  NaN    NaN
        1  1.0  200.0
        2  NaN    NaN
        3  NaN    NaN
        4  NaN    NaN
        >>> reset_option("compute.ops_on_diff_frames")
        """
        from pyspark.pandas.series import Series
        # Per-column temporary names used to stage the condition and the
        # replacement values alongside the original data in a single frame.
        tmp_cond_col_name = "__tmp_cond_col_{}__".format
        tmp_other_col_name = "__tmp_other_col_{}__".format
        psdf = self.copy()
        tmp_cond_col_names = [
            tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels
        ]
        if isinstance(cond, DataFrame):
            # Align cond's columns to self's labels; labels missing from cond
            # become an all-False condition.
            cond = cond[
                [
                    (
                        cond._internal.spark_column_for(label)
                        if label in cond._internal.column_labels
                        else F.lit(False)
                    ).alias(name)
                    for label, name in zip(self._internal.column_labels, tmp_cond_col_names)
                ]
            ]
            psdf[tmp_cond_col_names] = cond
        elif isinstance(cond, Series):
            # A Series condition is broadcast: its single boolean column is
            # duplicated once per data column.
            cond = cond.to_frame()
            cond = cond[
                [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]
            ]
            psdf[tmp_cond_col_names] = cond
        else:
            raise TypeError("type of cond must be a DataFrame or Series")
        tmp_other_col_names = [
            tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels
        ]
        if isinstance(other, DataFrame):
            # Same alignment for the replacement values; missing labels fall
            # back to NaN.
            other = other[
                [
                    (
                        other._internal.spark_column_for(label)
                        if label in other._internal.column_labels
                        else F.lit(np.nan)
                    ).alias(name)
                    for label, name in zip(self._internal.column_labels, tmp_other_col_names)
                ]
            ]
            psdf[tmp_other_col_names] = other
        elif isinstance(other, Series):
            other = other.to_frame()
            other = other[
                [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]
            ]
            psdf[tmp_other_col_names] = other
        else:
            # Scalar `other`: the same literal is used for every column.
            for label in self._internal.column_labels:
                psdf[tmp_other_col_name(name_like_string(label))] = other
        # The logic above makes the Spark dataframe look like below:
        # +-----------------+---+---+------------------+-------------------+------------------+--...
        # |__index_level_0__|  A|  B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...
        # +-----------------+---+---+------------------+-------------------+------------------+--...
        # |                0|  0|100|              true|                  0|             false| ...
        # |                1|  1|200|             false|                 -1|             false| ...
        # |                3|  3|400|              true|                 -3|             false| ...
        # |                2|  2|300|             false|                 -2|              true| ...
        # |                4|  4|500|             false|                 -4|             false| ...
        # +-----------------+---+---+------------------+-------------------+------------------+--...
        data_spark_columns = []
        for label in self._internal.column_labels:
            # Keep the original value where the condition holds, otherwise use
            # the staged replacement; restore the original Spark column name.
            data_spark_columns.append(
                F.when(
                    psdf[tmp_cond_col_name(name_like_string(label))].spark.column,
                    psdf._internal.spark_column_for(label),
                )
                .otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column)
                .alias(psdf._internal.spark_column_name_for(label))
            )
        return DataFrame(
            psdf._internal.with_new_columns(
                data_spark_columns, column_labels=self._internal.column_labels  # TODO: dtypes?
            )
        )
def mask(self, cond, other=np.nan) -> "DataFrame":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean DataFrame
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.mask(df1 > 0).sort_index()
A B
0 0.0 NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> df1.mask(df1 > 1, 10).sort_index()
A B
0 0 10
1 1 10
2 10 10
3 10 10
4 10 10
>>> df1.mask(df1 > 1, df1 + 100).sort_index()
A B
0 0 200
1 1 300
2 102 400
3 103 500
4 104 600
>>> df1.mask(df1 > 1, df2).sort_index()
A B
0 0 -100
1 1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> reset_option("compute.ops_on_diff_frames")
"""
from pyspark.pandas.series import Series
if not isinstance(cond, (DataFrame, Series)):
raise TypeError("type of cond must be a DataFrame or Series")
cond_inversed = cond._apply_series_op(lambda psser: ~psser)
return self.where(cond_inversed, other)
    @property
    def index(self) -> "Index":
        """The index (row labels) Column of the DataFrame.
        Currently not supported when the DataFrame has no index.
        See Also
        --------
        Index
        """
        # Local import — presumably to avoid a circular import with the
        # indexes package; confirm.
        from pyspark.pandas.indexes.base import Index
        return Index._new_instance(self)
@property
def empty(self) -> bool:
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ps.range(10).empty
False
>>> ps.range(0).empty
True
>>> ps.DataFrame({}, index=list('abc')).empty
True
"""
return (
len(self._internal.column_labels) == 0
or self._internal.resolved_copy.spark_frame.rdd.isEmpty()
)
@property
def style(self) -> "Styler":
"""
Property returning a Styler object containing methods for
building a styled HTML representation for the DataFrame.
.. note:: currently it collects top 1000 rows and return its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ps.range(1001).style # doctest: +SKIP
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option("compute.max_rows")
pdf = self.head(max_results + 1)._to_internal_pandas()
if len(pdf) > max_results:
warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
    def set_index(self, keys, drop=True, append=False, inplace=False) -> Optional["DataFrame"]:
        """Set the DataFrame index (row labels) using one or more existing columns.
        Set the DataFrame index (row labels) using one or more existing
        columns or arrays (of the correct length). The index can replace the
        existing index or expand on it.
        Parameters
        ----------
        keys : label or array-like or list of labels/arrays
            This parameter can be either a single column key, a single array of
            the same length as the calling DataFrame, or a list containing an
            arbitrary combination of column keys and arrays. Here, "array"
            encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
        drop : bool, default True
            Delete columns to be used as the new index.
        append : bool, default False
            Whether to append columns to existing index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        Returns
        -------
        DataFrame
            Changed row labels.
        See Also
        --------
        DataFrame.reset_index : Opposite of set_index.
        Examples
        --------
        >>> df = ps.DataFrame({'month': [1, 4, 7, 10],
        ...                    'year': [2012, 2014, 2013, 2014],
        ...                    'sale': [55, 40, 84, 31]},
        ...                   columns=['month', 'year', 'sale'])
        >>> df
           month  year  sale
        0      1  2012    55
        1      4  2014    40
        2      7  2013    84
        3     10  2014    31
        Set the index to become the 'month' column:
        >>> df.set_index('month')  # doctest: +NORMALIZE_WHITESPACE
               year  sale
        month
        1      2012    55
        4      2014    40
        7      2013    84
        10     2014    31
        Create a MultiIndex using columns 'year' and 'month':
        >>> df.set_index(['year', 'month'])  # doctest: +NORMALIZE_WHITESPACE
                    sale
        year  month
        2012  1       55
        2014  4       40
        2013  7       84
        2014  10      31
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        # Normalize `keys` into a list of label tuples, the internal
        # representation for column labels.
        if is_name_like_tuple(keys):
            keys = [keys]
        elif is_name_like_value(keys):
            keys = [(keys,)]
        else:
            keys = [key if is_name_like_tuple(key) else (key,) for key in keys]
        # Every key must name an existing column.
        columns = set(self._internal.column_labels)
        for key in keys:
            if key not in columns:
                raise KeyError(name_like_string(key))
        # `drop` controls whether the chosen columns also remain data columns.
        if drop:
            column_labels = [label for label in self._internal.column_labels if label not in keys]
        else:
            column_labels = self._internal.column_labels
        if append:
            # Extend the existing index with the new levels.
            index_spark_columns = self._internal.index_spark_columns + [
                self._internal.spark_column_for(label) for label in keys
            ]
            index_names = self._internal.index_names + keys
            index_fields = self._internal.index_fields + [
                self._internal.field_for(label) for label in keys
            ]
        else:
            # Replace the existing index entirely.
            index_spark_columns = [self._internal.spark_column_for(label) for label in keys]
            index_names = keys
            index_fields = [self._internal.field_for(label) for label in keys]
        internal = self._internal.copy(
            index_spark_columns=index_spark_columns,
            index_names=index_names,
            index_fields=index_fields,
            column_labels=column_labels,
            data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels],
            data_fields=[self._internal.field_for(label) for label in column_labels],
        )
        if inplace:
            self._update_internal_frame(internal)
            return None
        else:
            return DataFrame(internal)
    def reset_index(
        self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
    ) -> Optional["DataFrame"]:
        """Reset the index, or a level of it.
        For DataFrame with multi-level index, return new DataFrame with labeling information in
        the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
        For a standard index, the index name will be used (if set), otherwise a default 'index' or
        'level_0' (if 'index' is already taken) will be used.
        Parameters
        ----------
        level : int, str, tuple, or list, default None
            Only remove the given levels from the index. Removes all levels by
            default.
        drop : bool, default False
            Do not try to insert index into dataframe columns. This resets
            the index to the default integer index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the
            labels are inserted into. By default it is inserted into the first
            level.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other
            levels are named. If None then the index name is repeated.
        Returns
        -------
        DataFrame
            DataFrame with the new index.
        See Also
        --------
        DataFrame.set_index : Opposite of reset_index.
        Examples
        --------
        >>> df = ps.DataFrame([('bird', 389.0),
        ...                    ('bird', 24.0),
        ...                    ('mammal', 80.5),
        ...                    ('mammal', np.nan)],
        ...                   index=['falcon', 'parrot', 'lion', 'monkey'],
        ...                   columns=('class', 'max_speed'))
        >>> df
                 class  max_speed
        falcon    bird      389.0
        parrot    bird       24.0
        lion    mammal       80.5
        monkey  mammal        NaN
        When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark
        does not automatically add a sequential index. The following 0, 1, 2, 3 are only
        there when we display the DataFrame.
        >>> df.reset_index()
            index   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN
        We can use the `drop` parameter to avoid the old index being added as
        a column:
        >>> df.reset_index(drop=True)
            class  max_speed
        0    bird      389.0
        1    bird       24.0
        2  mammal       80.5
        3  mammal        NaN
        You can also use `reset_index` with `MultiIndex`.
        >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
        ...                                    ('bird', 'parrot'),
        ...                                    ('mammal', 'lion'),
        ...                                    ('mammal', 'monkey')],
        ...                                   names=['class', 'name'])
        >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
        ...                                      ('species', 'type')])
        >>> df = ps.DataFrame([(389.0, 'fly'),
        ...                    ( 24.0, 'fly'),
        ...                    ( 80.5, 'run'),
        ...                    (np.nan, 'jump')],
        ...                   index=index,
        ...                   columns=columns)
        >>> df  # doctest: +NORMALIZE_WHITESPACE
                       speed species
                         max    type
        class  name
        bird   falcon  389.0     fly
               parrot   24.0     fly
        mammal lion     80.5     run
               monkey    NaN    jump
        If the index has multiple levels, we can reset a subset of them:
        >>> df.reset_index(level='class')  # doctest: +NORMALIZE_WHITESPACE
                 class  speed species
                          max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump
        If we are not dropping the index, by default, it is placed in the top
        level. We can place it in another level:
        >>> df.reset_index(level='class', col_level=1)  # doctest: +NORMALIZE_WHITESPACE
                        speed species
                 class    max    type
        name
        falcon    bird  389.0     fly
        parrot    bird   24.0     fly
        lion    mammal   80.5     run
        monkey  mammal    NaN    jump
        When the index is inserted under another level, we can specify under
        which one with the parameter `col_fill`:
        >>> df.reset_index(level='class', col_level=1,
        ...                col_fill='species')  # doctest: +NORMALIZE_WHITESPACE
                       species  speed species
                         class    max    type
        name
        falcon            bird  389.0     fly
        parrot            bird   24.0     fly
        lion            mammal   80.5     run
        monkey          mammal    NaN    jump
        If we specify a nonexistent level for `col_fill`, it is created:
        >>> df.reset_index(level='class', col_level=1,
        ...                col_fill='genus')  # doctest: +NORMALIZE_WHITESPACE
                         genus  speed species
                         class    max    type
        name
        falcon            bird  389.0     fly
        parrot            bird   24.0     fly
        lion            mammal   80.5     run
        monkey          mammal    NaN    jump
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        multi_index = self._internal.index_level > 1
        def rename(index):
            # Default column name for an unnamed index level: 'level_<i>' for
            # a MultiIndex, otherwise 'index' (or 'level_<i>' if 'index' is
            # already taken by an existing column).
            if multi_index:
                return ("level_{}".format(index),)
            else:
                if ("index",) not in self._internal.column_labels:
                    return ("index",)
                else:
                    return ("level_{}".format(index),)
        if level is None:
            # Reset every index level: all of them become data columns and the
            # resulting frame has no explicit index.
            new_column_labels = [
                name if name is not None else rename(i)
                for i, name in enumerate(self._internal.index_names)
            ]
            new_data_spark_columns = [
                scol.alias(name_like_string(label))
                for scol, label in zip(self._internal.index_spark_columns, new_column_labels)
            ]
            new_data_fields = self._internal.index_fields
            index_spark_columns = []
            index_names = []
            index_fields = []
        else:
            # Normalize `level` to a list of either all ints or all label tuples.
            if is_list_like(level):
                level = list(level)
            if isinstance(level, int) or is_name_like_tuple(level):
                level = [level]
            elif is_name_like_value(level):
                level = [(level,)]
            else:
                level = [
                    lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,)
                    for lvl in level
                ]
            if all(isinstance(l, int) for l in level):
                # Positional levels: bounds-check against the index depth.
                for lev in level:
                    if lev >= self._internal.index_level:
                        raise IndexError(
                            "Too many levels: Index has only {} level, not {}".format(
                                self._internal.index_level, lev + 1
                            )
                        )
                idx = level
            elif all(is_name_like_tuple(lev) for lev in level):
                # Named levels: translate each name into its position.
                idx = []
                for l in level:
                    try:
                        i = self._internal.index_names.index(l)
                        idx.append(i)
                    except ValueError:
                        if multi_index:
                            raise KeyError("Level unknown not found")
                        else:
                            raise KeyError(
                                "Level unknown must be same as name ({})".format(
                                    name_like_string(self._internal.index_names[0])
                                )
                            )
            else:
                raise ValueError("Level should be all int or all string.")
            idx.sort()
            new_column_labels = []
            new_data_spark_columns = []
            new_data_fields = []
            index_spark_columns = self._internal.index_spark_columns.copy()
            index_names = self._internal.index_names.copy()
            index_fields = self._internal.index_fields.copy()
            # Pop the selected levels in reverse order so earlier positions
            # are not shifted; prepend so the new columns keep level order.
            for i in idx[::-1]:
                name = index_names.pop(i)
                new_column_labels.insert(0, name if name is not None else rename(i))
                scol = index_spark_columns.pop(i)
                new_data_spark_columns.insert(0, scol.alias(name_like_string(name)))
                new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name)))
        if drop:
            # Discard the former index levels instead of inserting them as columns.
            new_data_spark_columns = []
            new_column_labels = []
            new_data_fields = []
        for label in new_column_labels:
            if label in self._internal.column_labels:
                raise ValueError("cannot insert {}, already exists".format(name_like_string(label)))
        if self._internal.column_labels_level > 1:
            # Multi-level columns: place the inserted labels at `col_level`
            # and pad the remaining levels with `col_fill`.
            column_depth = len(self._internal.column_labels[0])
            if col_level >= column_depth:
                raise IndexError(
                    "Too many levels: Index has only {} levels, not {}".format(
                        column_depth, col_level + 1
                    )
                )
            if any(col_level + len(label) > column_depth for label in new_column_labels):
                raise ValueError("Item must have length equal to number of levels.")
            new_column_labels = [
                tuple(
                    ([col_fill] * col_level)
                    + list(label)
                    + ([col_fill] * (column_depth - (len(label) + col_level)))
                )
                for label in new_column_labels
            ]
        internal = self._internal.copy(
            index_spark_columns=index_spark_columns,
            index_names=index_names,
            index_fields=index_fields,
            column_labels=new_column_labels + self._internal.column_labels,
            data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns,
            data_fields=new_data_fields + self._internal.data_fields,
        )
        if inplace:
            self._update_internal_frame(internal)
            return None
        else:
            return DataFrame(internal)
def isnull(self) -> "DataFrame":
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, gets mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
return self._apply_series_op(lambda psser: psser.isnull())
isna = isnull
def notnull(self) -> "DataFrame":
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether it's
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
return self._apply_series_op(lambda psser: psser.notnull())
notna = notnull
    def insert(
        self,
        loc: int,
        column,
        value: Union[Scalar, "Series", Iterable],
        allow_duplicates: bool = False,
    ) -> None:
        """
        Insert column into DataFrame at specified location.
        Raises a ValueError if `column` is already contained in the DataFrame,
        unless `allow_duplicates` is set to True.
        Parameters
        ----------
        loc : int
            Insertion index. Must verify 0 <= loc <= len(columns).
        column : str, number, or hashable object
            Label of the inserted column.
        value : int, Series, or array-like
        allow_duplicates : bool, optional
        Examples
        --------
        >>> psdf = ps.DataFrame([1, 2, 3])
        >>> psdf.sort_index()
           0
        0  1
        1  2
        2  3
        >>> psdf.insert(0, 'x', 4)
        >>> psdf.sort_index()
           x  0
        0  4  1
        1  4  2
        2  4  3
        >>> from pyspark.pandas.config import set_option, reset_option
        >>> set_option("compute.ops_on_diff_frames", True)
        >>> psdf.insert(1, 'y', [5, 6, 7])
        >>> psdf.sort_index()
           x  y  0
        0  4  5  1
        1  4  6  2
        2  4  7  3
        >>> psdf.insert(2, 'z', ps.Series([8, 9, 10]))
        >>> psdf.sort_index()
           x  y   z  0
        0  4  5   8  1
        1  4  6   9  2
        2  4  7  10  3
        >>> reset_option("compute.ops_on_diff_frames")
        """
        if not isinstance(loc, int):
            raise TypeError("loc must be int")
        assert 0 <= loc <= len(self.columns)
        assert allow_duplicates is False
        if not is_name_like_value(column):
            raise TypeError(
                '"column" should be a scalar value or tuple that contains scalar values'
            )
        if is_name_like_tuple(column):
            # NOTE(review): `self.columns.levels` assumes the columns are a
            # MultiIndex; a tuple `column` with flat columns would raise
            # AttributeError here — confirm whether that case is intended.
            if len(column) != len(self.columns.levels):
                # To be consistent with pandas
                raise ValueError('"column" must have length equal to number of column levels.')
        if column in self.columns:
            raise ValueError("cannot insert %s, already exists" % column)
        # Append the new column at the end, then reorder the labels so it
        # lands at position `loc`, and rebind this frame to the result.
        psdf = self.copy()
        psdf[column] = value
        columns = psdf.columns[:-1].insert(loc, psdf.columns[-1])
        psdf = psdf[columns]
        self._update_internal_frame(psdf._internal)
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None) -> "DataFrame":
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
return self._apply_series_op(
lambda psser: psser._shift(periods, fill_value), should_resolve=True
)
# TODO: axis should support 1 or 'columns' either at this moment
def diff(self, periods: int = 1, axis: Union[int, str] = 0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True)
    # TODO: axis should support 1 or 'columns' either at this moment
    def nunique(
        self,
        axis: Union[int, str] = 0,
        dropna: bool = True,
        approx: bool = False,
        rsd: float = 0.05,
    ) -> "Series":
        """
        Return number of unique elements in the object.
        Excludes NA values by default.
        Parameters
        ----------
        axis : int, default 0 or 'index'
            Can only be set to 0 at the moment.
        dropna : bool, default True
            Don’t include NaN in the count.
        approx: bool, default False
            If False, will use the exact algorithm and return the exact number of unique.
            If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
            for large amount of data.
            Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
        rsd: float, default 0.05
            Maximum estimation error allowed in the HyperLogLog algorithm.
            Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
        Returns
        -------
        The number of unique values per column as a pandas-on-Spark Series.
        Examples
        --------
        >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
        >>> df.nunique()
        A    3
        B    1
        dtype: int64
        >>> df.nunique(dropna=False)
        A    3
        B    2
        dtype: int64
        On big data, we recommend using the approximate algorithm to speed up this function.
        The result will be very close to the exact unique count.
        >>> df.nunique(approx=True)
        A    3
        B    1
        dtype: int64
        """
        from pyspark.pandas.series import first_series
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')
        # Build a single-row Spark frame: a dummy (null) index column plus one
        # distinct-count column per column label.
        sdf = self._internal.spark_frame.select(
            [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
            + [
                self._psser_for(label)._nunique(dropna, approx, rsd)
                for label in self._internal.column_labels
            ]
        )
        # The data is expected to be small so it's fine to transpose/use default index.
        with ps.option_context("compute.max_rows", 1):
            internal = self._internal.copy(
                spark_frame=sdf,
                index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
                index_names=[None],
                index_fields=[None],
                data_spark_columns=[
                    scol_for(sdf, col) for col in self._internal.data_spark_column_names
                ],
                data_fields=None,
            )
            # Transpose the one-row frame so columns become the Series index.
            return first_series(DataFrame(internal).transpose())
def round(self, decimals=0) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
.. note:: If `decimals` is a Series, it is expected to be small,
as all the data is loaded into the driver's memory.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ps.Series):
decimals = {
k if isinstance(k, tuple) else (k,): v
for k, v in decimals._to_internal_pandas().items()
}
elif isinstance(decimals, dict):
decimals = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()}
elif isinstance(decimals, int):
decimals = {k: decimals for k in self._internal.column_labels}
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
def op(psser):
label = psser._column_label
if label in decimals:
return F.round(psser.spark.column, decimals[label]).alias(
psser._internal.data_spark_column_names[0]
)
else:
return psser
return self._apply_series_op(op)
    def _mark_duplicates(self, subset=None, keep="first"):
        """
        Append a boolean duplicate-flag column to the resolved Spark frame.
        Returns a tuple of (Spark DataFrame, flag column name); the flag is True
        for rows considered duplicates under `subset` and `keep`. Internal helper
        used by `duplicated` (and presumably the drop-duplicates path — confirm).
        """
        if subset is None:
            subset = self._internal.column_labels
        else:
            # Normalize `subset` to a list of column-label tuples.
            if is_name_like_tuple(subset):
                subset = [subset]
            elif is_name_like_value(subset):
                subset = [(subset,)]
            else:
                subset = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset]
            # Reject labels that do not exist in this frame, like pandas does.
            diff = set(subset).difference(set(self._internal.column_labels))
            if len(diff) > 0:
                raise KeyError(", ".join([name_like_string(d) for d in diff]))
        group_cols = [self._internal.spark_column_name_for(label) for label in subset]
        sdf = self._internal.resolved_copy.spark_frame
        # Pick a column name guaranteed not to clash with existing columns.
        column = verify_temp_column_name(sdf, "__duplicated__")
        if keep == "first" or keep == "last":
            if keep == "first":
                ord_func = spark.functions.asc
            else:
                ord_func = spark.functions.desc
            # Within each group of identical subset values, every row except the
            # first in natural order (or last, via descending order) is flagged.
            window = (
                Window.partitionBy(group_cols)
                .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME))
                .rowsBetween(Window.unboundedPreceding, Window.currentRow)
            )
            sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
        elif not keep:
            # keep=False: flag every member of any group that has more than one row.
            window = Window.partitionBy(group_cols).rowsBetween(
                Window.unboundedPreceding, Window.unboundedFollowing
            )
            sdf = sdf.withColumn(column, F.count("*").over(window) > 1)
        else:
            raise ValueError("'keep' only supports 'first', 'last' and False")
        return sdf, column
    def duplicated(self, subset=None, keep="first") -> "Series":
        """
        Return boolean Series denoting duplicate rows, optionally only considering certain columns.
        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only consider certain columns for identifying duplicates,
            by default use all of the columns
        keep : {'first', 'last', False}, default 'first'
            - ``first`` : Mark duplicates as ``True`` except for the first occurrence.
            - ``last`` : Mark duplicates as ``True`` except for the last occurrence.
            - False : Mark all duplicates as ``True``.
        Returns
        -------
        duplicated : Series
        Examples
        --------
        >>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
        ...                   columns = ['a', 'b', 'c'])
        >>> df
           a  b  c
        0  1  1  1
        1  1  1  1
        2  1  1  1
        3  3  4  5
        >>> df.duplicated().sort_index()
        0    False
        1     True
        2     True
        3    False
        dtype: bool
        Mark duplicates as ``True`` except for the last occurrence.
        >>> df.duplicated(keep='last').sort_index()
        0     True
        1     True
        2    False
        3    False
        dtype: bool
        Mark all duplicates as ``True``.
        >>> df.duplicated(keep=False).sort_index()
        0     True
        1     True
        2     True
        3    False
        dtype: bool
        """
        from pyspark.pandas.series import first_series
        sdf, column = self._mark_duplicates(subset, keep)
        # Keep only the index columns plus the flag, renamed to the default
        # single-Series column name.
        sdf = sdf.select(
            self._internal.index_spark_columns
            + [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]
        )
        # Wrap the result as a one-column frame and unwrap it into a Series.
        return first_series(
            DataFrame(
                InternalFrame(
                    spark_frame=sdf,
                    index_spark_columns=[
                        scol_for(sdf, col) for col in self._internal.index_spark_column_names
                    ],
                    index_names=self._internal.index_names,
                    index_fields=self._internal.index_fields,
                    column_labels=[None],  # type: ignore
                    data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],
                )
            )
        )
# TODO: support other as DataFrame or array-like
def dot(self, other: "Series") -> "Series":
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series
It can also be called using ``self @ other`` in Python >= 3.5.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from pyspark.pandas.config import option_context
>>> with option_context(
... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True
... ): # doctest: +NORMALIZE_WHITESPACE
... psdf = ps.DataFrame({'a': range(1001)})
... psser = ps.Series([2], index=['a'])
... psdf.dot(psser)
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Parameters
----------
other : Series
The other object to compute the matrix product with.
Returns
-------
Series
Return the matrix product between self and other as a Series.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> psser = ps.Series([1, 1, 2, 1])
>>> psdf.dot(psser)
0 -4
1 5
dtype: int64
Note how shuffling of the objects does not change the result.
>>> psser2 = psser.reindex([1, 0, 2, 3])
>>> psdf.dot(psser2)
0 -4
1 5
dtype: int64
>>> psdf @ psser2
0 -4
1 5
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, ps.Series):
raise TypeError("Unsupported type {}".format(type(other).__name__))
else:
return cast(ps.Series, other.dot(self.transpose())).rename(None)
    def __matmul__(self, other):
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        # `df @ other` simply delegates to DataFrame.dot.
        return self.dot(other)
def to_pandas_on_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> "DataFrame":
"""
Converts the existing DataFrame into a pandas-on-Spark DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a pandas-on-Spark DataFrame. If running on
an existing pandas-on-Spark DataFrame, the method returns itself.
If a pandas-on-Spark DataFrame is converted to a Spark DataFrame and then back
to pandas-on-Spark, it will lose the index information and the original index
will be turned into a normal column.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> psdf = spark_df.to_pandas_on_spark()
>>> psdf
col1 col2
0 1 3
1 2 4
We can specify the index columns.
>>> psdf = spark_df.to_pandas_on_spark(index_col='col1')
>>> psdf # doctest: +NORMALIZE_WHITESPACE
col2
col1
1 3
2 4
Calling to_pandas_on_spark on a pandas-on-Spark DataFrame simply returns itself.
>>> df.to_pandas_on_spark()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
assert isinstance(self, spark.DataFrame), type(self)
from pyspark.pandas.namespace import _get_index_map
index_spark_columns, index_names = _get_index_map(self, index_col)
internal = InternalFrame(
spark_frame=self, index_spark_columns=index_spark_columns, index_names=index_names
)
return DataFrame(internal)
# Keep to_koalas for backward compatibility for now.
def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None) -> "DataFrame":
warnings.warn(
"DataFrame.to_koalas is deprecated. Use DataFrame.to_pandas_on_spark instead.",
FutureWarning,
)
return self.to_pandas_on_spark(index_col)
    def to_table(
        self,
        name: str,
        format: Optional[str] = None,
        mode: str = "overwrite",
        partition_cols: Optional[Union[str, List[str]]] = None,
        index_col: Optional[Union[str, List[str]]] = None,
        **options
    ) -> None:
        # Thin wrapper; the actual implementation lives in the Spark accessor.
        return self.spark.to_table(name, format, mode, partition_cols, index_col, **options)
    # Reuse the accessor method's docstring so help() shows the full contract.
    to_table.__doc__ = SparkFrameMethods.to_table.__doc__
def to_delta(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the destination
exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path,
... partition_cols=['date', 'country']) # doctest: +SKIP
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2012-01-01"') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self.spark.to_spark_io(
path=path,
mode=mode,
format="delta",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_parquet(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
compression: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
builder = self.to_spark(index_col=index_col).write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
if compression is not None:
builder.option("compression", compression)
builder.options(**options).format("parquet").save(path)
def to_orc(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a ORC file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_orc
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')
>>> df.to_orc(
... '%s/to_orc/foo.orc' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self.spark.to_spark_io(
path=path,
mode=mode,
format="orc",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""An alias for :func:`DataFrame.spark.to_spark_io`.
See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.
.. deprecated:: 3.2.0
Use :func:`DataFrame.spark.to_spark_io` instead.
"""
warnings.warn("Deprecated in 3.2, Use spark.to_spark_io instead.", FutureWarning)
return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)
to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
return self.spark.frame(index_col)
to_spark.__doc__ = SparkFrameMethods.__doc__
def to_pandas(self) -> pd.DataFrame:
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.to_pandas_frame.copy()
    def assign(self, **kwargs) -> "DataFrame":
        """
        Assign new columns to a DataFrame.
        Returns a new object with all original columns in addition to new ones.
        Existing columns that are re-assigned will be overwritten.
        Parameters
        ----------
        **kwargs : dict of {str: callable, Series or Index}
            The column names are keywords. If the values are
            callable, they are computed on the DataFrame and
            assigned to the new columns. The callable must not
            change input DataFrame (though pandas-on-Spark doesn't check it).
            If the values are not callable, (e.g. a Series or a literal),
            they are simply assigned.
        Returns
        -------
        DataFrame
            A new DataFrame with the new columns in addition to
            all the existing columns.
        Examples
        --------
        >>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},
        ...                   index=['Portland', 'Berkeley'])
        >>> df
                  temp_c
        Portland    17.0
        Berkeley    25.0
        Where the value is a callable, evaluated on `df`:
        >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
                  temp_c  temp_f
        Portland    17.0    62.6
        Berkeley    25.0    77.0
        Alternatively, the same behavior can be achieved by directly
        referencing an existing Series or sequence and you can also
        create multiple columns within the same assign.
        >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
        ...                      temp_k=df['temp_c'] + 273.15,
        ...                      temp_idx=df.index)
        >>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]
                  temp_c  temp_f  temp_k  temp_idx
        Portland    17.0    62.6  290.15  Portland
        Berkeley    25.0    77.0  298.15  Berkeley
        Notes
        -----
        Assigning multiple columns within the same ``assign`` is possible
        but you cannot refer to newly created or modified columns. This
        feature is supported in pandas for Python 3.6 and later but not in
        pandas-on-Spark. In pandas-on-Spark, all items are computed first,
        and then assigned.
        """
        # Validation and frame construction are shared with other callers via _assign.
        return self._assign(kwargs)
    def _assign(self, kwargs):
        """Shared implementation of `assign`: validate values and build the new frame."""
        assert isinstance(kwargs, dict)
        from pyspark.pandas.indexes import MultiIndex
        from pyspark.pandas.series import IndexOpsMixin
        for k, v in kwargs.items():
            # Only Series/Index-like objects, Spark columns, callables and scalars
            # are assignable; a MultiIndex is explicitly rejected.
            is_invalid_assignee = (
                not (isinstance(v, (IndexOpsMixin, spark.Column)) or callable(v) or is_scalar(v))
            ) or isinstance(v, MultiIndex)
            if is_invalid_assignee:
                raise TypeError(
                    "Column assignment doesn't support type " "{0}".format(type(v).__name__)
                )
            if callable(v):
                # Callables are evaluated against this frame before assignment.
                kwargs[k] = v(self)
        # Normalize keys to label tuples and map each value to a
        # (Spark column, field-or-None) pair.
        pairs = {
            (k if is_name_like_tuple(k) else (k,)): (
                (v.spark.column, v._internal.data_fields[0])
                if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex)
                else (v, None)
                if isinstance(v, spark.Column)
                else (F.lit(v), None)
            )
            for k, v in kwargs.items()
        }
        scols = []
        data_fields = []
        for label in self._internal.column_labels:
            # Replace an existing column when any prefix of its label matches a key;
            # otherwise keep the original column and field.
            for i in range(len(label)):
                if label[: len(label) - i] in pairs:
                    scol, field = pairs[label[: len(label) - i]]
                    name = self._internal.spark_column_name_for(label)
                    scol = scol.alias(name)
                    if field is not None:
                        field = field.copy(name=name)
                    break
            else:
                scol = self._internal.spark_column_for(label)
                field = self._internal.field_for(label)
            scols.append(scol)
            data_fields.append(field)
        column_labels = self._internal.column_labels.copy()
        # Append brand-new columns: keys that matched no existing label prefix.
        for label, (scol, field) in pairs.items():
            if label not in set(i[: len(label)] for i in self._internal.column_labels):
                name = name_like_string(label)
                scols.append(scol.alias(name))
                if field is not None:
                    field = field.copy(name=name)
                data_fields.append(field)
                column_labels.append(label)
        level = self._internal.column_labels_level
        # Pad shorter labels with "" so every label has the same number of levels.
        column_labels = [
            tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels
        ]
        internal = self._internal.with_new_columns(
            scols, column_labels=column_labels, data_fields=data_fields
        )
        return DataFrame(internal)
@staticmethod
def from_records(
data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None,
exclude: list = None,
columns: list = None,
coerce_float: bool = False,
nrows: int = None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ps.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ps.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ps.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(
pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)
)
    def to_records(self, index=True, column_dtypes=None, index_dtypes=None) -> np.recarray:
        """
        Convert DataFrame to a NumPy record array.
        Index will be included as the first field of the record array if
        requested.
        .. note:: This method should only be used if the resulting NumPy ndarray is
            expected to be small, as all the data is loaded into the driver's memory.
        Parameters
        ----------
        index : bool, default True
            Include index in resulting record array, stored in 'index'
            field or using the index label, if set.
        column_dtypes : str, type, dict, default None
            If a string or type, the data type to store all columns. If
            a dictionary, a mapping of column names and indices (zero-indexed)
            to specific data types.
        index_dtypes : str, type, dict, default None
            If a string or type, the data type to store all index levels. If
            a dictionary, a mapping of index level names and indices
            (zero-indexed) to specific data types.
            This mapping is applied only if `index=True`.
        Returns
        -------
        numpy.recarray
            NumPy ndarray with the DataFrame labels as fields and each row
            of the DataFrame as entries.
        See Also
        --------
        DataFrame.from_records: Convert structured or record ndarray
            to DataFrame.
        numpy.recarray: An ndarray that allows field access using
            attributes, analogous to typed columns in a
            spreadsheet.
        Examples
        --------
        >>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
        ...                   index=['a', 'b'])
        >>> df
           A     B
        a  1  0.50
        b  2  0.75
        >>> df.to_records() # doctest: +SKIP
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
        The index can be excluded from the record array:
        >>> df.to_records(index=False) # doctest: +SKIP
        rec.array([(1, 0.5 ), (2, 0.75)],
                  dtype=[('A', '<i8'), ('B', '<f8')])
        Specification of dtype for columns is new in pandas 0.24.0.
        Data types can be specified for the columns:
        >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
        rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
                  dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
        Specification of dtype for index is new in pandas 0.24.0.
        Data types can also be specified for the index:
        >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
        rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
                  dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
        """
        # `locals()` must be captured first so it contains exactly the call
        # arguments, which are forwarded to pandas' to_records after validation.
        args = locals()
        psdf = self
        return validate_arguments_and_invoke_function(
            psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args
        )
    def copy(self, deep=None) -> "DataFrame":
        """
        Make a copy of this object's indices and data.
        Parameters
        ----------
        deep : None
            this parameter is not supported but just dummy parameter to match pandas.
        Returns
        -------
        copy : DataFrame
        Examples
        --------
        >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
        ...                   columns=['x', 'y', 'z', 'w'])
        >>> df
           x  y  z  w
        0  1  3  5  7
        1  2  4  6  8
        >>> df_copy = df.copy()
        >>> df_copy
           x  y  z  w
        0  1  3  5  7
        1  2  4  6  8
        """
        # Sharing the same InternalFrame is safe: a new DataFrame wrapper is
        # enough for copy semantics here (no data is duplicated).
        return DataFrame(self._internal)
    def dropna(
        self, axis=0, how="any", thresh=None, subset=None, inplace=False
    ) -> Optional["DataFrame"]:
        """
        Remove missing values.
        Parameters
        ----------
        axis : {0 or 'index'}, default 0
            Determine if rows or columns which contain missing values are
            removed.
            * 0, or 'index' : Drop rows which contain missing values.
        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame, when we have
            at least one NA or all NA.
            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.
        thresh : int, optional
            Require that many non-NA values.
        subset : array-like, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.
        Returns
        -------
        DataFrame
            DataFrame with NA entries dropped from it.
        See Also
        --------
        DataFrame.drop : Drop specified labels from columns.
        DataFrame.isnull: Indicate missing values.
        DataFrame.notnull : Indicate existing (non-missing) values.
        Examples
        --------
        >>> df = ps.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [None, 'Batmobile', 'Bullwhip'],
        ...                    "born": [None, "1940-04-25", None]},
        ...                   columns=['name', 'toy', 'born'])
        >>> df
               name        toy        born
        0    Alfred       None        None
        1    Batman  Batmobile  1940-04-25
        2  Catwoman   Bullwhip        None
        Drop the rows where at least one element is missing.
        >>> df.dropna()
             name        toy        born
        1  Batman  Batmobile  1940-04-25
        Drop the columns where at least one element is missing.
        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman
        Drop the rows where all elements are missing.
        >>> df.dropna(how='all')
               name        toy        born
        0    Alfred       None        None
        1    Batman  Batmobile  1940-04-25
        2  Catwoman   Bullwhip        None
        Keep only the rows with at least 2 non-NA values.
        >>> df.dropna(thresh=2)
               name        toy        born
        1    Batman  Batmobile  1940-04-25
        2  Catwoman   Bullwhip        None
        Define in which columns to look for missing values.
        >>> df.dropna(subset=['name', 'born'])
             name        toy        born
        1  Batman  Batmobile  1940-04-25
        Keep the DataFrame with valid entries in the same variable.
        >>> df.dropna(inplace=True)
        >>> df
             name        toy        born
        1  Batman  Batmobile  1940-04-25
        """
        axis = validate_axis(axis)
        inplace = validate_bool_kwarg(inplace, "inplace")
        # `how` is only validated when `thresh` is absent: a given `thresh`
        # takes precedence over `how` below.
        if thresh is None:
            if how is None:
                raise TypeError("must specify how or thresh")
            elif how not in ("any", "all"):
                raise ValueError("invalid how option: {h}".format(h=how))
        # Normalize `subset` into a list of label tuples, the internal
        # representation used for (possibly multi-level) column labels.
        if subset is not None:
            if isinstance(subset, str):
                labels = [(subset,)]  # type: Optional[List[Tuple]]
            elif isinstance(subset, tuple):
                labels = [subset]
            else:
                labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
        else:
            labels = None
        if axis == 0:
            # Drop rows: build a single Spark column counting the non-null
            # entries per row over the considered labels, then filter on it.
            if labels is not None:
                invalids = [label for label in labels if label not in self._internal.column_labels]
                if len(invalids) > 0:
                    raise KeyError(invalids)
            else:
                labels = self._internal.column_labels
            cnt = reduce(
                lambda x, y: x + y,
                [
                    F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0)
                    for label in labels
                ],
                F.lit(0),
            )
            if thresh is not None:
                # Keep rows having at least `thresh` non-null values.
                pred = cnt >= F.lit(int(thresh))
            elif how == "any":
                # Keep rows where every considered column is non-null.
                pred = cnt == F.lit(len(labels))
            elif how == "all":
                # Keep rows with at least one non-null value.
                pred = cnt > F.lit(0)
            internal = self._internal.with_filter(pred)
            if inplace:
                self._update_internal_frame(internal)
                return None
            else:
                return DataFrame(internal)
        else:
            assert axis == 1
            internal = self._internal.resolved_copy
            if labels is not None:
                # For axis=1 the `subset` labels identify index values (rows)
                # to consider, so each must span every index level.
                if any(len(lbl) != internal.index_level for lbl in labels):
                    raise ValueError(
                        "The length of each subset must be the same as the index size."
                    )
                # Keep only the rows whose full index equals one of the subset
                # labels before counting nulls per column.
                cond = reduce(
                    lambda x, y: x | y,
                    [
                        reduce(
                            lambda x, y: x & y,
                            [
                                scol == F.lit(l)
                                for l, scol in zip(lbl, internal.index_spark_columns)
                            ],
                        )
                        for lbl in labels
                    ],
                )
                internal = internal.with_filter(cond)
            # Count non-null entries of every column in one Spark job; NaN is
            # treated as missing for float/double columns.
            null_counts = []
            for label in internal.column_labels:
                scol = internal.spark_column_for(label)
                if isinstance(internal.spark_type_for(label), (FloatType, DoubleType)):
                    cond = scol.isNull() | F.isnan(scol)
                else:
                    cond = scol.isNull()
                null_counts.append(
                    F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label))
                )
            # counts[:-1] are per-column non-null counts; counts[-1] is the
            # total row count (F.count("*")).
            counts = internal.spark_frame.select(null_counts + [F.count("*")]).head()
            if thresh is not None:
                column_labels = [
                    label
                    for label, cnt in zip(internal.column_labels, counts)
                    if (cnt or 0) >= int(thresh)
                ]
            elif how == "any":
                # Keep columns whose non-null count equals the row count.
                column_labels = [
                    label
                    for label, cnt in zip(internal.column_labels, counts)
                    if (cnt or 0) == counts[-1]
                ]
            elif how == "all":
                column_labels = [
                    label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0
                ]
            psdf = self[column_labels]
            if inplace:
                self._update_internal_frame(psdf._internal)
                return None
            else:
                return psdf
    # TODO: add 'limit' when value parameter exists
    def fillna(
        self, value=None, method=None, axis=None, inplace=False, limit=None
    ) -> Optional["DataFrame"]:
        """Fill NA/NaN values.
        .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
            without specifying partition specification. This leads to move all data into
            single partition in single machine and could cause serious
            performance degradation. Avoid this method against very large dataset.
        Parameters
        ----------
        value : scalar, dict, Series
            Value to use to fill holes. alternately a dict/Series of values
            specifying which value to use for each column.
            DataFrame is not supported.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series pad / ffill: propagate last valid
            observation forward to next valid backfill / bfill:
            use NEXT valid observation to fill gap
        axis : {0 or `index`}
            1 and `columns` are not supported.
        inplace : boolean, default False
            Fill in place (do not create a new object)
        limit : int, default None
            If method is specified, this is the maximum number of consecutive NaN values to
            forward/backward fill. In other words, if there is a gap with more than this number of
            consecutive NaNs, it will only be partially filled. If method is not specified,
            this is the maximum number of entries along the entire axis where NaNs will be filled.
            Must be greater than 0 if not None
        Returns
        -------
        DataFrame
            DataFrame with NA entries filled.
        Examples
        --------
        >>> df = ps.DataFrame({
        ...     'A': [None, 3, None, None],
        ...     'B': [2, 4, None, 3],
        ...     'C': [None, None, None, 1],
        ...     'D': [0, 1, 5, 4]
        ...     },
        ...     columns=['A', 'B', 'C', 'D'])
        >>> df
             A    B    C  D
        0  NaN  2.0  NaN  0
        1  3.0  4.0  NaN  1
        2  NaN  NaN  NaN  5
        3  NaN  3.0  1.0  4
        Replace all NaN elements with 0s.
        >>> df.fillna(0)
             A    B    C  D
        0  0.0  2.0  0.0  0
        1  3.0  4.0  0.0  1
        2  0.0  0.0  0.0  5
        3  0.0  3.0  1.0  4
        We can also propagate non-null values forward or backward.
        >>> df.fillna(method='ffill')
             A    B    C  D
        0  NaN  2.0  NaN  0
        1  3.0  4.0  NaN  1
        2  3.0  4.0  NaN  5
        3  3.0  3.0  1.0  4
        Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
        2, and 3 respectively.
        >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
        >>> df.fillna(value=values)
             A    B    C  D
        0  0.0  2.0  2.0  0
        1  3.0  4.0  2.0  1
        2  0.0  1.0  2.0  5
        3  0.0  3.0  1.0  4
        """
        axis = validate_axis(axis)
        # Column-wise fill is not implemented; only row axis is accepted.
        if axis != 0:
            raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
        if value is not None:
            if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
                raise TypeError("Unsupported type %s" % type(value).__name__)
            # `limit` is only meaningful together with `method`.
            if limit is not None:
                raise ValueError("limit parameter for value is not support now")
            if isinstance(value, pd.Series):
                value = value.to_dict()
            if isinstance(value, dict):
                for v in value.values():
                    if not isinstance(v, (float, int, str, bool)):
                        raise TypeError("Unsupported type %s" % type(v).__name__)
                # Normalize dict keys to label tuples so they can be matched
                # against (possibly multi-level) column labels by prefix.
                value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()}
                # Per-column op: fill with the value of the first key that is
                # a prefix of the column label; the for/else leaves columns
                # without a matching key untouched.
                def op(psser):
                    label = psser._column_label
                    for k, v in value.items():
                        if k == label[: len(k)]:
                            return psser._fillna(
                                value=value[k], method=method, axis=axis, limit=limit
                            )
                    else:
                        return psser
            else:
                op = lambda psser: psser._fillna(value=value, method=method, axis=axis, limit=limit)
        elif method is not None:
            op = lambda psser: psser._fillna(value=value, method=method, axis=axis, limit=limit)
        else:
            raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
        # method-based fills need a resolved (cached) frame for the Window ops.
        psdf = self._apply_series_op(op, should_resolve=(method is not None))
        inplace = validate_bool_kwarg(inplace, "inplace")
        if inplace:
            self._update_internal_frame(psdf._internal, requires_same_anchor=False)
            return None
        else:
            return psdf
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
) -> Optional["DataFrame"]:
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, list, tuple or dict
Value to be replaced.
value : int, float, string, list or tuple
Value to use to replace holes. The replacement value must be an int, float,
or string.
If value is a list or tuple, value should be of the same length with to_replace.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ps.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Dicts can be used to specify different replacement values for different existing values
To use a dict in this way the value parameter should be None
>>> df.replace({'Mjolnir': 'Stormbuster'})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict can specify that different values should be replaced in different columns
The value parameter should not be None in this case
>>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Nested dictionaries
The value parameter should be None to use a nested dict in this way
>>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
"""
if method != "pad":
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
inplace = validate_bool_kwarg(inplace, "inplace")
if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)):
raise TypeError("Unsupported type {}".format(type(value).__name__))
if to_replace is not None and not isinstance(
to_replace, (int, float, str, list, tuple, dict)
):
raise TypeError("Unsupported type {}".format(type(to_replace).__name__))
if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)):
if len(value) != len(to_replace):
raise ValueError("Length of to_replace and value must be same")
if isinstance(to_replace, dict) and (
value is not None or all(isinstance(i, dict) for i in to_replace.values())
):
def op(psser):
if psser.name in to_replace:
return psser.replace(
to_replace=to_replace[psser.name], value=value, regex=regex
)
else:
return psser
else:
op = lambda psser: psser.replace(to_replace=to_replace, value=value, regex=regex)
psdf = self._apply_series_op(op)
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise TypeError(
"List-like value are not supported for 'lower' and 'upper' at the " + "moment"
)
if lower is None and upper is None:
return self
return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper))
def head(self, n: int = 5) -> "DataFrame":
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
if n < 0:
n = len(self) + n
if n <= 0:
return DataFrame(self._internal.with_filter(F.lit(False)))
else:
sdf = self._internal.resolved_copy.spark_frame
if get_option("compute.ordered_head"):
sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)
return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))
def last(self, offset: Union[str, DateOffset]) -> "DataFrame":
"""
Select final periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the last few rows based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
>>> psdf
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> psdf.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice the data for 3 last calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
# Check index type should be format DateTime
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex")
offset = to_offset(offset)
from_date = self.index.max() - offset
return cast(DataFrame, self.loc[from_date:])
def first(self, offset: Union[str, DateOffset]) -> "DataFrame":
"""
Select first periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the first 3 days.
Returns
-------
DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
>>> psdf
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> psdf.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice the data for 3 first calendar days were returned, not the first
3 observed days in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
# Check index type should be format DatetimeIndex
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex")
offset = to_offset(offset)
to_date = self.index.min() + offset
return cast(DataFrame, self.loc[:to_date])
    def pivot_table(
        self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None
    ) -> "DataFrame":
        """
        Create a spreadsheet-style pivot table as a DataFrame. The levels in
        the pivot table will be stored in MultiIndex objects (hierarchical
        indexes) on the index and columns of the result DataFrame.
        Parameters
        ----------
        values : column to aggregate.
            They should be either a list less than three or a string.
        index : column (string) or list of columns
            If an array is passed, it must be the same length as the data.
            The list should contain string.
        columns : column
            Columns used in the pivot operation. Only one column is supported and
            it should be a string.
        aggfunc : function (string), dict, default mean
            If dict is passed, the key is column to aggregate and value
            is function or list of functions.
        fill_value : scalar, default None
            Value to replace missing values with.
        Returns
        -------
        table : DataFrame
        Examples
        --------
        >>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
        ...                          "bar", "bar", "bar", "bar"],
        ...                    "B": ["one", "one", "one", "two", "two",
        ...                          "one", "one", "two", "two"],
        ...                    "C": ["small", "large", "large", "small",
        ...                          "small", "large", "small", "small",
        ...                          "large"],
        ...                    "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
        ...                    "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
        ...                   columns=['A', 'B', 'C', 'D', 'E'])
        >>> df
             A    B      C  D  E
        0  foo  one  small  1  2
        1  foo  one  large  2  4
        2  foo  one  large  2  5
        3  foo  two  small  3  5
        4  foo  two  small  3  6
        5  bar  one  large  4  6
        6  bar  one  small  5  8
        7  bar  two  small  6  9
        8  bar  two  large  7  9
        This first example aggregates values by taking the sum.
        >>> table = df.pivot_table(values='D', index=['A', 'B'],
        ...                        columns='C', aggfunc='sum')
        >>> table.sort_index()  # doctest: +NORMALIZE_WHITESPACE
        C        large  small
        A   B
        bar one    4.0      5
            two    7.0      6
        foo one    4.0      1
            two    NaN      6
        We can also fill missing values using the `fill_value` parameter.
        >>> table = df.pivot_table(values='D', index=['A', 'B'],
        ...                        columns='C', aggfunc='sum', fill_value=0)
        >>> table.sort_index()  # doctest: +NORMALIZE_WHITESPACE
        C        large  small
        A   B
        bar one      4      5
            two      7      6
        foo one      4      1
            two      0      6
        We can also calculate multiple types of aggregations for any given
        value column.
        >>> table = df.pivot_table(values=['D'], index =['C'],
        ...                        columns="A", aggfunc={'D': 'mean'})
        >>> table.sort_index()  # doctest: +NORMALIZE_WHITESPACE
                 D
        A      bar       foo
        C
        large  5.5  2.000000
        small  5.5  2.333333
        The next example aggregates on multiple values.
        >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
        ...                        aggfunc={'D': 'mean', 'E': 'sum'})
        >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
                 D             E
        A      bar       foo bar foo
        C
        large  5.5  2.000000  15   9
        small  5.5  2.333333  17  13
        """
        # --- argument validation -------------------------------------------
        if not is_name_like_value(columns):
            raise TypeError("columns should be one column name.")
        if not is_name_like_value(values) and not (
            isinstance(values, list) and all(is_name_like_value(v) for v in values)
        ):
            raise TypeError("values should be one column or list of columns.")
        if not isinstance(aggfunc, str) and (
            not isinstance(aggfunc, dict)
            or not all(
                is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items()
            )
        ):
            raise TypeError(
                "aggfunc must be a dict mapping from column name "
                "to aggregate functions (string)."
            )
        if isinstance(aggfunc, dict) and index is None:
            raise NotImplementedError(
                "pivot_table doesn't support aggfunc" " as dict and without index."
            )
        if isinstance(values, list) and index is None:
            raise NotImplementedError("values can't be a list without index.")
        if columns not in self.columns:
            raise ValueError("Wrong columns {}.".format(name_like_string(columns)))
        # Normalize `columns` and `values` into the internal label-tuple form.
        if not is_name_like_tuple(columns):
            columns = (columns,)
        if isinstance(values, list):
            values = [col if is_name_like_tuple(col) else (col,) for col in values]
            if not all(
                isinstance(self._internal.spark_type_for(col), NumericType) for col in values
            ):
                raise TypeError("values should be a numeric type.")
        else:
            values = values if is_name_like_tuple(values) else (values,)
            if not isinstance(self._internal.spark_type_for(values), NumericType):
                raise TypeError("values should be a numeric type.")
        # --- build the aggregation expressions -----------------------------
        # One "<aggfunc>(`col`) as `col`" expression per value column.
        if isinstance(aggfunc, str):
            if isinstance(values, list):
                agg_cols = [
                    F.expr(
                        "{1}(`{0}`) as `{0}`".format(
                            self._internal.spark_column_name_for(value), aggfunc
                        )
                    )
                    for value in values
                ]
            else:
                agg_cols = [
                    F.expr(
                        "{1}(`{0}`) as `{0}`".format(
                            self._internal.spark_column_name_for(values), aggfunc
                        )
                    )
                ]
        elif isinstance(aggfunc, dict):
            aggfunc = {
                key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items()
            }
            agg_cols = [
                F.expr(
                    "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value)
                )
                for key, value in aggfunc.items()
            ]
            agg_columns = [key for key, _ in aggfunc.items()]
            # A dict aggfunc must cover exactly the requested value columns.
            if set(agg_columns) != set(values):
                raise ValueError("Columns in aggfunc must be the same as values.")
        # --- run the Spark groupBy().pivot().agg() -------------------------
        sdf = self._internal.resolved_copy.spark_frame
        if index is None:
            sdf = (
                sdf.groupBy()
                .pivot(pivot_col=self._internal.spark_column_name_for(columns))
                .agg(*agg_cols)
            )
        elif isinstance(index, list):
            index = [label if is_name_like_tuple(label) else (label,) for label in index]
            sdf = (
                sdf.groupBy([self._internal.spark_column_name_for(label) for label in index])
                .pivot(pivot_col=self._internal.spark_column_name_for(columns))
                .agg(*agg_cols)
            )
        else:
            raise TypeError("index should be a None or a list of columns.")
        if fill_value is not None and isinstance(fill_value, (int, float)):
            sdf = sdf.fillna(fill_value)
        # --- rebuild the InternalFrame for the result ----------------------
        if index is not None:
            index_columns = [self._internal.spark_column_name_for(label) for label in index]
            index_fields = [self._internal.field_for(label) for label in index]
            if isinstance(values, list):
                data_columns = [column for column in sdf.columns if column not in index_columns]
                if len(values) > 1:
                    # If we have two values, Spark will return column's name
                    # in this format: column_values, where column contains
                    # their values in the DataFrame and values is
                    # the column list passed to the pivot_table().
                    # E.g. if column is b and values is ['b','e'],
                    # then ['2_b', '2_e', '3_b', '3_e'].
                    # We sort the columns of Spark DataFrame by values.
                    data_columns.sort(key=lambda x: x.split("_", 1)[1])
                    sdf = sdf.select(index_columns + data_columns)
                    column_name_to_index = dict(
                        zip(self._internal.data_spark_column_names, self._internal.column_labels)
                    )
                    # Rebuild MultiIndex column labels as (value_label..., pivot_value).
                    column_labels = [
                        tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]])
                        for name in data_columns
                    ]
                    column_label_names = ([None] * column_labels_level(values)) + [columns]
                    internal = InternalFrame(
                        spark_frame=sdf,
                        index_spark_columns=[scol_for(sdf, col) for col in index_columns],
                        index_names=index,
                        index_fields=index_fields,
                        column_labels=column_labels,
                        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
                        column_label_names=column_label_names,  # type: ignore
                    )
                    psdf = DataFrame(internal)  # type: "DataFrame"
                else:
                    # Single value column: labels are (value_label..., pivot_value).
                    column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]
                    column_label_names = ([None] * len(values[0])) + [columns]
                    internal = InternalFrame(
                        spark_frame=sdf,
                        index_spark_columns=[scol_for(sdf, col) for col in index_columns],
                        index_names=index,
                        index_fields=index_fields,
                        column_labels=column_labels,
                        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
                        column_label_names=column_label_names,  # type: ignore
                    )
                    psdf = DataFrame(internal)
            else:
                # Scalar `values`: let InternalFrame infer column labels from sdf.
                internal = InternalFrame(
                    spark_frame=sdf,
                    index_spark_columns=[scol_for(sdf, col) for col in index_columns],
                    index_names=index,
                    index_fields=index_fields,
                    column_label_names=[columns],
                )
                psdf = DataFrame(internal)
        else:
            # No `index` given: fabricate index columns from the value name.
            if isinstance(values, list):
                index_values = values[-1]
            else:
                index_values = values
            # NOTE(review): `index_values` is a label tuple here, so this loop
            # iterates its components (one per label level) — confirm intended.
            index_map = OrderedDict()  # type: Dict[str, Optional[Tuple]]
            for i, index_value in enumerate(index_values):
                colname = SPARK_INDEX_NAME_FORMAT(i)
                sdf = sdf.withColumn(colname, F.lit(index_value))
                index_map[colname] = None
            internal = InternalFrame(
                spark_frame=sdf,
                index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()],
                index_names=list(index_map.values()),
                column_label_names=[columns],
            )
            psdf = DataFrame(internal)
        # Cast the pivoted column level back to the dtype of the original
        # pivot column (Spark returns the pivoted values as strings).
        psdf_columns = psdf.columns
        if isinstance(psdf_columns, pd.MultiIndex):
            psdf.columns = psdf_columns.set_levels(
                psdf_columns.levels[-1].astype(
                    spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)
                ),
                level=-1,
            )
        else:
            psdf.columns = psdf_columns.astype(
                spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)
            )
        return psdf
    def pivot(self, index=None, columns=None, values=None) -> "DataFrame":
        """
        Return reshaped DataFrame organized by given index / column values.
        Reshape data (produce a "pivot" table) based on column values. Uses
        unique values from specified `index` / `columns` to form axes of the
        resulting DataFrame. This function does not support data
        aggregation.
        Parameters
        ----------
        index : string, optional
            Column to use to make new frame's index. If None, uses
            existing index.
        columns : string
            Column to use to make new frame's columns.
        values : string, object or a list of the previous
            Column(s) to use for populating new frame's values.
        Returns
        -------
        DataFrame
            Returns reshaped DataFrame.
        See Also
        --------
        DataFrame.pivot_table : Generalization of pivot that can handle
            duplicate values for one index/column pair.
        Examples
        --------
        >>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
        ...                            'two'],
        ...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
        ...                    'baz': [1, 2, 3, 4, 5, 6],
        ...                    'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
        ...                   columns=['foo', 'bar', 'baz', 'zoo'])
        >>> df
           foo bar  baz zoo
        0  one   A    1   x
        1  one   B    2   y
        2  one   C    3   z
        3  two   A    4   q
        4  two   B    5   w
        5  two   C    6   t
        >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
        ... # doctest: +NORMALIZE_WHITESPACE
        bar  A  B  C
        foo
        one  1  2  3
        two  4  5  6
        >>> df.pivot(columns='bar', values='baz').sort_index()  # doctest: +NORMALIZE_WHITESPACE
        bar    A    B    C
        0    1.0  NaN  NaN
        1    NaN  2.0  NaN
        2    NaN  NaN  3.0
        3    4.0  NaN  NaN
        4    NaN  5.0  NaN
        5    NaN  NaN  6.0
        Notice that, unlike pandas raises an ValueError when duplicated values are found,
        pandas-on-Spark's pivot still works with its first value it meets during operation because
        pivot is an expensive operation and it is preferred to permissively execute over failing
        fast when processing large data.
        >>> df = ps.DataFrame({"foo": ['one', 'one', 'two', 'two'],
        ...                    "bar": ['A', 'A', 'B', 'C'],
        ...                    "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
        >>> df
           foo bar  baz
        0  one   A    1
        1  one   A    2
        2  two   B    3
        3  two   C    4
        >>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
        ... # doctest: +NORMALIZE_WHITESPACE
        bar    A    B    C
        foo
        one  1.0  NaN  NaN
        two  NaN  3.0  4.0
        It also support multi-index and multi-index column.
        >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')])
        >>> df = df.set_index(('a', 'bar'), append=True)
        >>> df  # doctest: +NORMALIZE_WHITESPACE
                      a   b
                    foo baz
          (a, bar)
        0 A         one   1
        1 A         one   2
        2 B         two   3
        3 C         two   4
        >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index()
        ... # doctest: +NORMALIZE_WHITESPACE
        ('a', 'foo')  one  two
          (a, bar)
        0 A           1.0  NaN
        1 A           2.0  NaN
        2 B           NaN  3.0
        3 C           NaN  4.0
        """
        if columns is None:
            raise ValueError("columns should be set.")
        if values is None:
            raise ValueError("values should be set.")
        should_use_existing_index = index is not None
        if should_use_existing_index:
            df = self
            index = [index]
        else:
            # The index after `reset_index()` will never be used, so use "distributed" index
            # as a dummy to avoid overhead.
            with option_context("compute.default_index_type", "distributed"):
                df = self.reset_index()
            # Use the materialized former-index columns as the pivot index.
            index = df._internal.column_labels[: self._internal.index_level]
        # Delegate to pivot_table with aggfunc="first": on duplicate
        # index/column pairs the first value met wins instead of raising
        # like pandas does (see docstring note).
        df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="first")
        if should_use_existing_index:
            return df
        else:
            # Restore the original index names that reset_index() replaced.
            internal = df._internal.copy(index_names=self._internal.index_names)
            return DataFrame(internal)
@property
def columns(self) -> pd.Index:
"""The column labels of the DataFrame."""
names = [
name if name is None or len(name) > 1 else name[0]
for name in self._internal.column_label_names
]
if self._internal.column_labels_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names)
else:
columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0])
return columns
    @columns.setter
    def columns(self, columns) -> None:
        # Normalize the assigned labels into the internal tuple representation.
        if isinstance(columns, pd.MultiIndex):
            column_labels = columns.tolist()
        else:
            column_labels = [
                col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns
            ]
        # The number of new labels must match the current number of columns.
        if len(self._internal.column_labels) != len(column_labels):
            raise ValueError(
                "Length mismatch: Expected axis has {} elements, "
                "new values have {} elements".format(
                    len(self._internal.column_labels), len(column_labels)
                )
            )
        # Only a pandas Index/MultiIndex carries level names to propagate.
        if isinstance(columns, pd.Index):
            column_label_names = [
                name if is_name_like_tuple(name) else (name,) for name in columns.names
            ]  # type: Optional[List]
        else:
            column_label_names = None
        # Rename each column Series, then swap them into the frame in place.
        pssers = [
            self._psser_for(label).rename(name)
            for label, name in zip(self._internal.column_labels, column_labels)
        ]
        self._update_internal_frame(
            self._internal.with_new_columns(pssers, column_label_names=column_label_names)
        )
@property
def dtypes(self) -> pd.Series:
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series(
[self._psser_for(label).dtype for label in self._internal.column_labels],
index=pd.Index(
[label if len(label) > 1 else label[0] for label in self._internal.column_labels]
),
)
def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
    """
    Return a subset of the DataFrame's columns based on the column dtypes.

    Parameters
    ----------
    include, exclude : scalar or list-like
        A selection of dtypes or strings to be included/excluded. At least
        one of these parameters must be supplied. It also takes Spark SQL
        DDL type strings, for instance, 'string' and 'date'.

    Returns
    -------
    DataFrame
        The subset of the frame including the dtypes in ``include`` and
        excluding the dtypes in ``exclude``.

    Raises
    ------
    ValueError
        * If both of ``include`` and ``exclude`` are empty

            >>> df = ps.DataFrame({'a': [1, 2] * 3,
            ...                    'b': [True, False] * 3,
            ...                    'c': [1.0, 2.0] * 3})
            >>> df.select_dtypes()
            Traceback (most recent call last):
            ...
            ValueError: at least one of include or exclude must be nonempty

        * If ``include`` and ``exclude`` have overlapping elements

            >>> df = ps.DataFrame({'a': [1, 2] * 3,
            ...                    'b': [True, False] * 3,
            ...                    'c': [1.0, 2.0] * 3})
            >>> df.select_dtypes(include='a', exclude='a')
            Traceback (most recent call last):
            ...
            ValueError: include and exclude overlap on {'a'}

    Notes
    -----
    * To select datetimes, use ``np.datetime64``, ``'datetime'`` or
      ``'datetime64'``

    Examples
    --------
    >>> df = ps.DataFrame({'a': [1, 2] * 3,
    ...                    'b': [True, False] * 3,
    ...                    'c': [1.0, 2.0] * 3,
    ...                    'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
    >>> df
       a      b    c  d
    0  1   True  1.0  a
    1  2  False  2.0  b
    2  1   True  1.0  a
    3  2  False  2.0  b
    4  1   True  1.0  a
    5  2  False  2.0  b

    >>> df.select_dtypes(include='bool')
           b
    0   True
    1  False
    2   True
    3  False
    4   True
    5  False

    >>> df.select_dtypes(include=['float64'], exclude=['int'])
         c
    0  1.0
    1  2.0
    2  1.0
    3  2.0
    4  1.0
    5  2.0

    >>> df.select_dtypes(exclude=['int'])
           b    c  d
    0   True  1.0  a
    1  False  2.0  b
    2   True  1.0  a
    3  False  2.0  b
    4   True  1.0  a
    5  False  2.0  b

    Spark SQL DDL type strings can be used as well.

    >>> df.select_dtypes(exclude=['string'])
       a      b    c
    0  1   True  1.0
    1  2  False  2.0
    2  1   True  1.0
    3  2  False  2.0
    4  1   True  1.0
    5  2  False  2.0
    """
    from pyspark.sql.types import _parse_datatype_string  # type: ignore

    # Normalize scalars to tuples so both arguments are iterables of names.
    if not is_list_like(include):
        include = (include,) if include is not None else ()
    if not is_list_like(exclude):
        exclude = (exclude,) if exclude is not None else ()

    if not any((include, exclude)):
        raise ValueError("at least one of include or exclude must be " "nonempty")

    # can't both include AND exclude!
    if set(include).intersection(set(exclude)):
        raise ValueError(
            "include and exclude overlap on {inc_ex}".format(
                inc_ex=set(include).intersection(set(exclude))
            )
        )

    def _parse_with(values, parser):
        # Try to interpret every entry with ``parser``; entries the parser
        # rejects are silently dropped, because they may still be matched
        # by the other type system (Spark DDL string vs. numpy dtype).
        # Note: ``except Exception`` (not a bare ``except:``) so that
        # KeyboardInterrupt/SystemExit are never swallowed here.
        parsed = []
        for value in values:
            try:
                parsed.append(parser(value))
            except Exception:
                pass
        return parsed

    # Handle Spark types
    include_spark_type = _parse_with(include, _parse_datatype_string)
    exclude_spark_type = _parse_with(exclude, _parse_datatype_string)

    # Handle pandas types
    include_numpy_type = _parse_with(include, infer_dtype_from_object)
    exclude_numpy_type = _parse_with(exclude, infer_dtype_from_object)

    # A column is kept when either its numpy dtype or its Spark type matches
    # ``include`` (or, with only ``exclude`` given, matches neither exclusion).
    column_labels = []
    for label in self._internal.column_labels:
        if len(include) > 0:
            should_include = (
                infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type
                or self._internal.spark_type_for(label) in include_spark_type
            )
        else:
            should_include = not (
                infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type
                or self._internal.spark_type_for(label) in exclude_spark_type
            )
        if should_include:
            column_labels.append(label)

    return DataFrame(
        self._internal.with_new_columns([self._psser_for(label) for label in column_labels])
    )
def droplevel(self, level, axis=0) -> "DataFrame":
    """
    Return DataFrame with requested index / column level(s) removed.

    Parameters
    ----------
    level: int, str, or list-like
        If a string is given, must be the name of a level If list-like, elements must
        be names or positional indexes of levels.
    axis: {0 or ‘index’, 1 or ‘columns’}, default 0

    Returns
    -------
    DataFrame with requested index / column level(s) removed.

    Examples
    --------
    >>> df = ps.DataFrame(
    ...     [[3, 4], [7, 8], [11, 12]],
    ...     index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]),
    ... )
    >>> df.columns = pd.MultiIndex.from_tuples([
    ...     ('c', 'e'), ('d', 'f')
    ... ], names=['level_1', 'level_2'])
    >>> df  # doctest: +NORMALIZE_WHITESPACE
    level_1   c  d
    level_2   e  f
    a b
    1 2      3   4
    5 6      7   8
    9 10    11  12
    >>> df.droplevel('a')  # doctest: +NORMALIZE_WHITESPACE
    level_1   c  d
    level_2   e  f
    b
    2        3   4
    6        7   8
    10      11  12
    >>> df.droplevel('level_2', axis=1)  # doctest: +NORMALIZE_WHITESPACE
    level_1  c  d
    a b
    1 2      3   4
    5 6      7   8
    9 10    11  12
    """
    axis = validate_axis(axis)
    if axis == 0:
        # A scalar (including a tuple used as a single level name) is wrapped
        # so the validation loop below can treat every input uniformly.
        if not isinstance(level, (tuple, list)):
            level = [level]
        index_names = self.index.names
        nlevels = self._internal.index_level
        # Collect the *positional* numbers of the levels to drop; names are
        # translated to positions, negative positions count from the end.
        int_level = set()
        for n in level:
            if isinstance(n, int):
                if n < 0:
                    n = n + nlevels
                    if n < 0:
                        raise IndexError(
                            "Too many levels: Index has only {} levels, "
                            "{} is not a valid level number".format(nlevels, (n - nlevels))
                        )
                if n >= nlevels:
                    raise IndexError(
                        "Too many levels: Index has only {} levels, not {}".format(
                            nlevels, (n + 1)
                        )
                    )
            else:
                if n not in index_names:
                    raise KeyError("Level {} not found".format(n))
                n = index_names.index(n)
            int_level.add(n)
        # pandas requires at least one index level to remain after dropping.
        if len(level) >= nlevels:
            raise ValueError(
                "Cannot remove {} levels from an index with {} levels: "
                "at least one level must be left.".format(len(level), nlevels)
            )
        # Keep the (spark column, name, field) triple of every surviving level.
        index_spark_columns, index_names, index_fields = zip(
            *[
                item
                for i, item in enumerate(
                    zip(
                        self._internal.index_spark_columns,
                        self._internal.index_names,
                        self._internal.index_fields,
                    )
                )
                if i not in int_level
            ]
        )
        internal = self._internal.copy(
            index_spark_columns=list(index_spark_columns),
            index_names=list(index_names),
            index_fields=list(index_fields),
        )
        return DataFrame(internal)
    else:
        # Column levels: delegate to pandas' MultiIndex.droplevel on a copy.
        psdf = self.copy()
        psdf.columns = psdf.columns.droplevel(level)
        return psdf
def drop(
    self, labels=None, axis=1, columns: Union[Any, Tuple, List[Any], List[Tuple]] = None
) -> "DataFrame":
    """
    Drop specified labels from columns.

    Remove columns by specifying label names and axis=1 or columns.
    When specifying both labels and columns, only labels will be dropped.
    Removing rows is yet to be implemented.

    Parameters
    ----------
    labels : single label or list-like
        Column labels to drop.
    axis : {1 or 'columns'}, default 1
        .. dropna currently only works for axis=1 'columns'
           axis=0 is yet to be implemented.
    columns : single label or list-like
        Alternative to specifying axis (``labels, axis=1``
        is equivalent to ``columns=labels``).

    Returns
    -------
    dropped : DataFrame

    See Also
    --------
    Series.dropna

    Examples
    --------
    >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
    ...                   columns=['x', 'y', 'z', 'w'])
    >>> df
       x  y  z  w
    0  1  3  5  7
    1  2  4  6  8

    >>> df.drop('x', axis=1)
       y  z  w
    0  3  5  7
    1  4  6  8

    >>> df.drop(['y', 'z'], axis=1)
       x  w
    0  1  7
    1  2  8

    >>> df.drop(columns=['y', 'z'])
       x  w
    0  1  7
    1  2  8

    Also support for MultiIndex

    >>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
    ...                   columns=['x', 'y', 'z', 'w'])
    >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
    >>> df.columns = pd.MultiIndex.from_tuples(columns)
    >>> df  # doctest: +NORMALIZE_WHITESPACE
       a     b
       x  y  z  w
    0  1  3  5  7
    1  2  4  6  8
    >>> df.drop('a')  # doctest: +NORMALIZE_WHITESPACE
       b
       z  w
    0  5  7
    1  6  8

    Notes
    -----
    Currently only axis = 1 is supported in this function,
    axis = 0 is yet to be implemented.
    """
    if labels is not None:
        axis = validate_axis(axis)
        if axis == 1:
            return self.drop(columns=labels)
        raise NotImplementedError("Drop currently only works for axis=1")
    elif columns is not None:
        # Normalize ``columns`` to a list of label tuples.
        if is_name_like_tuple(columns):
            columns = [columns]
        elif is_name_like_value(columns):
            columns = [(columns,)]
        else:
            columns = [col if is_name_like_tuple(col) else (col,) for col in columns]
        # A label is dropped when a requested label is a prefix of it, so
        # dropping 'a' on a MultiIndex also drops ('a', 'x'), ('a', 'y'), ...
        drop_column_labels = set(
            label
            for label in self._internal.column_labels
            for col in columns
            if label[: len(col)] == col
        )
        if len(drop_column_labels) == 0:
            raise KeyError(columns)
        # Keep the remaining labels.  A plain comprehension (instead of
        # unpacking a zip of pairs) also handles the edge case where *all*
        # columns are dropped, which previously raised a spurious
        # ValueError from an empty ``zip(*...)`` unpacking.
        keep_column_labels = [
            label
            for label in self._internal.column_labels
            if label not in drop_column_labels
        ]
        internal = self._internal.with_new_columns(
            [self._psser_for(label) for label in keep_column_labels]
        )
        return DataFrame(internal)
    else:
        raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def _sort(
    self, by: List[Column], ascending: Union[bool, List[bool]], inplace: bool, na_position: str
):
    """Sort by the given Spark columns; shared backend of ``sort_values``/``sort_index``.

    Parameters
    ----------
    by : list of Spark Column
        Columns to sort by, in priority order.
    ascending : bool or list of bool
        Sort direction; a single bool is broadcast to all of ``by``.
    inplace : bool
        If True, update this frame's internal state and return None.
    na_position : {'first', 'last'}
        Where NULLs are placed for every sort column.

    Returns
    -------
    DataFrame or None
        New sorted DataFrame, or None when ``inplace`` is True.

    Raises
    ------
    ValueError
        If ``ascending`` and ``by`` have different lengths, or
        ``na_position`` is not 'first'/'last'.
    """
    if isinstance(ascending, bool):
        ascending = [ascending] * len(by)
    if len(ascending) != len(by):
        raise ValueError(
            "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by))
        )
    if na_position not in ("first", "last"):
        raise ValueError("invalid na_position: '{}'".format(na_position))
    # Mapper: Get a spark column function for (ascending, na_position) combination
    # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
    # The JVM column object (_jc) is called directly and re-wrapped in a Column.
    mapper = {
        (True, "first"): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
        (True, "last"): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
        (False, "first"): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
        (False, "last"): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
    }
    by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
    # NATURAL_ORDER_COLUMN_NAME is appended as the last sort key — presumably
    # to break ties with the original row order; confirm against InternalFrame.
    sdf = self._internal.resolved_copy.spark_frame.sort(*by, NATURAL_ORDER_COLUMN_NAME)
    psdf = DataFrame(self._internal.with_new_sdf(sdf))  # type: DataFrame
    if inplace:
        self._update_internal_frame(psdf._internal)
        return None
    else:
        return psdf
def sort_values(
    self,
    by: Union[Any, List[Any], Tuple, List[Tuple]],
    ascending: Union[bool, List[bool]] = True,
    inplace: bool = False,
    na_position: str = "last",
) -> Optional["DataFrame"]:
    """
    Sort by the values along either axis.

    Parameters
    ----------
    by : str or list of str
    ascending : bool or list of bool, default True
        Sort ascending vs. descending. Specify list for multiple sort
        orders. If this is a list of bools, must match the length of
        the by.
    inplace : bool, default False
        if True, perform operation in-place
    na_position : {'first', 'last'}, default 'last'
        `first` puts NaNs at the beginning, `last` puts NaNs at the end

    Returns
    -------
    sorted_obj : DataFrame

    Examples
    --------
    >>> df = ps.DataFrame({
    ...     'col1': ['A', 'B', None, 'D', 'C'],
    ...     'col2': [2, 9, 8, 7, 4],
    ...     'col3': [0, 9, 4, 2, 3],
    ...   },
    ...   columns=['col1', 'col2', 'col3'])
    >>> df
       col1  col2  col3
    0     A     2     0
    1     B     9     9
    2  None     8     4
    3     D     7     2
    4     C     4     3

    Sort by col1

    >>> df.sort_values(by=['col1'])
       col1  col2  col3
    0     A     2     0
    1     B     9     9
    4     C     4     3
    3     D     7     2
    2  None     8     4

    Sort Descending

    >>> df.sort_values(by='col1', ascending=False)
       col1  col2  col3
    3     D     7     2
    4     C     4     3
    1     B     9     9
    0     A     2     0
    2  None     8     4

    Sort by multiple columns

    >>> df = ps.DataFrame({
    ...     'col1': ['A', 'A', 'B', None, 'D', 'C'],
    ...     'col2': [2, 1, 9, 8, 7, 4],
    ...     'col3': [0, 1, 9, 4, 2, 3],
    ...   },
    ...   columns=['col1', 'col2', 'col3'])
    >>> df.sort_values(by=['col1', 'col2'])
       col1  col2  col3
    1     A     1     1
    0     A     2     0
    2     B     9     9
    5     C     4     3
    4     D     7     2
    3  None     8     4
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    # Normalize ``by`` to a list of column labels.
    if is_name_like_value(by):
        by = [by]
    else:
        assert is_list_like(by), type(by)
    # Resolve every label to the underlying Spark column of its Series.
    new_by = []
    for colname in by:
        ser = self[colname]
        # A non-unique label (e.g. a partial MultiIndex key selecting
        # several columns) does not yield a single Series and is rejected.
        if not isinstance(ser, ps.Series):
            raise ValueError(
                "The column %s is not unique. For a multi-index, the label must be a tuple "
                "with elements corresponding to each level." % name_like_string(colname)
            )
        new_by.append(ser.spark.column)
    return self._sort(by=new_by, ascending=ascending, inplace=inplace, na_position=na_position)
def sort_index(
    self,
    axis: int = 0,
    level: Optional[Union[int, List[int]]] = None,
    ascending: bool = True,
    inplace: bool = False,
    kind: Optional[str] = None,
    na_position: str = "last",
) -> Optional["DataFrame"]:
    """
    Sort object by labels (along an axis)

    Parameters
    ----------
    axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
    level : int or level name or list of ints or list of level names
        if not None, sort on values in specified index level(s)
    ascending : boolean, default True
        Sort ascending vs. descending
    inplace : bool, default False
        if True, perform operation in-place
    kind : str, default None
        pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
        default None
    na_position : {‘first’, ‘last’}, default ‘last’
        first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
        MultiIndex.

    Returns
    -------
    sorted_obj : DataFrame

    Examples
    --------
    >>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])

    >>> df.sort_index()
           A
    a    1.0
    b    2.0
    NaN  NaN

    >>> df.sort_index(ascending=False)
           A
    b    2.0
    a    1.0
    NaN  NaN

    >>> df.sort_index(na_position='first')
           A
    NaN  NaN
    a    1.0
    b    2.0

    >>> df.sort_index(inplace=True)
    >>> df
           A
    a    1.0
    b    2.0
    NaN  NaN

    >>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},
    ...                   index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
    ...                   columns=['A', 'B'])

    >>> df.sort_index()
         A  B
    a 0  3  0
      1  2  1
    b 0  1  2
      1  0  3

    >>> df.sort_index(level=1)  # doctest: +SKIP
         A  B
    a 0  3  0
    b 0  1  2
    a 1  2  1
    b 1  0  3

    >>> df.sort_index(level=[1, 0])
         A  B
    a 0  3  0
    b 0  1  2
    a 1  2  1
    b 1  0  3
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError("No other axis than 0 are supported at the moment")
    if kind is not None:
        raise NotImplementedError(
            "Specifying the sorting algorithm is not supported at the moment."
        )
    # Choose the index columns to sort by: all levels when ``level`` is
    # None/empty, the requested subset otherwise.
    if level is None or (is_list_like(level) and len(level) == 0):  # type: ignore
        by = self._internal.index_spark_columns
    elif is_list_like(level):
        by = [self._internal.index_spark_columns[lvl] for lvl in level]  # type: ignore
    else:
        by = [self._internal.index_spark_columns[level]]  # type: ignore
    return self._sort(by=by, ascending=ascending, inplace=inplace, na_position=na_position)
def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
    """
    Swap levels i and j in a MultiIndex on a particular axis.

    Parameters
    ----------
    i, j : int or str
        Levels of the indices to be swapped. Can pass level name as string.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to swap levels on. 0 or 'index' for row-wise, 1 or
        'columns' for column-wise.

    Returns
    -------
    DataFrame
        DataFrame with levels swapped in MultiIndex.

    Examples
    --------
    >>> midx = pd.MultiIndex.from_arrays(
    ...     [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])
    >>> midx  # doctest: +SKIP
    MultiIndex([( 'red', 1, 's'),
                ('blue', 2, 'm')],
               names=['color', 'number', 'size'])

    Swap levels in a MultiIndex on index.

    >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)
    >>> psdf  # doctest: +NORMALIZE_WHITESPACE
                       x  y
    color number size
    red   1      s     5  5
    blue  2      m     6  6

    >>> psdf.swaplevel()  # doctest: +NORMALIZE_WHITESPACE
                       x  y
    color size number
    red   s    1       5  5
    blue  m    2       6  6

    >>> psdf.swaplevel(0, 1)  # doctest: +NORMALIZE_WHITESPACE
                       x  y
    number color size
    1      red   s     5  5
    2      blue  m     6  6

    >>> psdf.swaplevel('number', 'size')  # doctest: +NORMALIZE_WHITESPACE
                       x  y
    color size number
    red   s    1       5  5
    blue  m    2       6  6

    Swap levels in a MultiIndex on columns.

    >>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})
    >>> psdf.columns = midx
    >>> psdf
    color  red blue
    number   1    2
    size     s    m
    0        5    5
    1        6    6

    >>> psdf.swaplevel(axis=1)
    color  red blue
    size     s    m
    number   1    2
    0        5    5
    1        6    6

    >>> psdf.swaplevel(axis=1)
    color  red blue
    size     s    m
    number   1    2
    0        5    5
    1        6    6

    >>> psdf.swaplevel(0, 1, axis=1)
    number   1    2
    color  red blue
    size     s    m
    0        5    5
    1        6    6

    >>> psdf.swaplevel('number', 'color', axis=1)
    number   1    2
    color  red blue
    size     s    m
    0        5    5
    1        6    6
    """
    # Dispatch to the row-index or column-index helper based on the axis.
    resolved_axis = validate_axis(axis)
    if resolved_axis == 1:
        new_internal = self._swaplevel_columns(i, j)
    else:
        assert resolved_axis == 0
        new_internal = self._swaplevel_index(i, j)
    return DataFrame(new_internal)
def swapaxes(self, i: Union[str, int], j: Union[str, int], copy: bool = True) -> "DataFrame":
    """
    Interchange axes and swap values axes appropriately.

    .. note:: This method is based on an expensive operation due to the nature
        of big data. Internally it needs to generate each row for each value, and
        then group twice - it is a huge operation. To prevent misusage, this method
        has the 'compute.max_rows' default limit of input length, and raises a ValueError.

            >>> from pyspark.pandas.config import option_context
            >>> with option_context('compute.max_rows', 1000):  # doctest: +NORMALIZE_WHITESPACE
            ...     ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)
            Traceback (most recent call last):
            ...
            ValueError: Current DataFrame has more then the given limit 1000 rows.
            Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
            to retrieve to retrieve more than 1000 rows. Note that, before changing the
            'compute.max_rows', this operation is considerably expensive.

    Parameters
    ----------
    i: {0 or 'index', 1 or 'columns'}. The axis to swap.
    j: {0 or 'index', 1 or 'columns'}. The axis to swap.
    copy : bool, default True.

    Returns
    -------
    DataFrame

    Examples
    --------
    >>> psdf = ps.DataFrame(
    ...     [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']
    ... )
    >>> psdf
       a  b  c
    x  1  2  3
    y  4  5  6
    z  7  8  9
    >>> psdf.swapaxes(i=1, j=0)
       x  y  z
    a  1  4  7
    b  2  5  8
    c  3  6  9
    >>> psdf.swapaxes(i=1, j=1)
       a  b  c
    x  1  2  3
    y  4  5  6
    z  7  8  9
    """
    # Only copy=True is supported; swapping the two axes of a 2-D frame is
    # either a no-op (i == j) or a transpose.
    assert copy is True
    return self.copy() if validate_axis(i) == validate_axis(j) else self.transpose()
def _swaplevel_columns(self, i, j) -> InternalFrame:
    """Return an InternalFrame with column levels ``i`` and ``j`` swapped.

    Helper for :meth:`swaplevel` with ``axis=1``.  ``i`` and ``j`` may be
    level numbers (negative counts from the end) or level names of the
    column MultiIndex.

    Raises
    ------
    KeyError
        If a level name is not found among the column level names.
    IndexError
        If a level number is out of range for the column levels.
    """
    assert isinstance(self.columns, pd.MultiIndex)
    for index in (i, j):
        if not isinstance(index, int) and index not in self.columns.names:
            raise KeyError("Level %s not found" % index)
    i = i if isinstance(i, int) else self.columns.names.index(i)
    j = j if isinstance(j, int) else self.columns.names.index(j)
    # Validate against the number of column *levels*.  The previous check
    # compared against len(self.columns) — the number of columns — and the
    # message reported the row-index level count, both of which are wrong
    # whenever the column count differs from the column level count.
    nlevels = self.columns.nlevels
    for index in (i, j):
        if index >= nlevels or index < -nlevels:
            raise IndexError(
                "Too many levels: Columns have only %s levels, "
                "%s is not a valid level number" % (nlevels, index)
            )
    # Swap the level names and the corresponding element of every label.
    column_label_names = self._internal.column_label_names.copy()
    column_label_names[i], column_label_names[j] = (
        column_label_names[j],
        column_label_names[i],
    )
    column_labels = self._internal._column_labels
    column_label_list = [list(label) for label in column_labels]
    for label_list in column_label_list:
        label_list[i], label_list[j] = label_list[j], label_list[i]
    column_labels = [tuple(x) for x in column_label_list]
    internal = self._internal.copy(
        column_label_names=list(column_label_names), column_labels=list(column_labels)
    )
    return internal
def _swaplevel_index(self, i, j) -> InternalFrame:
    """Return an InternalFrame with row-index levels ``i`` and ``j`` swapped.

    Helper for :meth:`swaplevel` with ``axis=0``; ``i``/``j`` may be level
    numbers (negative counts from the end) or index level names.
    """
    assert isinstance(self.index, ps.MultiIndex)
    # Level names must exist before they can be translated to positions.
    for level in (i, j):
        if not isinstance(level, int) and level not in self.index.names:
            raise KeyError("Level %s not found" % level)
    if not isinstance(i, int):
        i = self.index.names.index(i)
    if not isinstance(j, int):
        j = self.index.names.index(j)
    nlevels = self._internal.index_level
    for level in (i, j):
        if level >= nlevels or level < -nlevels:
            raise IndexError(
                "Too many levels: Index has only %s levels, "
                "%s is not a valid level number" % (nlevels, level)
            )
    # Swap the (spark column, name, field) triples of the two levels.
    mapping = list(
        zip(
            self._internal.index_spark_columns,
            self._internal.index_names,
            self._internal.index_fields,
        )
    )
    mapping[i], mapping[j] = mapping[j], mapping[i]
    swapped_columns, swapped_names, swapped_fields = zip(*mapping)
    return self._internal.copy(
        index_spark_columns=list(swapped_columns),
        index_names=list(swapped_names),
        index_fields=list(swapped_fields),
    )
# TODO: add keep = First
def nlargest(self, n: int, columns: "Any") -> "DataFrame":
    """
    Return the first `n` rows ordered by `columns` in descending order.

    Return the first `n` rows with the largest values in `columns`, in
    descending order. The columns that are not specified are returned as
    well, but not used for ordering.

    This method is equivalent to
    ``df.sort_values(columns, ascending=False).head(n)``, but more
    performant in pandas.
    In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
    the two would have same performance.

    Parameters
    ----------
    n : int
        Number of rows to return.
    columns : label or list of labels
        Column label(s) to order by.

    Returns
    -------
    DataFrame
        The first `n` rows ordered by the given columns in descending
        order.

    See Also
    --------
    DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
        ascending order.
    DataFrame.sort_values : Sort DataFrame by the values.
    DataFrame.head : Return the first `n` rows without re-ordering.

    Notes
    -----
    This function cannot be used with all column types. For example, when
    specifying columns with `object` or `category` dtypes, ``TypeError`` is
    raised.

    Examples
    --------
    >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
    ...                    'Y': [6, 7, 8, 9, 10, 11, 12]})
    >>> df
         X   Y
    0  1.0   6
    1  2.0   7
    2  3.0   8
    3  5.0   9
    4  6.0  10
    5  7.0  11
    6  NaN  12

    In the following example, we will use ``nlargest`` to select the three
    rows having the largest values in column "population".

    >>> df.nlargest(n=3, columns='X')
         X   Y
    5  7.0  11
    4  6.0  10
    3  5.0   9

    >>> df.nlargest(n=3, columns=['Y', 'X'])
         X   Y
    6  NaN  12
    5  7.0  11
    4  6.0  10
    """
    # Descending sort, then take the leading n rows.
    ordered = self.sort_values(by=columns, ascending=False)
    return ordered.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: "Any") -> "DataFrame":
    """
    Return the first `n` rows ordered by `columns` in ascending order.

    Return the first `n` rows with the smallest values in `columns`, in
    ascending order. The columns that are not specified are returned as
    well, but not used for ordering.

    This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
    but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query
    optimizer, the two would have same performance.

    Parameters
    ----------
    n : int
        Number of items to retrieve.
    columns : list or str
        Column name or names to order by.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
        descending order.
    DataFrame.sort_values : Sort DataFrame by the values.
    DataFrame.head : Return the first `n` rows without re-ordering.

    Examples
    --------
    >>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
    ...                    'Y': [6, 7, 8, 9, 10, 11, 12]})
    >>> df
         X   Y
    0  1.0   6
    1  2.0   7
    2  3.0   8
    3  5.0   9
    4  6.0  10
    5  7.0  11
    6  NaN  12

    In the following example, we will use ``nsmallest`` to select the
    three rows having the smallest values in column "a".

    >>> df.nsmallest(n=3, columns='X')  # doctest: +NORMALIZE_WHITESPACE
         X   Y
    0  1.0   6
    1  2.0   7
    2  3.0   8

    To order by the largest values in column "a" and then "c", we can
    specify multiple columns like in the next example.

    >>> df.nsmallest(n=3, columns=['Y', 'X'])  # doctest: +NORMALIZE_WHITESPACE
         X   Y
    0  1.0   6
    1  2.0   7
    2  3.0   8
    """
    # Ascending sort, then take the leading n rows.
    ordered = self.sort_values(by=columns, ascending=True)
    return ordered.head(n=n)
def isin(self, values) -> "DataFrame":
    """
    Whether each element in the DataFrame is contained in values.

    Parameters
    ----------
    values : iterable or dict
       The sequence of values to test. If values is a dict,
       the keys must be the column names, which must match.
       Series and DataFrame are not supported.

    Returns
    -------
    DataFrame
        DataFrame of booleans showing whether each element in the DataFrame
        is contained in values.

    Examples
    --------
    >>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
    ...                   index=['falcon', 'dog'],
    ...                   columns=['num_legs', 'num_wings'])
    >>> df
            num_legs  num_wings
    falcon         2          2
    dog            4          0

    When ``values`` is a list check whether every value in the DataFrame
    is present in the list (which animals have 0 or 2 legs or wings)

    >>> df.isin([0, 2])
            num_legs  num_wings
    falcon      True       True
    dog        False       True

    When ``values`` is a dict, we can pass values to check for each
    column separately:

    >>> df.isin({'num_wings': [0, 3]})
            num_legs  num_wings
    falcon     False      False
    dog        False       True
    """
    if isinstance(values, (pd.DataFrame, pd.Series)):
        raise NotImplementedError("DataFrame and Series are not supported")
    # Dict keys must all be existing column names.
    if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
        raise AttributeError(
            "'DataFrame' object has no attribute %s"
            % (set(values.keys()).difference(self.columns))
        )
    data_spark_columns = []
    if isinstance(values, dict):
        for i, col in enumerate(self.columns):
            if col in values:
                item = values[col]
                # Convert ndarray/iterables to a plain list before Column.isin.
                item = item.tolist() if isinstance(item, np.ndarray) else list(item)
                data_spark_columns.append(
                    self._internal.spark_column_for(self._internal.column_labels[i])
                    .isin(item)
                    .alias(self._internal.data_spark_column_names[i])
                )
            else:
                # Columns without an entry in ``values`` become all-False.
                data_spark_columns.append(
                    F.lit(False).alias(self._internal.data_spark_column_names[i])
                )
    elif is_list_like(values):
        # A flat sequence is tested against every column.
        values = values.tolist() if isinstance(values, np.ndarray) else list(values)
        data_spark_columns += [
            self._internal.spark_column_for(label)
            .isin(values)
            .alias(self._internal.spark_column_name_for(label))
            for label in self._internal.column_labels
        ]
    else:
        raise TypeError("Values should be iterable, Series, DataFrame or dict.")
    return DataFrame(self._internal.with_new_columns(data_spark_columns))
@property
def shape(self) -> Tuple[int, int]:
    """
    Return a tuple representing the dimensionality of the DataFrame.

    Examples
    --------
    >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    >>> df.shape
    (2, 2)

    >>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],
    ...                    'col3': [5, 6]})
    >>> df.shape
    (2, 3)
    """
    n_rows = len(self)
    n_cols = len(self.columns)
    return n_rows, n_cols
def merge(
    self,
    right: "DataFrame",
    how: str = "inner",
    on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
    left_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
    right_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
    left_index: bool = False,
    right_index: bool = False,
    suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
    """
    Merge DataFrame objects with a database-style join.

    The index of the resulting DataFrame will be one of the following:
        - 0...n if no index is used for merging
        - Index of the left DataFrame if merged only on the index of the right DataFrame
        - Index of the right DataFrame if merged only on the index of the left DataFrame
        - All involved indices if merged using the indices of both DataFrames
            e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
            be an index (x, a, b)

    Parameters
    ----------
    right: Object to merge with.
    how: Type of merge to be performed.
        {'left', 'right', 'outer', 'inner'}, default 'inner'

        left: use only keys from left frame, similar to a SQL left outer join; not preserve
            key order unlike pandas.
        right: use only keys from right frame, similar to a SQL right outer join; not preserve
            key order unlike pandas.
        outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
            lexicographically.
        inner: use intersection of keys from both frames, similar to a SQL inner join;
            not preserve the order of the left keys unlike pandas.
    on: Column or index level names to join on. These must be found in both DataFrames. If on
        is None and not merging on indexes then this defaults to the intersection of the
        columns in both DataFrames.
    left_on: Column or index level names to join on in the left DataFrame. Can also
        be an array or list of arrays of the length of the left DataFrame.
        These arrays are treated as if they are columns.
    right_on: Column or index level names to join on in the right DataFrame. Can also
        be an array or list of arrays of the length of the right DataFrame.
        These arrays are treated as if they are columns.
    left_index: Use the index from the left DataFrame as the join key(s). If it is a
        MultiIndex, the number of keys in the other DataFrame (either the index or a number of
        columns) must match the number of levels.
    right_index: Use the index from the right DataFrame as the join key. Same caveats as
        left_index.
    suffixes: Suffix to apply to overlapping column names in the left and right side,
        respectively.

    Returns
    -------
    DataFrame
        A DataFrame of the two merged objects.

    See Also
    --------
    DataFrame.join : Join columns of another DataFrame.
    DataFrame.update : Modify in place using non-NA values from another DataFrame.
    DataFrame.hint : Specifies some hint on the current DataFrame.
    broadcast : Marks a DataFrame as small enough for use in broadcast joins.

    Examples
    --------
    >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
    ...                     'value': [1, 2, 3, 5]},
    ...                    columns=['lkey', 'value'])
    >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
    ...                     'value': [5, 6, 7, 8]},
    ...                    columns=['rkey', 'value'])
    >>> df1
      lkey  value
    0  foo      1
    1  bar      2
    2  baz      3
    3  foo      5
    >>> df2
      rkey  value
    0  foo      5
    1  bar      6
    2  baz      7
    3  foo      8

    Merge df1 and df2 on the lkey and rkey columns. The value columns have
    the default suffixes, _x and _y, appended.

    >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
    >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])  # doctest: +ELLIPSIS
      lkey  value_x rkey  value_y
    ...bar        2  bar        6
    ...baz        3  baz        7
    ...foo        1  foo        5
    ...foo        1  foo        8
    ...foo        5  foo        5
    ...foo        5  foo        8

    >>> left_psdf = ps.DataFrame({'A': [1, 2]})
    >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])

    >>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()
       A  B
    1  2  x

    >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()
       A     B
    0  1  None
    1  2     x

    >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()
         A  B
    1  2.0  x
    2  NaN  y

    >>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()
         A     B
    0  1.0  None
    1  2.0     x
    2  NaN     y

    Notes
    -----
    As described in #263, joining string columns currently returns None for missing values
        instead of NaN.
    """

    def to_list(os: Optional[Union[Any, List[Any], Tuple, List[Tuple]]]) -> List[Tuple]:
        # Normalize a join-key argument (None / scalar / tuple / list) to a
        # list of label tuples.
        if os is None:
            return []
        elif is_name_like_tuple(os):
            return [os]  # type: ignore
        elif is_name_like_value(os):
            return [(os,)]
        else:
            return [o if is_name_like_tuple(o) else (o,) for o in os]

    if isinstance(right, ps.Series):
        right = right.to_frame()
    # Resolve the left/right join-key Spark column names from the various
    # mutually-exclusive key arguments.
    if on:
        if left_on or right_on:
            raise ValueError(
                'Can only pass argument "on" OR "left_on" and "right_on", '
                "not a combination of both."
            )
        left_key_names = list(map(self._internal.spark_column_name_for, to_list(on)))
        right_key_names = list(map(right._internal.spark_column_name_for, to_list(on)))
    else:
        # TODO: need special handling for multi-index.
        if left_index:
            left_key_names = self._internal.index_spark_column_names
        else:
            left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on)))
        if right_index:
            right_key_names = right._internal.index_spark_column_names
        else:
            right_key_names = list(
                map(right._internal.spark_column_name_for, to_list(right_on))
            )

        if left_key_names and not right_key_names:
            raise ValueError("Must pass right_on or right_index=True")
        if right_key_names and not left_key_names:
            raise ValueError("Must pass left_on or left_index=True")
        if not left_key_names and not right_key_names:
            # No key given at all: fall back to the common column names,
            # as pandas does.
            common = list(self.columns.intersection(right.columns))
            if len(common) == 0:
                raise ValueError(
                    "No common columns to perform merge on. Merge options: "
                    "left_on=None, right_on=None, left_index=False, right_index=False"
                )
            left_key_names = list(map(self._internal.spark_column_name_for, to_list(common)))
            right_key_names = list(map(right._internal.spark_column_name_for, to_list(common)))
    if len(left_key_names) != len(right_key_names):
        raise ValueError("len(left_keys) must equal len(right_keys)")

    # We should distinguish the name to avoid ambiguous column name after merging.
    right_prefix = "__right_"
    right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names]

    how = validate_how(how)

    def resolve(internal, side):
        # Prefix every non-hidden Spark column of ``internal`` with the given
        # side so that left/right column names cannot collide in the join.
        rename = lambda col: "__{}_{}".format(side, col)
        internal = internal.resolved_copy
        sdf = internal.spark_frame
        sdf = sdf.select(
            [
                scol_for(sdf, col).alias(rename(col))
                for col in sdf.columns
                if col not in HIDDEN_COLUMNS
            ]
            + list(HIDDEN_COLUMNS)
        )
        return internal.copy(
            spark_frame=sdf,
            index_spark_columns=[
                scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
            ],
            index_fields=[
                field.copy(name=rename(field.name)) for field in internal.index_fields
            ],
            data_spark_columns=[
                scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
            ],
            data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
        )

    left_internal = self._internal.resolved_copy
    right_internal = resolve(right._internal, "right")

    left_table = left_internal.spark_frame.alias("left_table")
    right_table = right_internal.spark_frame.alias("right_table")

    left_key_columns = [scol_for(left_table, label) for label in left_key_names]
    right_key_columns = [scol_for(right_table, label) for label in right_key_names]

    # Equi-join on all key pairs combined with AND.
    join_condition = reduce(
        lambda x, y: x & y,
        [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)],
    )

    joined_table = left_table.join(right_table, join_condition, how=how)

    # Unpack suffixes tuple for convenience
    left_suffix = suffixes[0]
    right_suffix = suffixes[1]

    # Append suffixes to columns with the same name to avoid conflicts later
    duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)

    exprs = []
    data_columns = []
    column_labels = []

    left_scol_for = lambda label: scol_for(
        left_table, left_internal.spark_column_name_for(label)
    )
    right_scol_for = lambda label: scol_for(
        right_table, right_internal.spark_column_name_for(label)
    )

    for label in left_internal.column_labels:
        col = left_internal.spark_column_name_for(label)
        scol = left_scol_for(label)
        if label in duplicate_columns:
            spark_column_name = left_internal.spark_column_name_for(label)
            if (
                spark_column_name in left_key_names
                and (right_prefix + spark_column_name) in right_key_names
            ):
                # Duplicate label that is also a join key: emit a single
                # column, picking the side (or coalescing for a full outer
                # join) according to the join type.
                right_scol = right_scol_for(label)
                if how == "right":
                    scol = right_scol.alias(col)
                elif how == "full":
                    scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
                else:
                    pass
            else:
                col = col + left_suffix
                scol = scol.alias(col)
                label = tuple([str(label[0]) + left_suffix] + list(label[1:]))
        exprs.append(scol)
        data_columns.append(col)
        column_labels.append(label)
    for label in right_internal.column_labels:
        # recover `right_prefix` here.
        col = right_internal.spark_column_name_for(label)[len(right_prefix) :]
        scol = right_scol_for(label).alias(col)
        if label in duplicate_columns:
            spark_column_name = left_internal.spark_column_name_for(label)
            if (
                spark_column_name in left_key_names
                and (right_prefix + spark_column_name) in right_key_names
            ):
                # Already emitted once from the left-side loop above.
                continue
            else:
                col = col + right_suffix
                scol = scol.alias(col)
                label = tuple([str(label[0]) + right_suffix] + list(label[1:]))
        exprs.append(scol)
        data_columns.append(col)
        column_labels.append(label)

    left_index_scols = left_internal.index_spark_columns
    right_index_scols = right_internal.index_spark_columns

    # Retain indices if they are used for joining
    if left_index:
        if right_index:
            if how in ("inner", "left"):
                exprs.extend(left_index_scols)
                index_spark_column_names = left_internal.index_spark_column_names
                index_names = left_internal.index_names
            elif how == "right":
                exprs.extend(right_index_scols)
                index_spark_column_names = right_internal.index_spark_column_names
                index_names = right_internal.index_names
            else:
                # Full outer join: coalesce the two index sides per level.
                index_spark_column_names = left_internal.index_spark_column_names
                index_names = left_internal.index_names
                for col, left_scol, right_scol in zip(
                    index_spark_column_names, left_index_scols, right_index_scols
                ):
                    scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
                    exprs.append(scol.alias(col))
        else:
            exprs.extend(right_index_scols)
            index_spark_column_names = right_internal.index_spark_column_names
            index_names = right_internal.index_names
    elif right_index:
        exprs.extend(left_index_scols)
        index_spark_column_names = left_internal.index_spark_column_names
        index_names = left_internal.index_names
    else:
        # No index-based join: the result gets a fresh default index.
        index_spark_column_names = []
        index_names = []

    selected_columns = joined_table.select(*exprs)

    internal = InternalFrame(
        spark_frame=selected_columns,
        index_spark_columns=[
            scol_for(selected_columns, col) for col in index_spark_column_names
        ],
        index_names=index_names,
        column_labels=column_labels,
        data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],
    )
    return DataFrame(internal)
def join(
self,
right: "DataFrame",
on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
right’s index, and sort it. lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> psdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> psdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')
>>> join_psdf.sort_values(by=join_psdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))
>>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method not preserve the
original DataFrame’s index in the result unlike pandas.
>>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')
>>> join_psdf.index
Int64Index([0, 1, 2, 3], dtype='int64')
"""
if isinstance(right, ps.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: " "{rename}".format(rename=common)
)
need_set_index = False
if on:
if not is_list_like(on):
on = [on] # type: ignore
if len(on) != right._internal.index_level:
raise ValueError(
'len(left_on) must equal the number of levels in the index of "right"'
)
need_set_index = len(set(on) & set(self.index.names)) == 0
if need_set_index:
self = self.set_index(on)
join_psdf = self.merge(
right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)
)
return join_psdf.reset_index() if need_set_index else join_psdf
def append(
self,
other: "DataFrame",
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> "DataFrame":
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ps.Series):
raise TypeError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise NotImplementedError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_spark_columns
if len(index_scols) != other._internal.index_level:
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (
self._internal.spark_frame.select(index_scols)
.intersect(
other._internal.spark_frame.select(other._internal.index_spark_columns)
)
.count()
) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from pyspark.pandas.namespace import concat
return cast(DataFrame, concat([self, other], ignore_index=ignore_index))
    # TODO: add 'filter_func' and 'errors' parameter
    def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None:
        """
        Modify in place using non-NA values from another DataFrame.
        Aligns on indices. There is no return value.

        Parameters
        ----------
        other : DataFrame, or Series
        join : 'left', default 'left'
            Only left join is implemented, keeping the index and columns of the original object.
        overwrite : bool, default True
            How to handle non-NA values for overlapping keys:

            * True: overwrite original DataFrame's values with values from `other`.
            * False: only update values that are NA in the original DataFrame.

        Returns
        -------
        None : method directly changes calling object

        See Also
        --------
        DataFrame.merge : For column(s)-on-columns(s) operations.
        DataFrame.join : Join columns of another DataFrame.
        DataFrame.hint : Specifies some hint on the current DataFrame.
        broadcast : Marks a DataFrame as small enough for use in broadcast joins.

        Examples
        --------
        >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
        >>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
        >>> df.update(new_df)
        >>> df.sort_index()
           A  B
        0  1  4
        1  2  5
        2  3  6

        The DataFrame's length does not increase as a result of the update,
        only values at matching index/column labels are updated.

        >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
        >>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
        >>> df.update(new_df)
        >>> df.sort_index()
           A  B
        0  a  d
        1  b  e
        2  c  f

        For Series, its name attribute must be set.

        >>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
        >>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])
        >>> df.update(new_column)
        >>> df.sort_index()
           A  B
        0  a  d
        1  b  y
        2  c  e

        If `other` contains None the corresponding values are not updated in the original dataframe.

        >>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
        >>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])
        >>> df.update(new_df)
        >>> df.sort_index()
           A      B
        0  1    4.0
        1  2  500.0
        2  3    6.0
        """
        if join != "left":
            raise NotImplementedError("Only left join is supported")
        # A named Series is treated as a single-column frame for alignment purposes.
        if isinstance(other, ps.Series):
            other = other.to_frame()
        # Only columns present in BOTH frames can be updated; the rest are untouched.
        update_columns = list(
            set(self._internal.column_labels).intersection(set(other._internal.column_labels))
        )
        # Left-join `other`'s updatable columns alongside ours; the "_new" suffix
        # disambiguates the incoming values from the originals in one Spark frame.
        update_sdf = self.join(
            other[update_columns], rsuffix="_new"
        )._internal.resolved_copy.spark_frame
        data_fields = self._internal.data_fields.copy()
        for column_labels in update_columns:
            column_name = self._internal.spark_column_name_for(column_labels)
            old_col = scol_for(update_sdf, column_name)
            new_col = scol_for(
                update_sdf, other._internal.spark_column_name_for(column_labels) + "_new"
            )
            if overwrite:
                # Take the new value whenever it is non-null; nulls in `other`
                # never clobber existing data.
                update_sdf = update_sdf.withColumn(
                    column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)
                )
            else:
                # Only fill holes: keep the original value unless it is null.
                update_sdf = update_sdf.withColumn(
                    column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)
                )
            # The column's dtype may have changed (e.g. int -> float via nulls),
            # so invalidate the cached field metadata for it.
            data_fields[self._internal.column_labels.index(column_labels)] = None  # TODO: dtype?
        # Re-select only the original columns (dropping the temporary "_new" ones).
        sdf = update_sdf.select(
            *[scol_for(update_sdf, col) for col in self._internal.spark_column_names],
            *HIDDEN_COLUMNS,
        )
        internal = self._internal.with_new_sdf(sdf, data_fields=data_fields)
        # In-place update: swap the internal frame without requiring the same anchor.
        self._update_internal_frame(internal, requires_same_anchor=False)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "DataFrame":
"""
Return a random sample of items from an axis of object.
Please call this function using named argument by specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will
be fixed. The result set depends on not only the seed, but also how the data is distributed
across machines and to some extent network randomness when shuffle operations are involved.
Even in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError(
"Function sample currently does not support specifying "
"exact number of items to return. Use frac instead."
)
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._internal.resolved_copy.spark_frame.sample(
withReplacement=replace, fraction=frac, seed=random_state
)
return DataFrame(self._internal.with_new_sdf(sdf))
def astype(self, dtype) -> "DataFrame":
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
applied = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
for col_name, col in self.items():
if col_name in dtype:
applied.append(col.astype(dtype=dtype[col_name]))
else:
applied.append(col)
else:
for col_name, col in self.items():
applied.append(col.astype(dtype=dtype))
return DataFrame(self._internal.with_new_columns(applied))
def add_prefix(self, prefix) -> "DataFrame":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
return self._apply_series_op(
lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label]))
)
def add_suffix(self, suffix) -> "DataFrame":
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
return self._apply_series_op(
lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label]))
)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame":
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
DataFrame
Summary statistics of the Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ps.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ps.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
For multi-index columns:
>>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]
>>> df.describe() # doctest: +NORMALIZE_WHITESPACE
num
a b
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
>>> df[('num', 'b')].describe()
count 3.0
mean 5.0
std 1.0
min 4.0
25% 4.0
50% 5.0
75% 6.0
max 6.0
Name: (num, b), dtype: float64
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ps.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
column_labels = []
for label in self._internal.column_labels:
scol = self._internal.spark_column_for(label)
spark_type = self._internal.spark_type_for(label)
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(
F.nanvl(scol, F.lit(None)).alias(self._internal.spark_column_name_for(label))
)
column_labels.append(label)
elif isinstance(spark_type, NumericType):
exprs.append(scol)
column_labels.append(label)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._internal.spark_frame.select(*exprs).summary(*stats)
sdf = sdf.replace("stddev", "std", subset=["summary"])
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, "summary")],
column_labels=column_labels,
data_spark_columns=[
scol_for(sdf, self._internal.spark_column_name_for(label))
for label in column_labels
],
)
return DataFrame(internal).astype("float64")
def drop_duplicates(self, subset=None, keep="first", inplace=False) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy.
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
>>> df = ps.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_index()
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep='last').sort_index()
a b
0 1 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep=False).sort_index()
a b
0 1 a
3 2 c
4 3 d
"""
inplace = validate_bool_kwarg(inplace, "inplace")
sdf, column = self._mark_duplicates(subset, keep)
sdf = sdf.where(~scol_for(sdf, column)).drop(column)
internal = self._internal.with_new_sdf(sdf)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def reindex(
self,
labels: Optional[Any] = None,
index: Optional[Any] = None,
columns: Optional[Any] = None,
axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True,
fill_value: Optional[Any] = None,
) -> "DataFrame":
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ps.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index()
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index()
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
axis = validate_axis(axis)
if axis == 0:
index = labels
elif axis == 1:
columns = labels
else:
raise ValueError(
"No axis named %s for object type %s." % (axis, type(axis).__name__)
)
if index is not None and not is_list_like(index):
raise TypeError(
"Index must be called with a collection of some kind, "
"%s was passed" % type(index)
)
if columns is not None and not is_list_like(columns):
raise TypeError(
"Columns must be called with a collection of some kind, "
"%s was passed" % type(columns)
)
df = self
if index is not None:
df = df._reindex_index(index, fill_value)
if columns is not None:
df = df._reindex_columns(columns, fill_value)
# Copy
if copy and df is self:
return df.copy()
else:
return df
    def _reindex_index(self, index, fill_value):
        # When axis is index, we can mimic pandas' by a right outer join.
        # `labels` (the requested index values) is the right side, so every
        # requested label appears in the output exactly as pandas would produce.
        nlevels = self._internal.index_level
        assert nlevels <= 1 or (
            isinstance(index, ps.MultiIndex) and nlevels == index.nlevels
        ), "MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex."

        index_columns = self._internal.index_spark_column_names
        frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)

        if isinstance(index, ps.Index):
            if nlevels != index.nlevels:
                # Level count mismatch: nothing can align, so build an all-missing
                # frame over the new index with this frame's columns.
                return DataFrame(index._internal.with_new_columns([])).reindex(
                    columns=self.columns, fill_value=fill_value
                )

            index_names = index._internal.index_names
            scols = index._internal.index_spark_columns
            # Rename the target index columns to match ours so the join keys line up.
            labels = index._internal.spark_frame.select(
                [scol.alias(index_column) for scol, index_column in zip(scols, index_columns)]
            )
        else:
            # A plain iterable becomes a single-level index via a temporary Series.
            psser = ps.Series(list(index))
            labels = psser._internal.spark_frame.select(psser.spark.column.alias(index_columns[0]))
            index_names = self._internal.index_names

        if fill_value is not None:
            # Rename our index columns to temporary names so that, after the join,
            # "our row existed" (frame cols non-null) is distinguishable from
            # "label requested" (labels cols non-null).
            frame_index_columns = [
                verify_temp_column_name(frame, "__frame_index_column_{}__".format(i))
                for i in range(nlevels)
            ]
            index_scols = [
                scol_for(frame, index_col).alias(frame_index_col)
                for index_col, frame_index_col in zip(index_columns, frame_index_columns)
            ]
            scols = self._internal.resolved_copy.data_spark_columns
            frame = frame.select(index_scols + scols)

            # Attach the fill value as a literal column on the labels side.
            temp_fill_value = verify_temp_column_name(frame, "__fill_value__")
            labels = labels.withColumn(temp_fill_value, F.lit(fill_value))

            frame_index_scols = [scol_for(frame, col) for col in frame_index_columns]
            labels_index_scols = [scol_for(labels, col) for col in index_columns]

            # Right join keeps every requested label, matched or not.
            joined_df = frame.join(
                labels,
                on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)],
                how="right",
            )

            joined_df = joined_df.select(
                *labels_index_scols,
                *[
                    # A label with no matching frame row (frame index null, label
                    # index non-null) takes the fill value; otherwise keep the data.
                    F.when(
                        reduce(
                            lambda c1, c2: c1 & c2,
                            [
                                fcol.isNull() & lcol.isNotNull()
                                for fcol, lcol in zip(frame_index_scols, labels_index_scols)
                            ],
                        ),
                        scol_for(joined_df, temp_fill_value),
                    )
                    .otherwise(scol_for(joined_df, col))
                    .alias(col)
                    for col in self._internal.data_spark_column_names
                ],
            )
        else:
            # No fill value: unmatched labels simply yield nulls from the right join.
            joined_df = frame.join(labels, on=index_columns, how="right")

        sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME)
        internal = self._internal.copy(
            spark_frame=sdf,
            index_spark_columns=[
                scol_for(sdf, col) for col in self._internal.index_spark_column_names
            ],
            index_names=index_names,
            index_fields=None,  # TODO: dtypes?
            data_spark_columns=[
                scol_for(sdf, col) for col in self._internal.data_spark_column_names
            ],
            # Data may have gained nulls, so only the dtype (not nullability) is kept.
            data_fields=[InternalField(dtype=field.dtype) for field in self._internal.data_fields],
        )
        return DataFrame(internal)
def _reindex_columns(self, columns, fill_value):
level = self._internal.column_labels_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError("Expected tuple, got {}".format(type(col).__name__))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError(
"shape (1,{}) doesn't match the shape (1,{})".format(len(col), level)
)
fill_value = np.nan if fill_value is None else fill_value
scols_or_pssers, labels = [], []
for label in label_columns:
if label in self._internal.column_labels:
scols_or_pssers.append(self._psser_for(label))
else:
scols_or_pssers.append(F.lit(fill_value).alias(name_like_string(label)))
labels.append(label)
if isinstance(columns, pd.Index):
column_label_names = [
name if is_name_like_tuple(name) else (name,) for name in columns.names
]
internal = self._internal.with_new_columns(
scols_or_pssers, column_labels=labels, column_label_names=column_label_names
)
else:
internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels)
return DataFrame(internal)
def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame":
"""
Return a DataFrame with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : DataFrame
Its row and column indices are used to define the new indices
of this object.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
Returns
-------
DataFrame
DataFrame with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = ps.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN None
2014-02-15 35.1 NaN medium
"""
if isinstance(other, DataFrame):
return self.reindex(index=other.index, columns=other.columns, copy=copy)
else:
raise TypeError("other must be a pandas-on-Spark DataFrame")
def melt(
    self,
    id_vars: Optional[Union[Any, Tuple, List[Union[Any, Tuple]]]] = None,
    value_vars: Optional[Union[Any, Tuple, List[Union[Any, Tuple]]]] = None,
    var_name: Optional[Union[str, List[str]]] = None,
    value_name: str = "value",
) -> "DataFrame":
    """
    Unpivot a DataFrame from wide format to long format, optionally
    leaving identifier variables set.

    This function is useful to massage a DataFrame into a format where one
    or more columns are identifier variables (`id_vars`), while all other
    columns, considered measured variables (`value_vars`), are "unpivoted" to
    the row axis, leaving just two non-identifier columns, 'variable' and
    'value'.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot. If not specified, uses all columns that
        are not set as `id_vars`.
    var_name : scalar, default 'variable'
        Name to use for the 'variable' column. If None it uses `frame.columns.name` or
        'variable'.
    value_name : scalar, default 'value'
        Name to use for the 'value' column.

    Returns
    -------
    DataFrame
        Unpivoted DataFrame.

    Examples
    --------
    >>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
    ...                    'B': {0: 1, 1: 3, 2: 5},
    ...                    'C': {0: 2, 1: 4, 2: 6}},
    ...                   columns=['A', 'B', 'C'])
    >>> df
       A  B  C
    0  a  1  2
    1  b  3  4
    2  c  5  6

    >>> ps.melt(df)
      variable value
    0        A     a
    1        B     1
    2        C     2
    3        A     b
    4        B     3
    5        C     4
    6        A     c
    7        B     5
    8        C     6

    >>> df.melt(id_vars='A')
       A variable  value
    0  a        B      1
    1  a        C      2
    2  b        B      3
    3  b        C      4
    4  c        B      5
    5  c        C      6

    >>> df.melt(value_vars='A')
      variable value
    0        A     a
    1        A     b
    2        A     c

    >>> ps.melt(df, id_vars=['A', 'B'])
       A  B variable  value
    0  a  1        C      2
    1  b  3        C      4
    2  c  5        C      6

    >>> df.melt(id_vars=['A'], value_vars=['C'])
       A variable  value
    0  a        C      2
    1  b        C      4
    2  c        C      6

    The names of 'variable' and 'value' columns can be customized:

    >>> ps.melt(df, id_vars=['A'], value_vars=['B'],
    ...         var_name='myVarname', value_name='myValname')
       A myVarname  myValname
    0  a         B          1
    1  b         B          3
    2  c         B          5
    """
    column_labels = self._internal.column_labels

    # --- Normalize `id_vars` into a list of column-label tuples -------------
    if id_vars is None:
        id_vars = []
    else:
        if isinstance(id_vars, tuple):
            # A bare tuple is only accepted for single-level columns, where
            # each element names one column; for MultiIndex columns a tuple
            # would be ambiguous (one label vs. several), so require a list.
            if self._internal.column_labels_level == 1:
                id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]
            else:
                raise ValueError(
                    "id_vars must be a list of tuples" " when columns are a MultiIndex"
                )
        elif is_name_like_value(id_vars):
            # A single scalar-like label becomes a one-element list.
            id_vars = [(id_vars,)]
        else:
            id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]

        # Reject id_vars that do not name existing columns. Flattening with
        # np.ravel lets us report which individual label components are
        # missing versus which full tuples simply don't match.
        non_existence_col = [idv for idv in id_vars if idv not in column_labels]
        if len(non_existence_col) != 0:
            raveled_column_labels = np.ravel(column_labels)
            missing = [
                nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
            ]
            if len(missing) != 0:
                raise KeyError(
                    "The following 'id_vars' are not present"
                    " in the DataFrame: {}".format(missing)
                )
            else:
                raise KeyError(
                    "None of {} are in the {}".format(non_existence_col, column_labels)
                )

    # --- Normalize `value_vars` the same way --------------------------------
    if value_vars is None:
        value_vars = []
    else:
        if isinstance(value_vars, tuple):
            if self._internal.column_labels_level == 1:
                value_vars = [
                    valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars
                ]
            else:
                raise ValueError(
                    "value_vars must be a list of tuples" " when columns are a MultiIndex"
                )
        elif is_name_like_value(value_vars):
            value_vars = [(value_vars,)]
        else:
            value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars]

        non_existence_col = [valv for valv in value_vars if valv not in column_labels]
        if len(non_existence_col) != 0:
            raveled_column_labels = np.ravel(column_labels)
            missing = [
                nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
            ]
            if len(missing) != 0:
                raise KeyError(
                    "The following 'value_vars' are not present"
                    " in the DataFrame: {}".format(missing)
                )
            else:
                raise KeyError(
                    "None of {} are in the {}".format(non_existence_col, column_labels)
                )

    # No explicit value_vars means "everything" (id_vars are removed below).
    if len(value_vars) == 0:
        value_vars = column_labels

    # Candidate columns to unpivot: everything except the identifiers.
    column_labels = [label for label in column_labels if label not in id_vars]

    sdf = self._internal.spark_frame

    # --- Resolve the name(s) of the 'variable' column(s) --------------------
    # One name per column-label level; defaults come from the column index
    # names when present.
    if var_name is None:
        if (
            self._internal.column_labels_level == 1
            and self._internal.column_label_names[0] is None
        ):
            var_name = ["variable"]
        else:
            var_name = [
                name_like_string(name) if name is not None else "variable_{}".format(i)
                for i, name in enumerate(self._internal.column_label_names)
            ]
    elif isinstance(var_name, str):
        var_name = [var_name]

    # --- Build the unpivot expression ---------------------------------------
    # For each melted column, emit a struct of (label parts..., value);
    # exploding the array of structs produces one output row per
    # (input row, melted column) pair.
    pairs = F.explode(
        F.array(
            *[
                F.struct(
                    *[F.lit(c).alias(name) for c, name in zip(label, var_name)],
                    *[self._internal.spark_column_for(label).alias(value_name)],
                )
                for label in column_labels
                if label in value_vars
            ]
        )
    )

    # Final projection: id columns pass through; variable/value come from the
    # exploded struct (backtick-quoted in case names need escaping).
    columns = (
        [
            self._internal.spark_column_for(label).alias(name_like_string(label))
            for label in id_vars
        ]
        + [F.col("pairs.`%s`" % name) for name in var_name]
        + [F.col("pairs.`%s`" % value_name)]
    )
    exploded_df = sdf.withColumn("pairs", pairs).select(columns)

    # The result uses a fresh default index (index_spark_columns=None), and
    # multi-level id labels are flattened to single strings.
    return DataFrame(
        InternalFrame(
            spark_frame=exploded_df,
            index_spark_columns=None,
            column_labels=(
                [label if len(label) == 1 else (name_like_string(label),) for label in id_vars]
                + [(name,) for name in var_name]
                + [(value_name,)]
            ),
        )
    )
def stack(self) -> Union["DataFrame", "Series"]:
    """
    Stack the prescribed level(s) from columns to index.

    Return a reshaped DataFrame or Series having a multi-level
    index with one or more new inner-most levels compared to the current
    DataFrame. The new inner-most levels are created by pivoting the
    columns of the current dataframe:

    - if the columns have a single level, the output is a Series;
    - if the columns have multiple levels, the new index
      level(s) is (are) taken from the prescribed level(s) and
      the output is a DataFrame.

    The new index levels are sorted.

    Returns
    -------
    DataFrame or Series
        Stacked dataframe or series.

    See Also
    --------
    DataFrame.unstack : Unstack prescribed level(s) from index axis
        onto column axis.
    DataFrame.pivot : Reshape dataframe from long format to wide
        format.
    DataFrame.pivot_table : Create a spreadsheet-style pivot table
        as a DataFrame.

    Notes
    -----
    The function is named by analogy with a collection of books
    being reorganized from being side by side on a horizontal
    position (the columns of the dataframe) to being stacked
    vertically on top of each other (in the index of the
    dataframe).

    Examples
    --------
    **Single level columns**

    >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=['weight', 'height'])

    Stacking a dataframe with a single level column axis returns a Series:

    >>> df_single_level_cols
         weight  height
    cat       0       1
    dog       2       3
    >>> df_single_level_cols.stack().sort_index()
    cat  height    1
         weight    0
    dog  height    3
         weight    2
    dtype: int64

    **Multi level columns: simple case**

    >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('weight', 'pounds')])
    >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol1)

    Stacking a dataframe with a multi-level column axis:

    >>> df_multi_level_cols1  # doctest: +NORMALIZE_WHITESPACE
         weight
             kg    pounds
    cat       1        2
    dog       2        4
    >>> df_multi_level_cols1.stack().sort_index()
                weight
    cat kg           1
        pounds       2
    dog kg           2
        pounds       4

    **Missing values**

    >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
    ...                                        ('height', 'm')])
    >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],
    ...                                     index=['cat', 'dog'],
    ...                                     columns=multicol2)

    It is common to have missing values when stacking a dataframe
    with multi-level columns, as the stacked dataframe typically
    has more values than the original dataframe. Missing values
    are filled with NaNs:

    >>> df_multi_level_cols2
        weight height
            kg      m
    cat    1.0    2.0
    dog    3.0    4.0
    >>> df_multi_level_cols2.stack().sort_index()  # doctest: +SKIP
                height  weight
    cat kg         NaN     1.0
        m          2.0     NaN
    dog kg         NaN     3.0
        m          4.0     NaN
    """
    from pyspark.pandas.series import first_series

    # Degenerate case: no columns at all -> empty result, dropping the
    # inner-most column-label level; with_filter(False) yields zero rows.
    if len(self._internal.column_labels) == 0:
        return DataFrame(
            self._internal.copy(
                column_label_names=self._internal.column_label_names[:-1]
            ).with_filter(F.lit(False))
        )

    # Group columns by their outer label (everything but the last level):
    # column_labels maps outer-label -> {inner-most value -> Spark column}.
    column_labels = defaultdict(dict)  # type: Union[defaultdict, OrderedDict]
    index_values = set()
    should_returns_series = False
    for label in self._internal.column_labels:
        new_label = label[:-1]
        if len(new_label) == 0:
            # Single-level columns: there is no remaining outer label, so
            # the stacked result collapses to a Series.
            new_label = None
            should_returns_series = True
        value = label[-1]

        scol = self._internal.spark_column_for(label)
        column_labels[new_label][value] = scol

        index_values.add(value)

    # Sort by outer label so the output column order is deterministic.
    column_labels = OrderedDict(sorted(column_labels.items(), key=lambda x: x[0]))

    # The inner-most column-level name becomes the name of the new index
    # level; the remaining names describe the output columns.
    index_name = self._internal.column_label_names[-1]
    column_label_names = self._internal.column_label_names[:-1]
    if len(column_label_names) == 0:
        column_label_names = [None]

    index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)
    data_columns = [name_like_string(label) for label in column_labels]

    # One struct per inner-most value: (new index value, one field per
    # output column). Combinations absent from the original frame are
    # filled with NULL, matching pandas' NaN-fill behavior.
    structs = [
        F.struct(
            *[F.lit(value).alias(index_column)],
            *[
                (
                    column_labels[label][value]
                    if value in column_labels[label]
                    else F.lit(None)
                ).alias(name)
                for label, name in zip(column_labels, data_columns)
            ],
        ).alias(value)
        for value in index_values
    ]

    # Exploding the array of structs emits one row per (input row,
    # inner-most value) pair.
    pairs = F.explode(F.array(*structs))

    sdf = self._internal.spark_frame.withColumn("pairs", pairs)
    sdf = sdf.select(
        self._internal.index_spark_columns
        + [sdf["pairs"][index_column].alias(index_column)]
        + [sdf["pairs"][name].alias(name) for name in data_columns]
    )

    internal = InternalFrame(  # TODO: dtypes?
        spark_frame=sdf,
        index_spark_columns=[
            scol_for(sdf, col)
            for col in (self._internal.index_spark_column_names + [index_column])
        ],
        index_names=self._internal.index_names + [index_name],
        index_fields=self._internal.index_fields + [None],
        column_labels=list(column_labels),
        data_spark_columns=[scol_for(sdf, col) for col in data_columns],
        column_label_names=column_label_names,  # type: ignore
    )
    psdf = DataFrame(internal)  # type: "DataFrame"

    if should_returns_series:
        return first_series(psdf)
    else:
        return psdf
def unstack(self) -> Union["DataFrame", "Series"]:
    """
    Pivot the (necessarily hierarchical) index labels.

    Returns a DataFrame having a new level of column labels whose inner-most level
    consists of the pivoted index labels.

    If the index is not a MultiIndex, the output will be a Series.

    .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and
        it could cause a serious performance degradation since Spark partitions it row based.

    Returns
    -------
    Series or DataFrame

    See Also
    --------
    DataFrame.pivot : Pivot a table based on column values.
    DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).

    Examples
    --------
    >>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},
    ...                    "B": {"0": "1", "1": "3", "2": "5"},
    ...                    "C": {"0": "2", "1": "4", "2": "6"}},
    ...                   columns=["A", "B", "C"])
    >>> df
       A  B  C
    0  a  1  2
    1  b  3  4
    2  c  5  6

    >>> df.unstack().sort_index()
    A  0    a
       1    b
       2    c
    B  0    1
       1    3
       2    5
    C  0    2
       1    4
       2    6
    dtype: object

    >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])
    >>> df.unstack().sort_index()
    X  A  0    a
          1    b
          2    c
       B  0    1
          1    3
          2    5
    Y  C  0    2
          1    4
          2    6
    dtype: object

    For MultiIndex case:

    >>> df = ps.DataFrame({"A": ["a", "b", "c"],
    ...                    "B": [1, 3, 5],
    ...                    "C": [2, 4, 6]},
    ...                   columns=["A", "B", "C"])
    >>> df = df.set_index('A', append=True)
    >>> df  # doctest: +NORMALIZE_WHITESPACE
         B  C
      A
    0 a  1  2
    1 b  3  4
    2 c  5  6
    >>> df.unstack().sort_index()  # doctest: +NORMALIZE_WHITESPACE
         B              C
    A    a    b    c    a    b    c
    0  1.0  NaN  NaN  2.0  NaN  NaN
    1  NaN  3.0  NaN  NaN  4.0  NaN
    2  NaN  NaN  5.0  NaN  NaN  6.0
    """
    from pyspark.pandas.series import first_series

    # --- MultiIndex path: pivot the inner-most index level to columns -------
    if self._internal.index_level > 1:
        # The index after `reset_index()` will never be used, so use "distributed" index
        # as a dummy to avoid overhead.
        with option_context("compute.default_index_type", "distributed"):
            df = self.reset_index()
        # All index levels but the last stay as the row index; the last
        # level becomes the pivot column.
        index = df._internal.column_labels[: self._internal.index_level - 1]
        columns = df.columns[self._internal.index_level - 1]
        df = df.pivot_table(
            index=index, columns=columns, values=self._internal.column_labels, aggfunc="first"
        )
        # Restore the original index/column-level names on the pivoted frame.
        internal = df._internal.copy(
            index_names=self._internal.index_names[:-1],
            index_fields=df._internal.index_fields[: self._internal.index_level - 1],
            column_label_names=(
                df._internal.column_label_names[:-1]
                + [
                    None
                    if self._internal.index_names[-1] is None
                    else df._internal.column_label_names[-1]
                ]
            ),
        )
        return DataFrame(internal)

    # --- Single-level index path: result is a Series whose index gains the
    # column labels as outer level(s). ---------------------------------------
    # TODO: Codes here are similar with melt. Should we deduplicate?
    column_labels = self._internal.column_labels
    ser_name = SPARK_DEFAULT_SERIES_NAME
    sdf = self._internal.spark_frame
    # One new index column per column-label level.
    new_index_columns = [
        SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
    ]
    new_index_map = list(zip(new_index_columns, self._internal.column_label_names))

    # Struct per column: (label parts..., value); exploding yields one row
    # per (input row, column) pair.
    pairs = F.explode(
        F.array(
            *[
                F.struct(
                    *[F.lit(c).alias(name) for c, name in zip(idx, new_index_columns)],
                    *[self._internal.spark_column_for(idx).alias(ser_name)],
                )
                for idx in column_labels
            ]
        )
    )

    columns = [
        F.col("pairs.%s" % name)
        for name in new_index_columns[: self._internal.column_labels_level]
    ] + [F.col("pairs.%s" % ser_name)]

    # Keep the original index columns, renumbered to follow the new ones.
    new_index_len = len(new_index_columns)
    existing_index_columns = []
    for i, index_name in enumerate(self._internal.index_names):
        new_index_map.append((SPARK_INDEX_NAME_FORMAT(i + new_index_len), index_name))
        existing_index_columns.append(
            self._internal.index_spark_columns[i].alias(
                SPARK_INDEX_NAME_FORMAT(i + new_index_len)
            )
        )

    exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns)

    index_spark_column_names, index_names = zip(*new_index_map)
    return first_series(
        DataFrame(
            InternalFrame(  # TODO: dtypes?
                exploded_df,
                index_spark_columns=[
                    scol_for(exploded_df, col) for col in index_spark_column_names
                ],
                index_names=list(index_names),
                column_labels=[None],
            )
        )
    )
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> "Series":
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
dtype: bool
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.min(F.coalesce(scol.cast("boolean"), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(
F.struct(
*[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)],
*[applied_col.alias(value_column)],
)
)
sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select(
F.explode(F.col("arrays"))
)
sdf = sdf.selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))
for i in range(self._internal.column_labels_level)
],
index_names=self._internal.column_label_names,
column_labels=[None],
data_spark_columns=[scol_for(sdf, value_column)],
)
return first_series(DataFrame(internal))
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> "Series":
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
dtype: bool
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.max(F.coalesce(scol.cast("boolean"), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(
F.struct(
*[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)],
*[applied_col.alias(value_column)],
)
)
sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select(
F.explode(F.col("arrays"))
)
sdf = sdf.selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))
for i in range(self._internal.column_labels_level)
],
index_names=self._internal.column_label_names,
column_labels=[None],
data_spark_columns=[scol_for(sdf, value_column)],
)
return first_series(DataFrame(internal))
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method="average", ascending=True) -> "DataFrame":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it use lowest rank in group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it use highest rank in group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps in group.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
return self._apply_series_op(
lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True
)
def filter(self, items=None, like=None, regex=None, axis=None) -> "DataFrame":
    """
    Subset rows or columns of dataframe according to labels in
    the specified index.

    Note that this routine does not filter a dataframe on its
    contents. The filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : string
        Keep labels from axis for which "like in label == True".
    regex : string (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : int or string axis name
        The axis to filter on. By default this is the info axis,
        'index' for Series, 'columns' for DataFrame.

    Returns
    -------
    same type as input object

    See Also
    --------
    DataFrame.loc

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are
    enforced to be mutually exclusive.

    ``axis`` defaults to the info axis that is used when indexing
    with ``[]``.

    Examples
    --------
    >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
    ...                   index=['mouse', 'rabbit'],
    ...                   columns=['one', 'two', 'three'])

    >>> # select columns by name
    >>> df.filter(items=['one', 'three'])
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select columns by regular expression
    >>> df.filter(regex='e$', axis=1)
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select rows containing 'bbi'
    >>> df.filter(like='bbi', axis=0)
            one  two  three
    rabbit    4    5      6

    For a Series,

    >>> # select rows by name
    >>> df.one.filter(items=['rabbit'])
    rabbit    4
    Name: one, dtype: int64

    >>> # select rows by regular expression
    >>> df.one.filter(regex='e$')
    mouse    1
    Name: one, dtype: int64

    >>> # select rows containing 'bbi'
    >>> df.one.filter(like='bbi')
    rabbit    4
    Name: one, dtype: int64
    """
    # The three selectors are mutually exclusive, as in pandas.
    if sum(x is not None for x in (items, like, regex)) > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive"
        )

    # Default axis is the "info axis": columns for a DataFrame.
    axis = validate_axis(axis, none_axis=1)

    index_scols = self._internal.index_spark_columns

    if items is not None:
        if is_list_like(items):
            items = list(items)
        else:
            raise ValueError("items should be a list-like object.")
        if axis == 0:
            if len(index_scols) == 1:
                # Single index: build one big OR of equality comparisons.
                # NOTE(review): with an empty `items` list, `col` stays None
                # and is passed to with_filter — presumably handled upstream;
                # verify against InternalFrame.with_filter.
                col = None
                for item in items:
                    if col is None:
                        col = index_scols[0] == F.lit(item)
                    else:
                        col = col | (index_scols[0] == F.lit(item))
            elif len(index_scols) > 1:
                # for multi-index
                col = None
                for item in items:
                    # Each item must be a full index tuple; its elements are
                    # ANDed across levels, and items are ORed together.
                    if not isinstance(item, tuple):
                        raise TypeError("Unsupported type {}".format(type(item).__name__))
                    if not item:
                        raise ValueError("The item should not be empty.")
                    midx_col = None
                    for i, element in enumerate(item):
                        if midx_col is None:
                            midx_col = index_scols[i] == F.lit(element)
                        else:
                            midx_col = midx_col & (index_scols[i] == F.lit(element))
                    if col is None:
                        col = midx_col
                    else:
                        col = col | midx_col
            else:
                raise ValueError("Single or multi index must be specified.")
            return DataFrame(self._internal.with_filter(col))
        else:
            # Column selection delegates to __getitem__.
            return self[items]
    elif like is not None:
        if axis == 0:
            # Keep rows where ANY index level contains the substring.
            col = None
            for index_scol in index_scols:
                if col is None:
                    col = index_scol.contains(like)
                else:
                    col = col | index_scol.contains(like)
            return DataFrame(self._internal.with_filter(col))
        else:
            # Keep columns where any part of the (possibly multi-level)
            # label contains the substring.
            column_labels = self._internal.column_labels
            output_labels = [label for label in column_labels if any(like in i for i in label)]
            return self[output_labels]
    elif regex is not None:
        if axis == 0:
            # Keep rows where ANY index level matches the regex (Spark rlike).
            col = None
            for index_scol in index_scols:
                if col is None:
                    col = index_scol.rlike(regex)
                else:
                    col = col | index_scol.rlike(regex)
            return DataFrame(self._internal.with_filter(col))
        else:
            # Column matching is done driver-side with Python's re.search.
            column_labels = self._internal.column_labels
            matcher = re.compile(regex)
            output_labels = [
                label
                for label in column_labels
                if any(matcher.search(i) is not None for i in label)
            ]
            return self[output_labels]
    else:
        raise TypeError("Must pass either `items`, `like`, or `regex`")
def rename(
    self,
    mapper=None,
    index=None,
    columns=None,
    axis="index",
    inplace=False,
    level=None,
    errors="ignore",
) -> Optional["DataFrame"]:
    """
    Alter axes labels.

    Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
    will be left as-is. Extra labels listed don't throw an error.

    Parameters
    ----------
    mapper : dict-like or function
        Dict-like or functions transformations to apply to that axis' values.
        Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
        and `columns`.
    index : dict-like or function
        Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
    columns : dict-like or function
        Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
    axis : int or str, default 'index'
        Axis to target with mapper. Can be either the axis name ('index', 'columns') or
        number (0, 1).
    inplace : bool, default False
        Whether to return a new DataFrame.
    level : int or level name, default None
        In case of a MultiIndex, only rename labels in the specified level.
    errors : {'ignore', 'raise}, default 'ignore'
        If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
        contains labels that are not present in the Index being transformed. If 'ignore',
        existing keys will be renamed and extra keys will be ignored.

    Returns
    -------
    DataFrame with the renamed axis labels.

    Raises
    ------
    `KeyError`
        If any of the labels is not found in the selected axis and "errors='raise'".

    Examples
    --------
    >>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    >>> psdf1.rename(columns={"A": "a", "B": "c"})  # doctest: +NORMALIZE_WHITESPACE
       a  c
    0  1  4
    1  2  5
    2  3  6

    >>> psdf1.rename(index={1: 10, 2: 20})  # doctest: +NORMALIZE_WHITESPACE
        A  B
    0   1  4
    10  2  5
    20  3  6

    >>> def str_lower(s) -> str:
    ...     return str.lower(s)
    >>> psdf1.rename(str_lower, axis='columns')  # doctest: +NORMALIZE_WHITESPACE
       a  b
    0  1  4
    1  2  5
    2  3  6

    >>> def mul10(x) -> int:
    ...     return x * 10
    >>> psdf1.rename(mul10, axis='index')  # doctest: +NORMALIZE_WHITESPACE
        A  B
    0   1  4
    10  2  5
    20  3  6

    >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
    >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
    >>> psdf2.rename(columns=str_lower, level=0)  # doctest: +NORMALIZE_WHITESPACE
       x     y
       A  B  C  D
    0  1  2  3  4
    1  5  6  7  8

    >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
    >>> psdf3.rename(index=str_lower)  # doctest: +NORMALIZE_WHITESPACE
         a  b
    x a  1  2
      b  3  4
    y c  5  6
      d  7  8
    """

    def gen_mapper_fn(mapper):
        # Build (mapper_fn, spark_return_type) from a dict or callable.
        if isinstance(mapper, dict):
            if len(mapper) == 0:
                if errors == "raise":
                    raise KeyError("Index include label which is not in the `mapper`.")
                else:
                    # NOTE(review): this returns a DataFrame where callers
                    # unpack a (fn, spark_type) tuple — an empty dict mapper
                    # with errors='ignore' would fail at the unpacking site.
                    # Looks like a latent bug; confirm against upstream.
                    return DataFrame(self._internal)

            # All replacement values must share one type so a single Spark
            # return type can be used for the pandas UDF.
            type_set = set(map(lambda x: type(x), mapper.values()))
            if len(type_set) > 1:
                raise ValueError("Mapper dict should have the same value type.")
            spark_return_type = as_spark_type(list(type_set)[0])

            def mapper_fn(x):
                if x in mapper:
                    return mapper[x]
                else:
                    if errors == "raise":
                        raise KeyError("Index include value which is not in the `mapper`")
                    return x

        elif callable(mapper):
            # Infer the Spark return type from the callable's annotation.
            spark_return_type = cast(ScalarType, infer_return_type(mapper)).spark_type

            def mapper_fn(x):
                return mapper(x)

        else:
            raise ValueError(
                "`mapper` or `index` or `columns` should be "
                "either dict-like or function type."
            )
        return mapper_fn, spark_return_type

    index_mapper_fn = None
    index_mapper_ret_stype = None
    columns_mapper_fn = None

    inplace = validate_bool_kwarg(inplace, "inplace")
    if mapper:
        # A single mapper applies to the axis selected by `axis`.
        axis = validate_axis(axis)
        if axis == 0:
            index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper)
        elif axis == 1:
            columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper)
        else:
            raise ValueError(
                "argument axis should be either the axis name "
                "(‘index’, ‘columns’) or number (0, 1)"
            )
    else:
        # Otherwise, `index` and/or `columns` select the axes explicitly.
        if index:
            index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index)
        if columns:
            columns_mapper_fn, _ = gen_mapper_fn(columns)

        if not index and not columns:
            raise ValueError("Either `index` or `columns` should be provided.")

    psdf = self.copy()
    if index_mapper_fn:
        # rename index labels, if `level` is None, rename all index columns, otherwise only
        # rename the corresponding level index.
        # implement this by transform the underlying spark dataframe,
        # Example:
        # suppose the psdf index column in underlying spark dataframe is "index_0", "index_1",
        # if rename level 0 index labels, will do:
        #   ``psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
        # if rename all index labels (`level` is None), then will do:
        #   ```
        #   psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
        #        .withColumn("index_1", mapper_fn_udf(col("index_1"))
        #   ```

        index_columns = psdf._internal.index_spark_column_names
        num_indices = len(index_columns)
        # NOTE(review): `if level:` skips this range check when level == 0;
        # level 0 still works via the `level is None` test below, but an
        # out-of-range check never runs for it.
        if level:
            if level < 0 or level >= num_indices:
                raise ValueError("level should be an integer between [0, num_indices)")

        def gen_new_index_column(level):
            # Apply the mapper to one index column via a pandas UDF.
            index_col_name = index_columns[level]

            @pandas_udf(returnType=index_mapper_ret_stype)
            def index_mapper_udf(s: pd.Series) -> pd.Series:
                return s.map(index_mapper_fn)

            return index_mapper_udf(scol_for(psdf._internal.spark_frame, index_col_name))

        sdf = psdf._internal.resolved_copy.spark_frame
        index_fields = self._internal.index_fields.copy()
        if level is None:
            # rename all index columns
            for i in range(num_indices):
                sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i))
                index_fields[i] = None  # TODO: dtype?
        else:
            sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level))
            index_fields[level] = None  # TODO: dtype?
        psdf = DataFrame(psdf._internal.with_new_sdf(sdf, index_fields=index_fields))
    if columns_mapper_fn:
        # rename column name.
        # Will modify the `_internal._column_labels` and transform underlying spark dataframe
        # to the same column name with `_internal._column_labels`.
        if level:
            if level < 0 or level >= psdf._internal.column_labels_level:
                raise ValueError("level should be an integer between [0, column_labels_level)")

        def gen_new_column_labels_entry(column_labels_entry):
            if isinstance(column_labels_entry, tuple):
                if level is None:
                    # rename all level columns
                    return tuple(map(columns_mapper_fn, column_labels_entry))
                else:
                    # only rename specified level column
                    entry_list = list(column_labels_entry)
                    entry_list[level] = columns_mapper_fn(entry_list[level])
                    return tuple(entry_list)
            else:
                return columns_mapper_fn(column_labels_entry)

        new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))

        new_data_scols = [
            psdf._psser_for(old_label).rename(new_label)
            for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels)
        ]
        psdf = DataFrame(psdf._internal.with_new_columns(new_data_scols))
    if inplace:
        self._update_internal_frame(psdf._internal)
        return None
    else:
        return psdf
def rename_axis(
    self,
    mapper: Optional[Any] = None,
    index: Optional[Any] = None,
    columns: Optional[Any] = None,
    axis: Optional[Union[int, str]] = 0,
    inplace: Optional[bool] = False,
) -> Optional["DataFrame"]:
    """
    Set the name of the axis for the index or columns.

    Parameters
    ----------
    mapper : scalar, list-like, optional
        A scalar, list-like, dict-like or functions transformations to
        apply to the axis name attribute.
    index, columns : scalar, list-like, dict-like or function, optional
        A scalar, list-like, dict-like or functions transformations to
        apply to that axis' values.

        Use either ``mapper`` and ``axis`` to
        specify the axis to target with ``mapper``, or ``index``
        and/or ``columns``.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to rename.
    inplace : bool, default False
        Modifies the object directly, instead of creating a new DataFrame.

    Returns
    -------
    DataFrame, or None if `inplace` is True.

    See Also
    --------
    Series.rename : Alter Series index labels or name.
    DataFrame.rename : Alter DataFrame index labels or name.
    Index.rename : Set new names on index.

    Notes
    -----
    ``DataFrame.rename_axis`` supports two calling conventions

    * ``(index=index_mapper, columns=columns_mapper, ...)``
    * ``(mapper, axis={'index', 'columns'}, ...)``

    The first calling convention will only modify the names of
    the index and/or the names of the Index object that is the columns.

    The second calling convention will modify the names of the
    corresponding index specified by axis.

    We *highly* recommend using keyword arguments to clarify your
    intent.

    Examples
    --------
    >>> df = ps.DataFrame({"num_legs": [4, 4, 2],
    ...                    "num_arms": [0, 0, 2]},
    ...                   index=["dog", "cat", "monkey"],
    ...                   columns=["num_legs", "num_arms"])
    >>> df
            num_legs  num_arms
    dog            4         0
    cat            4         0
    monkey         2         2

    >>> df = df.rename_axis("animal").sort_index()
    >>> df  # doctest: +NORMALIZE_WHITESPACE
            num_legs  num_arms
    animal
    cat            4         0
    dog            4         0
    monkey         2         2

    >>> df = df.rename_axis("limbs", axis="columns").sort_index()
    >>> df # doctest: +NORMALIZE_WHITESPACE
    limbs   num_legs  num_arms
    animal
    cat            4         0
    dog            4         0
    monkey         2         2

    **MultiIndex**

    >>> index = pd.MultiIndex.from_product([['mammal'],
    ...                                     ['dog', 'cat', 'monkey']],
    ...                                    names=['type', 'name'])
    >>> df = ps.DataFrame({"num_legs": [4, 4, 2],
    ...                    "num_arms": [0, 0, 2]},
    ...                   index=index,
    ...                   columns=["num_legs", "num_arms"])
    >>> df  # doctest: +NORMALIZE_WHITESPACE
                   num_legs  num_arms
    type   name
    mammal dog            4         0
           cat            4         0
           monkey         2         2

    >>> df.rename_axis(index={'type': 'class'}).sort_index()  # doctest: +NORMALIZE_WHITESPACE
                   num_legs  num_arms
    class  name
    mammal cat            4         0
           dog            4         0
           monkey         2         2

    >>> df.rename_axis(index=str.upper).sort_index()  # doctest: +NORMALIZE_WHITESPACE
                   num_legs  num_arms
    TYPE   NAME
    mammal cat            4         0
           dog            4         0
           monkey         2         2
    """

    def gen_names(v, curnames):
        # Normalize a scalar / list / dict / callable into a list of
        # name tuples, one per existing axis level.
        if is_scalar(v):
            newnames = [v]
        elif is_list_like(v) and not is_dict_like(v):
            newnames = list(v)
        elif is_dict_like(v):
            # Dict: rename only the levels present as keys; keep the rest.
            newnames = [v[name] if name in v else name for name in curnames]
        elif callable(v):
            newnames = [v(name) for name in curnames]
        else:
            raise ValueError(
                "`mapper` or `index` or `columns` should be "
                "either dict-like or function type."
            )
        # The number of names must match the number of axis levels.
        if len(newnames) != len(curnames):
            raise ValueError(
                "Length of new names must be {}, got {}".format(len(curnames), len(newnames))
            )
        return [name if is_name_like_tuple(name) else (name,) for name in newnames]

    if mapper is not None and (index is not None or columns is not None):
        raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.")

    if mapper is not None:
        # Route the single mapper to the requested axis.
        axis = validate_axis(axis)
        if axis == 0:
            index = mapper
        elif axis == 1:
            columns = mapper

    column_label_names = (
        gen_names(columns, self.columns.names)
        if columns is not None
        else self._internal.column_label_names
    )
    index_names = (
        gen_names(index, self.index.names) if index is not None else self._internal.index_names
    )

    # Only axis-level NAMES change; the underlying data is untouched.
    internal = self._internal.copy(
        index_names=index_names, column_label_names=column_label_names
    )
    if inplace:
        self._update_internal_frame(internal)
        return None
    else:
        return DataFrame(internal)
def keys(self) -> pd.Index:
"""
Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object')
"""
return self.columns
def pct_change(self, periods=1) -> "DataFrame":
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
DataFrame
Examples
--------
Percentage change in French franc, Deutsche Mark, and Italian lira
from 1980-01-01 to 1980-03-01.
>>> df = ps.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
You can set periods to shift for forming percent change
>>> df.pct_change(2)
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 NaN NaN NaN
1980-03-01 0.067912 0.073814 0.06883
"""
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
def op(psser):
prev_row = F.lag(psser.spark.column, periods).over(window)
return ((psser.spark.column - prev_row) / prev_row).alias(
psser._internal.data_spark_column_names[0]
)
return self._apply_series_op(op, should_resolve=True)
# TODO: axis = 1
def idxmax(self, axis=0) -> "Series":
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
.. note:: This API collect all rows with maximum value using `to_pandas()`
because we suppose the number of rows with max values are usually small in general.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmax
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a 2
b 0
c 2
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a x 2
b y 0
c z 2
dtype: int64
"""
max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns)
sdf_max = self._internal.spark_frame.select(*max_cols).head()
# `sdf_max` looks like below
# +------+------+------+
# |(a, x)|(b, y)|(c, z)|
# +------+------+------+
# | 3| 4.0| 400|
# +------+------+------+
conds = (
scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max)
)
cond = reduce(lambda x, y: x | y, conds)
psdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame"
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax()))
# TODO: axis = 1
def idxmin(self, axis=0) -> "Series":
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
.. note:: This API collect all rows with minimum value using `to_pandas()`
because we suppose the number of rows with min values are usually small in general.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmin
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a 0
b 3
c 1
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a x 0
b y 3
c z 1
dtype: int64
"""
min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns)
sdf_min = self._internal.spark_frame.select(*min_cols).head()
conds = (
scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min)
)
cond = reduce(lambda x, y: x | y, conds)
psdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame"
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin()))
    def info(self, verbose=None, buf=None, max_cols=None, null_counts=None) -> None:
        """
        Print a concise summary of a DataFrame.
        This method prints information about a DataFrame including
        the index dtype and column dtypes, non-null values and memory usage.
        Parameters
        ----------
        verbose : bool, optional
            Whether to print the full summary.
        buf : writable buffer, defaults to sys.stdout
            Where to send the output. By default, the output is printed to
            sys.stdout. Pass a writable buffer if you need to further process
            the output.
        max_cols : int, optional
            When to switch from the verbose to the truncated output. If the
            DataFrame has more than `max_cols` columns, the truncated output
            is used.
        null_counts : bool, optional
            Whether to show the non-null counts.
        Returns
        -------
        None
            This method prints a summary of a DataFrame and returns None.
        See Also
        --------
        DataFrame.describe: Generate descriptive statistics of DataFrame
            columns.
        Examples
        --------
        >>> int_values = [1, 2, 3, 4, 5]
        >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
        >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
        >>> df = ps.DataFrame(
        ...     {"int_col": int_values, "text_col": text_values, "float_col": float_values},
        ...     columns=['int_col', 'text_col', 'float_col'])
        >>> df
           int_col text_col  float_col
        0        1    alpha       0.00
        1        2     beta       0.25
        2        3    gamma       0.50
        3        4    delta       0.75
        4        5  epsilon       1.00
        Prints information of all columns:
        >>> df.info(verbose=True)  # doctest: +SKIP
        <class 'pyspark.pandas.frame.DataFrame'>
        Index: 5 entries, 0 to 4
        Data columns (total 3 columns):
         #   Column     Non-Null Count  Dtype
        ---  ------     --------------  -----
         0   int_col    5 non-null      int64
         1   text_col   5 non-null      object
         2   float_col  5 non-null      float64
        dtypes: float64(1), int64(1), object(1)
        Prints a summary of columns count and its dtypes but not per column
        information:
        >>> df.info(verbose=False)  # doctest: +SKIP
        <class 'pyspark.pandas.frame.DataFrame'>
        Index: 5 entries, 0 to 4
        Columns: 3 entries, int_col to float_col
        dtypes: float64(1), int64(1), object(1)
        Pipe output of DataFrame.info to buffer instead of sys.stdout, get
        buffer content and writes to a text file:
        >>> import io
        >>> buffer = io.StringIO()
        >>> df.info(buf=buffer)
        >>> s = buffer.getvalue()
        >>> with open('%s/info.txt' % path, "w",
        ...           encoding="utf-8") as f:
        ...     _ = f.write(s)
        >>> with open('%s/info.txt' % path) as f:
        ...     f.readlines()  # doctest: +SKIP
        ["<class 'pyspark.pandas.frame.DataFrame'>\\n",
        'Index: 5 entries, 0 to 4\\n',
        'Data columns (total 3 columns):\\n',
        ' #   Column     Non-Null Count  Dtype  \\n',
        '---  ------     --------------  -----  \\n',
        ' 0   int_col    5 non-null      int64  \\n',
        ' 1   text_col   5 non-null      object \\n',
        ' 2   float_col  5 non-null      float64\\n',
        'dtypes: float64(1), int64(1), object(1)']
        """
        # To avoid pandas' existing config affects pandas-on-Spark.
        # TODO: should we have corresponding pandas-on-Spark configs?
        with pd.option_context(
            "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize
        ):
            try:
                # hack to use pandas' info as is.
                # pd.DataFrame.info reads `self._data` and calls `self.count()`
                # expecting a pandas result, so both are temporarily grafted
                # onto this (non-pandas) object for the duration of the call.
                # `object.__setattr__` bypasses this class's own attribute
                # handling when installing `_data`.
                object.__setattr__(self, "_data", self)
                count_func = self.count
                self.count = lambda: count_func().to_pandas()  # type: ignore
                # memory_usage is forced off: pandas-on-Spark cannot report
                # per-column memory for distributed data.
                return pd.DataFrame.info(
                    self,
                    verbose=verbose,
                    buf=buf,
                    max_cols=max_cols,
                    memory_usage=False,
                    null_counts=null_counts,
                )
            finally:
                # Undo the monkeypatching even if pandas' info raises.
                del self._data
                self.count = count_func  # type: ignore
    # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'
    def quantile(
        self,
        q: Union[float, Iterable[float]] = 0.5,
        axis: Union[int, str] = 0,
        numeric_only: bool = True,
        accuracy: int = 10000,
    ) -> Union["DataFrame", "Series"]:
        """
        Return value at the given quantile.
        .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile
            based upon approximate percentile computation because computing quantile across a
            large dataset is extremely expensive.
        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            0 <= q <= 1, the quantile(s) to compute.
        axis : int or str, default 0 or 'index'
            Can only be set to 0 at the moment.
        numeric_only : bool, default True
            If False, the quantile of datetime and timedelta data will be computed as well.
            Can only be set to True at the moment.
        accuracy : int, optional
            Default accuracy of approximation. Larger value means better accuracy.
            The relative error can be deduced by 1.0 / accuracy.
        Returns
        -------
        Series or DataFrame
            If q is an array, a DataFrame will be returned where the
            index is q, the columns are the columns of self, and the values are the quantiles.
            If q is a float, a Series will be returned where the
            index is the columns of self and the values are the quantiles.
        Examples
        --------
        >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})
        >>> psdf
           a  b
        0  1  6
        1  2  7
        2  3  8
        3  4  9
        4  5  0
        >>> psdf.quantile(.5)
        a    3.0
        b    7.0
        Name: 0.5, dtype: float64
        >>> psdf.quantile([.25, .5, .75])
                a    b
        0.25  2.0  6.0
        0.50  3.0  7.0
        0.75  4.0  8.0
        """
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')
        if not isinstance(accuracy, int):
            raise TypeError(
                "accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
            )
        # Normalize an iterable `q` to a list so it can be validated and reused.
        if isinstance(q, Iterable):
            q = list(q)
        # Validate every requested quantile: each must be a float in [0, 1].
        for v in q if isinstance(q, list) else [q]:
            if not isinstance(v, float):
                raise TypeError(
                    "q must be a float or an array of floats; however, [%s] found." % type(v)
                )
            if v < 0.0 or v > 1.0:
                raise ValueError("percentiles should all be in the interval [0, 1].")
        def quantile(spark_column, spark_type):
            # Build the approximate-percentile expression for one column.
            # Only numeric/boolean columns are supported; others raise.
            if isinstance(spark_type, (BooleanType, NumericType)):
                return F.percentile_approx(spark_column.cast(DoubleType()), q, accuracy)
            else:
                raise TypeError(
                    "Could not convert {} ({}) to numeric".format(
                        spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
                    )
                )
        if isinstance(q, list):
            # First calculate the percentiles from all columns and map it to each `quantiles`
            # by creating each entry as a struct. So, it becomes an array of structs as below:
            #
            # +-----------------------------------------+
            # |                                   arrays|
            # +-----------------------------------------+
            # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|
            # +-----------------------------------------+
            percentile_cols = []
            percentile_col_names = []
            column_labels = []
            for label, column in zip(
                self._internal.column_labels, self._internal.data_spark_column_names
            ):
                spark_type = self._internal.spark_type_for(label)
                # With numeric_only=True, non-numeric/boolean columns are dropped
                # silently instead of raising inside `quantile`.
                is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType))
                keep_column = not numeric_only or is_numeric_or_boolean
                if keep_column:
                    percentile_col = quantile(self._internal.spark_column_for(label), spark_type)
                    percentile_cols.append(percentile_col.alias(column))
                    percentile_col_names.append(column)
                    column_labels.append(label)
            if len(percentile_cols) == 0:
                # No usable columns: return an empty frame indexed by the quantiles.
                return DataFrame(index=q)
            sdf = self._internal.spark_frame.select(percentile_cols)
            # Here, after select percentile cols, a spark_frame looks like below:
            # +---------+---------+
            # |        a|        b|
            # +---------+---------+
            # |[2, 3, 4]|[6, 7, 8]|
            # +---------+---------+
            cols_dict = OrderedDict()  # type: OrderedDict
            # For each column, split its percentile array into one expression
            # per requested quantile (index i corresponds to q[i]).
            for column in percentile_col_names:
                cols_dict[column] = list()
                for i in range(len(q)):
                    cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column))
            internal_index_column = SPARK_DEFAULT_INDEX_NAME
            cols = []
            # zip(*...) regroups per-quantile: struct(q[i], a[i], b[i], ...).
            for i, col in enumerate(zip(*cols_dict.values())):
                cols.append(F.struct(F.lit(q[i]).alias(internal_index_column), *col))
            sdf = sdf.select(F.array(*cols).alias("arrays"))
            # And then, explode it and manually set the index.
            # +-----------------+---+---+
            # |__index_level_0__|  a|  b|
            # +-----------------+---+---+
            # |             0.25|  2|  6|
            # |              0.5|  3|  7|
            # |             0.75|  4|  8|
            # +-----------------+---+---+
            sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*")
            internal = InternalFrame(
                spark_frame=sdf,
                index_spark_columns=[scol_for(sdf, internal_index_column)],
                column_labels=column_labels,
                data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names],
            )
            return DataFrame(internal)
        else:
            # Scalar q: reduce each column to a single value and return a Series
            # named after the quantile.
            return self._reduce_for_stat_function(
                quantile, name="quantile", numeric_only=numeric_only
            ).rename(q)
def query(self, expr, inplace=False) -> Optional["DataFrame"]:
"""
Query the columns of a DataFrame with a boolean expression.
.. note:: Internal columns that starting with a '__' prefix are able to access, however,
they are not supposed to be accessed.
.. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the
pandas specific syntax such as `@` is not supported. If you want the pandas syntax,
you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should
be aware that `query_func` will be executed at different nodes in a distributed manner.
So, for example, to use `@` syntax, make sure the variable is serialized by, for
example, putting it within the closure as below.
>>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})
>>> def query_func(pdf):
... num = 1995
... return pdf.query('A > @num')
>>> df.pandas_on_spark.apply_batch(query_func)
A B
1996 1996 1996
1997 1997 1997
1998 1998 1998
1999 1999 1999
Parameters
----------
expr : str
The query string to evaluate.
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
Examples
--------
>>> df = ps.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
if isinstance(self.columns, pd.MultiIndex):
raise TypeError("Doesn't support for MultiIndex columns")
if not isinstance(expr, str):
raise TypeError(
"expr must be a string to be evaluated, {} given".format(type(expr).__name__)
)
inplace = validate_bool_kwarg(inplace, "inplace")
data_columns = [label[0] for label in self._internal.column_labels]
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
+ [
scol.alias(col)
for scol, col in zip(self._internal.data_spark_columns, data_columns)
]
).filter(expr)
internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def take(self, indices, axis=0, **kwargs) -> "DataFrame":
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3]).sort_index()
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2]).sort_index()
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
axis = validate_axis(axis)
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if axis == 0:
return cast(DataFrame, self.iloc[indices, :])
else:
return cast(DataFrame, self.iloc[:, indices])
    def eval(self, expr, inplace=False) -> Optional[Union["DataFrame", "Series"]]:
        """
        Evaluate a string describing operations on DataFrame columns.
        Operates on columns only, not specific rows or elements. This allows
        `eval` to run arbitrary code, which can make you vulnerable to code
        injection if you pass user input to this function.
        Parameters
        ----------
        expr : str
            The expression string to evaluate.
        inplace : bool, default False
            If the expression contains an assignment, whether to perform the
            operation inplace and mutate the existing DataFrame. Otherwise,
            a new DataFrame is returned.
        Returns
        -------
        The result of the evaluation.
        See Also
        --------
        DataFrame.query : Evaluates a boolean expression to query the columns
            of a frame.
        DataFrame.assign : Can evaluate an expression or function to create new
            values for a column.
        eval : Evaluate a Python expression as a string using various
            backends.
        Examples
        --------
        >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        >>> df.eval('A + B')
        0    11
        1    10
        2     9
        3     8
        4     7
        dtype: int64
        Assignment is allowed though by default the original DataFrame is not
        modified.
        >>> df.eval('C = A + B')
           A   B   C
        0  1  10  11
        1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        >>> df
           A   B
        0  1  10
        1  2   8
        2  3   6
        3  4   4
        4  5   2
        Use ``inplace=True`` to modify the original DataFrame.
        >>> df.eval('C = A + B', inplace=True)
        >>> df
           A   B   C
        0  1  10  11
        1  2   8  10
        2  3   6   9
        3  4   4   8
        4  5   2   7
        """
        from pyspark.pandas.series import first_series
        if isinstance(self.columns, pd.MultiIndex):
            raise TypeError("`eval` is not supported for multi-index columns")
        inplace = validate_bool_kwarg(inplace, "inplace")
        # Flags describing the shape of pandas' eval result; they are mutated
        # by `eval_func` (below) as a side effect of schema inference.
        should_return_series = False
        series_name = None
        should_return_scalar = False
        # Since `eval_func` doesn't have a type hint, inferring the schema is always performed
        # in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`,
        # and `should_return_scalar` can be updated.
        def eval_func(pdf):
            # Runs once on a sample pandas frame during schema inference, then
            # on each partition; the nonlocal flags are set on the driver-side
            # inference run, which is why they can be trusted afterwards.
            nonlocal should_return_series
            nonlocal series_name
            nonlocal should_return_scalar
            result_inner = pdf.eval(expr, inplace=inplace)
            if inplace:
                # pandas' eval returns None when inplace; the mutated frame is
                # the actual result.
                result_inner = pdf
            if isinstance(result_inner, pd.Series):
                should_return_series = True
                series_name = result_inner.name
                # apply_batch requires a DataFrame, so wrap and unwrap later.
                result_inner = result_inner.to_frame()
            elif is_scalar(result_inner):
                should_return_scalar = True
                result_inner = pd.Series(result_inner).to_frame()
            return result_inner
        result = self.pandas_on_spark.apply_batch(eval_func)
        if inplace:
            # Here, the result is always a frame because the error is thrown during schema inference
            # from pandas.
            self._update_internal_frame(result._internal, requires_same_anchor=False)
            return None
        elif should_return_series:
            # Unwrap the single-column frame back into a Series with its name.
            return first_series(result).rename(series_name)
        elif should_return_scalar:
            # Unwrap the single-value frame back into a scalar.
            return first_series(result)[0]
        else:
            # Returns a frame
            return result
def explode(self, column) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Examples
--------
>>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 [] 1
2 [3, 4] 1
>>> df.explode('A')
A B
0 1.0 1
0 2.0 1
0 3.0 1
1 NaN 1
2 3.0 1
2 4.0 1
"""
from pyspark.pandas.series import Series
if not is_name_like_value(column):
raise TypeError("column must be a scalar")
psdf = DataFrame(self._internal.resolved_copy) # type: "DataFrame"
psser = psdf[column]
if not isinstance(psser, Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(column)
)
if not isinstance(psser.spark.data_type, ArrayType):
return self.copy()
sdf = psdf._internal.spark_frame.withColumn(
psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column)
)
data_fields = psdf._internal.data_fields.copy()
data_fields[psdf._internal.column_labels.index(psser._column_label)] = None # TODO: dtype?
internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)
return DataFrame(internal)
    def mad(self, axis=0) -> "Series":
        """
        Return the mean absolute deviation of values.
        Parameters
        ----------
        axis : {index (0), columns (1)}
            Axis for the function to be applied on.
        Examples
        --------
        >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
        ...                   columns=['a', 'b'])
        >>> df.mad()
        a    0.666667
        b    0.066667
        dtype: float64
        >>> df.mad(axis=1)
        0    0.45
        1    0.90
        2    1.35
        3     NaN
        dtype: float64
        """
        from pyspark.pandas.series import first_series
        axis = validate_axis(axis)
        if axis == 0:
            # Column-wise MAD: two distributed passes — first the per-column
            # mean, then the mean of absolute deviations from that mean.
            def get_spark_column(psdf, label):
                # Return the Spark column for `label`, casting booleans to
                # integers so they participate in arithmetic.
                scol = psdf._internal.spark_column_for(label)
                col_type = psdf._internal.spark_type_for(label)
                if isinstance(col_type, BooleanType):
                    scol = scol.cast("integer")
                return scol
            new_column_labels = []
            for label in self._internal.column_labels:
                # Filtering out only columns of numeric and boolean type column.
                dtype = self._psser_for(label).spark.data_type
                if isinstance(dtype, (NumericType, BooleanType)):
                    new_column_labels.append(label)
            # Pass 1: per-column means, collected as a single Row.
            new_columns = [
                F.avg(get_spark_column(self, label)).alias(name_like_string(label))
                for label in new_column_labels
            ]
            mean_data = self._internal.spark_frame.select(new_columns).first()
            # Pass 2: mean of |x - mean| per column.
            new_columns = [
                F.avg(
                    F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)])
                ).alias(name_like_string(label))
                for label in new_column_labels
            ]
            # A dummy NULL index column makes the one-row result a valid frame.
            sdf = self._internal.spark_frame.select(
                [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + new_columns
            )
            # The data is expected to be small so it's fine to transpose/use default index.
            with ps.option_context("compute.max_rows", 1):
                internal = InternalFrame(
                    spark_frame=sdf,
                    index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
                    column_labels=new_column_labels,
                    column_label_names=self._internal.column_label_names,
                )
                # Transpose the 1xN frame into an N-long Series.
                return first_series(DataFrame(internal).transpose())
        else:
            # Row-wise MAD: delegate to pandas within a pandas_udf so each
            # row's deviation is computed exactly.
            @pandas_udf(returnType=DoubleType())  # type: ignore
            def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
                return pd.concat(cols, axis=1).mad(axis=1)
            internal = self._internal.copy(
                column_labels=[None],
                data_spark_columns=[
                    calculate_columns_axis(*self._internal.data_spark_columns).alias(
                        SPARK_DEFAULT_SERIES_NAME
                    )
                ],
                data_fields=[None],
                column_label_names=None,
            )
            return first_series(DataFrame(internal))
def tail(self, n=5) -> "DataFrame":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail() # doctest: +SKIP
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3) # doctest: +SKIP
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3) # doctest: +SKIP
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if not isinstance(n, int):
raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__))
if n < 0:
n = len(self) + n
if n <= 0:
return ps.DataFrame(self._internal.with_filter(F.lit(False)))
# Should use `resolved_copy` here for the case like `(psdf + 1).tail()`
sdf = self._internal.resolved_copy.spark_frame
rows = sdf.tail(n)
new_sdf = default_session().createDataFrame(rows, sdf.schema)
return DataFrame(self._internal.with_new_sdf(new_sdf))
    def align(
        self,
        other: Union["DataFrame", "Series"],
        join: str = "outer",
        axis: Optional[Union[int, str]] = None,
        copy: bool = True,
    ) -> Tuple["DataFrame", Union["DataFrame", "Series"]]:
        """
        Align two objects on their axes with the specified join method.
        Join method is specified for each axis Index.
        Parameters
        ----------
        other : DataFrame or Series
        join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
        axis : allowed axis of the other object, default None
            Align on index (0), columns (1), or both (None).
        copy : bool, default True
            Always returns new objects. If copy=False and no reindexing is
            required then original objects are returned.
        Returns
        -------
        (left, right) : (DataFrame, type of other)
            Aligned objects.
        Examples
        --------
        >>> ps.set_option("compute.ops_on_diff_frames", True)
        >>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
        >>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
        Align both axis:
        >>> aligned_l, aligned_r = df1.align(df2)
        >>> aligned_l.sort_index()
              a     b   c
        10  1.0     a NaN
        11  NaN  None NaN
        12  NaN  None NaN
        20  2.0     b NaN
        30  3.0     c NaN
        >>> aligned_r.sort_index()
              a   b     c
        10  4.0 NaN     d
        11  5.0 NaN     e
        12  6.0 NaN     f
        20  NaN NaN  None
        30  NaN NaN  None
        Align only axis=0 (index):
        >>> aligned_l, aligned_r = df1.align(df2, axis=0)
        >>> aligned_l.sort_index()
              a     b
        10  1.0     a
        11  NaN  None
        12  NaN  None
        20  2.0     b
        30  3.0     c
        >>> aligned_r.sort_index()
              a     c
        10  4.0     d
        11  5.0     e
        12  6.0     f
        20  NaN  None
        30  NaN  None
        Align only axis=1 (column):
        >>> aligned_l, aligned_r = df1.align(df2, axis=1)
        >>> aligned_l.sort_index()
            a  b   c
        10  1  a NaN
        20  2  b NaN
        30  3  c NaN
        >>> aligned_r.sort_index()
            a   b  c
        10  4 NaN  d
        11  5 NaN  e
        12  6 NaN  f
        Align with the join type "inner":
        >>> aligned_l, aligned_r = df1.align(df2, join="inner")
        >>> aligned_l.sort_index()
            a
        10  1
        >>> aligned_r.sort_index()
            a
        10  4
        Align with a Series:
        >>> s = ps.Series([7, 8, 9], index=[10, 11, 12])
        >>> aligned_l, aligned_r = df1.align(s, axis=0)
        >>> aligned_l.sort_index()
              a     b
        10  1.0     a
        11  NaN  None
        12  NaN  None
        20  2.0     b
        30  3.0     c
        >>> aligned_r.sort_index()
        10    7.0
        11    8.0
        12    9.0
        20    NaN
        30    NaN
        dtype: float64
        >>> ps.reset_option("compute.ops_on_diff_frames")
        """
        from pyspark.pandas.series import Series, first_series
        if not isinstance(other, (DataFrame, Series)):
            raise TypeError("unsupported type: {}".format(type(other).__name__))
        # Normalize the join/axis arguments; `how` becomes one of
        # full/inner/left/right internally.
        how = validate_how(join)
        axis = validate_axis(axis, None)
        right_is_series = isinstance(other, Series)
        if right_is_series:
            # A Series can only be aligned on the row index.
            if axis is None:
                raise ValueError("Must specify axis=0 or 1")
            elif axis != 0:
                raise NotImplementedError(
                    "align currently only works for axis=0 when right is Series"
                )
        left = self
        right = other
        # Row (index) alignment: only needed when the two objects do not
        # already share the same underlying Spark frame (anchor).
        if (axis is None or axis == 0) and not same_anchor(left, right):
            combined = combine_frames(left, right, how=how)
            left = combined["this"]
            right = combined["that"]
            if right_is_series:
                # combine_frames yields a one-column frame; unwrap it back
                # into a Series with the original name.
                right = first_series(right).rename(other.name)
        # Column alignment: compute the label set implied by `how` and add
        # missing columns as NULL (DoubleType) on each side.
        if (
            axis is None or axis == 1
        ) and left._internal.column_labels != right._internal.column_labels:
            if left._internal.column_labels_level != right._internal.column_labels_level:
                raise ValueError("cannot join with no overlapping index names")
            # Copy before mutating: `left[label] = ...` below assigns columns
            # in place, which must not touch the caller's frames.
            left = left.copy()
            right = right.copy()
            if how == "full":
                column_labels = sorted(
                    list(set(left._internal.column_labels) | set(right._internal.column_labels))
                )
            elif how == "inner":
                column_labels = sorted(
                    list(set(left._internal.column_labels) & set(right._internal.column_labels))
                )
            elif how == "left":
                column_labels = left._internal.column_labels
            else:
                column_labels = right._internal.column_labels
            for label in column_labels:
                if label not in left._internal.column_labels:
                    left[label] = F.lit(None).cast(DoubleType())
            left = left[column_labels]
            for label in column_labels:
                if label not in right._internal.column_labels:
                    right[label] = F.lit(None).cast(DoubleType())
            right = right[column_labels]
        # Per the documented contract, copy=True always returns new objects.
        return (left.copy(), right.copy()) if copy else (left, right)
@staticmethod
def from_dict(data, orient="columns", dtype=None, columns=None) -> "DataFrame":
    """
    Construct DataFrame from dict of array-like or dicts.

    Creates DataFrame object from dictionary by columns or by index
    allowing dtype specification.

    Parameters
    ----------
    data : dict
        Of the form {field : array-like} or {field : dict}.
    orient : {'columns', 'index'}, default 'columns'
        The "orientation" of the data. If the keys of the passed dict
        should be the columns of the resulting DataFrame, pass 'columns'
        (default). Otherwise if the keys should be rows, pass 'index'.
    dtype : dtype, default None
        Data type to force, otherwise infer.
    columns : list, default None
        Column labels to use when ``orient='index'``. Raises a ValueError
        if used with ``orient='columns'``.

    Returns
    -------
    DataFrame

    See Also
    --------
    DataFrame.from_records : DataFrame from structured ndarray, sequence
        of tuples or dicts, or DataFrame.
    DataFrame : DataFrame object creation using constructor.

    Examples
    --------
    By default the keys of the dict become the DataFrame columns:

    >>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}
    >>> ps.DataFrame.from_dict(data)
       col_1  col_2
    0      3     10
    1      2     20
    2      1     30
    3      0     40

    Specify ``orient='index'`` to create the DataFrame using dictionary
    keys as rows:

    >>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}
    >>> ps.DataFrame.from_dict(data, orient='index').sort_index()
            0   1   2   3
    row_1   3   2   1   0
    row_2  10  20  30  40

    When using the 'index' orientation, the column names can be
    specified manually:

    >>> ps.DataFrame.from_dict(data, orient='index',
    ...                        columns=['A', 'B', 'C', 'D']).sort_index()
            A   B   C   D
    row_1   3   2   1   0
    row_2  10  20  30  40
    """
    # Build with pandas first, then wrap the result in a pandas-on-Spark frame.
    pdf = pd.DataFrame.from_dict(data, orient=orient, dtype=dtype, columns=columns)
    return DataFrame(pdf)
def _to_internal_pandas(self):
    """
    Return a pandas DataFrame directly from _internal to avoid overhead of copy.

    This method is for internal use only.
    """
    # Exposes the internal frame as-is; callers must not mutate the result.
    return self._internal.to_pandas_frame
def _get_or_create_repr_pandas_cache(self, n):
    """Return the first ``n + 1`` rows as pandas, memoized per ``n``.

    The extra row lets callers detect truncation (``len(pdf) > n``).

    FIX: the original rebuilt the whole cache dict on every miss, which
    silently dropped entries computed for other values of ``n`` (e.g. when
    the user toggles ``display.max_rows``).  Entries are now kept.
    """
    if not hasattr(self, "_repr_pandas_cache"):
        # __setattr__ is overridden to assign columns, so bypass it for
        # this plain (non-column) attribute.
        object.__setattr__(self, "_repr_pandas_cache", {})
    if n not in self._repr_pandas_cache:
        self._repr_pandas_cache[n] = self.head(n + 1)._to_internal_pandas()
    return self._repr_pandas_cache[n]
def __repr__(self):
    """Plain-text representation, truncated to ``display.max_rows`` rows."""
    limit = get_option("display.max_rows")
    if limit is None:
        # No truncation configured: render the full frame.
        return self._to_internal_pandas().to_string()

    cached = self._get_or_create_repr_pandas_cache(limit)
    truncated = len(cached) > limit
    head = cached.iloc[:limit]
    if truncated:
        rendered = head.to_string(show_dimensions=True)
        match = REPR_PATTERN.search(rendered)
        if match is not None:
            footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format(
                nrows=match.group("rows"), ncols=match.group("columns")
            )
            # Swap pandas' dimension footer for our truncation notice.
            return REPR_PATTERN.sub(footer, rendered)
    return head.to_string()
def _repr_html_(self):
    """Notebook HTML representation, truncated to ``display.max_rows`` rows."""
    limit = get_option("display.max_rows")
    # pandas 0.25.1 has a regression about HTML representation so 'bold_rows'
    # has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204
    bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__))
    if limit is None:
        return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows)

    cached = self._get_or_create_repr_pandas_cache(limit)
    truncated = len(cached) > limit
    head = cached.iloc[:limit]
    if truncated:
        rendered = head.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows)
        match = REPR_HTML_PATTERN.search(rendered)
        if match is not None:
            by = chr(215)  # multiplication sign for "rows x cols"
            footer = (
                "\n<p>Showing only the first {rows} rows "
                "{by} {cols} columns</p>\n</div>".format(
                    rows=match.group("rows"), by=by, cols=match.group("columns")
                )
            )
            # Swap pandas' dimension footer for our truncation notice.
            return REPR_HTML_PATTERN.sub(footer, rendered)
    return head.to_html(notebook=True, bold_rows=bold_rows)
def __getitem__(self, key):
    """Select rows (boolean Series or slice) or columns (label(s)) by ``key``.

    Raises
    ------
    KeyError
        If ``key`` is None.
    NotImplementedError
        For unsupported key types.
    """
    from pyspark.pandas.series import Series

    if key is None:
        raise KeyError("none key")
    elif isinstance(key, Series):
        # Boolean masking with another Series.
        return self.loc[key.astype(bool)]
    elif isinstance(key, slice):
        # FIX: the original condition was `any(type(n) == int or None for n in ...)`.
        # Its `or None` operand is dead code: it only maps a False result to the
        # equally-falsy None, so the effective check was always just "is the
        # slice start or stop an int".  State that directly.
        if any(type(n) is int for n in (key.start, key.stop)):
            # Seems like pandas Frame always uses int as positional search when slicing
            # with ints.
            return self.iloc[key]
        return self.loc[key]
    elif is_name_like_value(key):
        return self.loc[:, key]
    elif is_list_like(key):
        return self.loc[:, list(key)]
    raise NotImplementedError(key)
def __setitem__(self, key, value):
    """Assign ``value`` to column(s) ``key``.

    Four cases are handled: a Series/DataFrame anchored to a different
    Spark frame (requires an alignment join), a plain Python list, a list
    of keys with a same-anchor DataFrame, and a single same-anchor column.
    """
    from pyspark.pandas.series import Series

    if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self):
        # Different Series or DataFrames
        level = self._internal.column_labels_level
        key = DataFrame._index_normalized_label(level, key)
        value = DataFrame._index_normalized_frame(level, value)

        def assign_columns(psdf, this_column_labels, that_column_labels):
            # One "that" label per assigned key; "this" labels may run out,
            # hence zip_longest below.
            assert len(key) == len(that_column_labels)
            # Note that here intentionally uses `zip_longest` that combine
            # that_columns.
            for k, this_label, that_label in zip_longest(
                key, this_column_labels, that_column_labels
            ):
                yield (psdf._psser_for(that_label), tuple(["that", *k]))
                if this_label is not None and this_label[1:] != k:
                    # Keep pre-existing columns that are not being overwritten.
                    yield (psdf._psser_for(this_label), this_label)

        psdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
    elif isinstance(value, list):
        if len(self) != len(value):
            raise ValueError("Length of values does not match length of index")

        # TODO: avoid using default index?
        with option_context(
            "compute.default_index_type",
            "distributed-sequence",
            "compute.ops_on_diff_frames",
            True,
        ):
            # Round-trip through reset_index/set_index so the list can be
            # joined positionally against the existing rows.
            psdf = self.reset_index()
            psdf[key] = ps.DataFrame(value)
            psdf = psdf.set_index(psdf.columns[: self._internal.index_level])
            psdf.index.names = self.index.names
    elif isinstance(key, list):
        assert isinstance(value, DataFrame)
        # Same DataFrames.
        field_names = value.columns
        psdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
    else:
        # Same Series.
        psdf = self._assign({key: value})

    self._update_internal_frame(psdf._internal)
@staticmethod
def _index_normalized_label(level, labels):
    """
    Returns a label that is normalized against the current column index level.
    For example, the key "abc" can be ("abc", "", "") if the current Frame has
    a multi-index for its column
    """
    # Coerce every accepted key shape into a list of tuples.
    if is_name_like_tuple(labels):
        labels = [labels]
    elif is_name_like_value(labels):
        labels = [(labels,)]
    else:
        labels = [lbl if is_name_like_tuple(lbl) else (lbl,) for lbl in labels]

    if labels and max(len(lbl) for lbl in labels) > level:
        raise KeyError(
            "Key length ({}) exceeds index depth ({})".format(
                max(len(lbl) for lbl in labels), level
            )
        )
    # Right-pad each label with empty strings up to the column index depth.
    return [tuple(lbl) + ("",) * (level - len(lbl)) for lbl in labels]
@staticmethod
def _index_normalized_frame(level, psser_or_psdf):
    """
    Returns a frame that is normalized against the current column index level.
    For example, the name in `pd.Series([...], name="abc")` can be
    ("abc", "", "") if the current DataFrame has a multi-index for its column
    """
    from pyspark.pandas.series import Series

    if isinstance(psser_or_psdf, Series):
        psdf = psser_or_psdf.to_frame()
    else:
        assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)
        psdf = psser_or_psdf.copy()

    # Pad every column label with empty strings so the columns form a
    # MultiIndex of the required depth (`level`).
    psdf.columns = pd.MultiIndex.from_tuples(
        [
            tuple([name_like_string(label)] + ([""] * (level - 1)))
            for label in psdf._internal.column_labels
        ],
    )

    return psdf
def __getattr__(self, key: str) -> Any:
    """Fall back to column access for unknown attributes (``df.col``)."""
    if key.startswith("__"):
        # Never resolve dunder lookups to columns.
        raise AttributeError(key)
    if hasattr(_MissingPandasLikeDataFrame, key):
        # Surface the proper "unsupported pandas API" property/function.
        missing = getattr(_MissingPandasLikeDataFrame, key)
        if isinstance(missing, property):
            return missing.fget(self)  # type: ignore
        return partial(missing, self)

    try:
        return self.loc[:, key]
    except KeyError:
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__.__name__, key)
        )
def __setattr__(self, key: str, value) -> None:
    """Assign to an existing attribute or an existing column; never create columns."""
    try:
        object.__getattribute__(self, key)
    except AttributeError:
        pass
    else:
        # A genuine (non-column) attribute already exists: plain assignment.
        object.__setattr__(self, key, value)
        return

    if (key,) in self._internal.column_labels:
        # Route assignment to the existing column.
        self[key] = value
    else:
        msg = "pandas-on-Spark doesn't allow columns to be created via a new attribute name"
        if is_testing():
            raise AssertionError(msg)
        warnings.warn(msg, UserWarning)
def __len__(self):
    # Row count; triggers a full Spark job over the resolved plan.
    return self._internal.resolved_copy.spark_frame.count()
def __dir__(self):
    """Extend dir() with Spark field names that are valid attribute names."""
    schema = self._internal.resolved_copy.spark_frame.schema
    # Fields containing spaces cannot be accessed as attributes; hide them.
    accessible = [name for name in schema.fieldNames() if " " not in name]
    return super().__dir__() + accessible
def __iter__(self):
    # Iterating a DataFrame yields its column labels, matching pandas.
    return iter(self.columns)
# NDArray Compat
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
    """Apply a NumPy ufunc column-wise, aligning frames anchored elsewhere."""
    # TODO: is it possible to deduplicate it with '_map_series_op'?
    if all(isinstance(inp, DataFrame) for inp in inputs) and any(
        not same_anchor(inp, inputs[0]) for inp in inputs
    ):
        # binary only
        assert len(inputs) == 2
        this = inputs[0]
        that = inputs[1]
        if this._internal.column_labels_level != that._internal.column_labels_level:
            raise ValueError("cannot join with no overlapping index names")

        # Different DataFrames
        def apply_op(psdf, this_column_labels, that_column_labels):
            # Labels arrive pairwise from align_diff_frames for matching columns.
            for this_label, that_label in zip(this_column_labels, that_column_labels):
                yield (
                    ufunc(
                        psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs
                    ).rename(this_label),
                    this_label,
                )

        return align_diff_frames(apply_op, this, that, fillna=True, how="full")
    else:
        # DataFrame and Series
        applied = []
        this = inputs[0]
        # All DataFrame operands must share this frame's anchor here.
        assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))
        for label in this._internal.column_labels:
            arguments = []
            for inp in inputs:
                arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)
            # both binary and unary.
            applied.append(ufunc(*arguments, **kwargs).rename(label))
        internal = this._internal.with_new_columns(applied)
        return DataFrame(internal)
# Variadic generics (DataFrame[int, str, ...]) are not natively supported,
# so type-hint subscripting is emulated per Python version.
if sys.version_info >= (3, 7):

    def __class_getitem__(cls, params):
        # This is a workaround to support variadic generic in DataFrame in Python 3.7.
        # See https://github.com/python/typing/issues/193
        # we always wraps the given type hints by a tuple to mimic the variadic generic.
        return _create_tuple_for_frame_type(params)

elif (3, 5) <= sys.version_info < (3, 7):
    # This is a workaround to support variadic generic in DataFrame in Python 3.5+
    # The implementation is in its metaclass so this flag is needed to distinguish
    # pandas-on-Spark DataFrame.
    is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
    """
    Performs a reduction on a spark DataFrame, the functions being known sql aggregate functions.
    """
    assert isinstance(sdf, spark.DataFrame)
    aggregated = sdf.agg(*aggs)
    # limit(2) guards against an aggregation unexpectedly producing >1 row.
    pdf = aggregated.limit(2).toPandas()
    assert len(pdf) == 1, (sdf, pdf)
    row = pdf.iloc[0]
    values = list(row)
    assert len(values) == len(aggs), (row, values)
    return values
class CachedDataFrame(DataFrame):
    """
    Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but
    internally it caches the corresponding Spark DataFrame.
    """

    def __init__(self, internal, storage_level=None):
        # Persist the underlying Spark frame up front.  `_cached` must be set
        # via object.__setattr__ because DataFrame.__setattr__ is overridden
        # to write into columns.
        if storage_level is None:
            cached = internal.spark_frame.cache()
        elif isinstance(storage_level, StorageLevel):
            cached = internal.spark_frame.persist(storage_level)
        else:
            raise TypeError(
                "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`"
            )
        object.__setattr__(self, "_cached", cached)
        super().__init__(internal)

    def __enter__(self):
        # Supports `with df.spark.cache() as cached: ...`
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        # Unpersist on exit regardless of whether an exception occurred.
        self.spark.unpersist()

    # create accessor for Spark related methods.
    spark = CachedAccessor("spark", CachedSparkFrameMethods)
def _test():
    """Run this module's doctests against a local SparkSession."""
    import os
    import doctest
    import shutil
    import sys
    import tempfile
    import uuid

    from pyspark.sql import SparkSession
    import pyspark.pandas.frame

    # Doctests assume relative paths under the Spark installation.
    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.pandas.frame.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]").appName("pyspark.pandas.frame tests").getOrCreate()
    )
    # Scratch database/directory for doctests that write tables or files.
    db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
    spark.sql("CREATE DATABASE %s" % db_name)
    globs["db"] = db_name
    path = tempfile.mkdtemp()
    globs["path"] = path

    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.frame,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )

    # Clean up scratch resources even on doctest failure paths.
    shutil.rmtree(path, ignore_errors=True)
    spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
    spark.stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Allow running the module doctests directly.
    _test()
| apache-2.0 |
vikingMei/mxnet | python/mxnet/model.py | 13 | 41314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
# Fields: current epoch, batch counter within the epoch, the running
# evaluation metric, and the caller's locals() for callback introspection.
BatchEndParam = namedtuple('BatchEndParams',
                           ['epoch',
                            'nbatch',
                            'eval_metric',
                            'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device is 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = 16
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
                        arg_params, aux_params,
                        begin_epoch, end_epoch, epoch_size, optimizer,
                        kvstore, update_on_kvstore,
                        train_data, eval_data=None, eval_metric=None,
                        epoch_end_callback=None, batch_end_callback=None,
                        logger=None, work_load_list=None, monitor=None,
                        eval_end_callback=None,
                        eval_batch_end_callback=None, sym_gen=None):
    """Internal training function on multiple devices.
    This function will also work for single device as well.

    Parameters
    ----------
    symbol : Symbol
        The network configuration.
    ctx : list of Context
        The training devices.
    arg_names: list of str
        Name of all arguments of the network.
    param_names: list of str
        Name of all trainable parameters of the network.
    aux_names: list of str
        Name of all auxiliary states of the network.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.
    begin_epoch : int
        The begining training epoch.
    end_epoch : int
        The end training epoch.
    epoch_size : int, optional
        Number of batches in a epoch. In default, it is set to
        ``ceil(num_train_examples / batch_size)``.
    optimizer : Optimizer
        The optimization algorithm
    train_data : DataIter
        Training data iterator.
    eval_data : DataIter
        Validation data iterator.
    eval_metric : EvalMetric
        An evaluation function or a list of evaluation functions.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback : callable(BatchEndParams)
        A callback that is invoked at end of each batch.
        This can be used to measure speed, get result from evaluation metric. etc.
    kvstore : KVStore
        The KVStore.
    update_on_kvstore : bool
        Whether or not perform weight updating on kvstore.
    logger : logging logger
        When not specified, default logger will be used.
    work_load_list : list of float or int, optional
        The list of work load for different devices,
        in the same order as ``ctx``.
    monitor : Monitor, optional
        Monitor installed to executor,
        for monitoring outputs, weights, and gradients for debugging.

    Notes
    -----
    - This function will inplace update the NDArrays in `arg_params` and `aux_states`.
    """
    if logger is None:
        logger = logging
    executor_manager = DataParallelExecutorManager(symbol=symbol,
                                                   sym_gen=sym_gen,
                                                   ctx=ctx,
                                                   train_data=train_data,
                                                   param_names=param_names,
                                                   arg_names=arg_names,
                                                   aux_names=aux_names,
                                                   work_load_list=work_load_list,
                                                   logger=logger)
    if monitor:
        executor_manager.install_monitor(monitor)

    executor_manager.set_params(arg_params, aux_params)

    if not update_on_kvstore:
        # Updates are applied locally per device; build the updater closure.
        updater = get_updater(optimizer)

    if kvstore:
        _initialize_kvstore(kvstore=kvstore,
                            param_arrays=executor_manager.param_arrays,
                            arg_params=arg_params,
                            param_names=executor_manager.param_names,
                            update_on_kvstore=update_on_kvstore)

    if update_on_kvstore:
        kvstore.set_optimizer(optimizer)

    # Now start training
    train_data.reset()
    for epoch in range(begin_epoch, end_epoch):
        # Training phase
        tic = time.time()
        eval_metric.reset()
        nbatch = 0
        # Iterate over training data.
        while True:
            do_reset = True
            for data_batch in train_data:
                executor_manager.load_data_batch(data_batch)

                if monitor is not None:
                    monitor.tic()

                executor_manager.forward(is_train=True)
                executor_manager.backward()

                if update_on_kvstore:
                    if 'nccl' in kvstore.type:
                        _update_params_on_kvstore_nccl(executor_manager.param_arrays,
                                                       executor_manager.grad_arrays,
                                                       kvstore, executor_manager.param_names)
                    else:
                        _update_params_on_kvstore(executor_manager.param_arrays,
                                                  executor_manager.grad_arrays,
                                                  kvstore, executor_manager.param_names)
                else:
                    _update_params(executor_manager.param_arrays,
                                   executor_manager.grad_arrays,
                                   updater=updater,
                                   num_device=len(ctx),
                                   kvstore=kvstore,
                                   param_names=executor_manager.param_names)

                if monitor is not None:
                    monitor.toc_print()

                # evaluate at end, so we can lazy copy
                executor_manager.update_metric(eval_metric, data_batch.label)

                nbatch += 1
                # batch callback (for print purpose)
                if batch_end_callback is not None:
                    batch_end_params = BatchEndParam(epoch=epoch,
                                                     nbatch=nbatch,
                                                     eval_metric=eval_metric,
                                                     locals=locals())
                    _multiple_callbacks(batch_end_callback, batch_end_params)

                # this epoch is done possibly earlier
                if epoch_size is not None and nbatch >= epoch_size:
                    do_reset = False
                    break

            if do_reset:
                logger.info('Epoch[%d] Resetting Data Iterator', epoch)
                train_data.reset()

            # this epoch is done
            if epoch_size is None or nbatch >= epoch_size:
                break

        toc = time.time()
        logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))

        if epoch_end_callback or epoch + 1 == end_epoch:
            # Sync device copies back into arg_params/aux_params (in place).
            executor_manager.copy_to(arg_params, aux_params)
        _multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)

        # evaluation
        if eval_data:
            eval_metric.reset()
            eval_data.reset()
            total_num_batch = 0
            for i, eval_batch in enumerate(eval_data):
                executor_manager.load_data_batch(eval_batch)
                executor_manager.forward(is_train=False)
                executor_manager.update_metric(eval_metric, eval_batch.label)
                if eval_batch_end_callback is not None:
                    batch_end_params = BatchEndParam(epoch=epoch,
                                                     nbatch=i,
                                                     eval_metric=eval_metric,
                                                     locals=locals())
                    _multiple_callbacks(eval_batch_end_callback, batch_end_params)
                total_num_batch += 1
            if eval_end_callback is not None:
                eval_end_params = BatchEndParam(epoch=epoch,
                                                nbatch=total_num_batch,
                                                eval_metric=eval_metric,
                                                locals=locals())
                _multiple_callbacks(eval_end_callback, eval_end_params)
            eval_data.reset()
    # end of all epochs
    return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
    """Checkpoint the model data into file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input Symbol.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    if symbol is not None:
        symbol.save('%s-symbol.json' % prefix)

    # Move every array to CPU and tag it with its kind before serializing.
    save_dict = {}
    for name, array in arg_params.items():
        save_dict['arg:%s' % name] = array.as_in_context(cpu())
    for name, array in aux_params.items():
        save_dict['aux:%s' % name] = array.as_in_context(cpu())
    param_name = '%s-%04d.params' % (prefix, epoch)
    nd.save(param_name, save_dict)
    logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
    """Load model checkpoint from file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        Epoch number of model we would like to load.

    Returns
    -------
    symbol : Symbol
        The symbol configuration of computation network.
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - Symbol will be loaded from ``prefix-symbol.json``.
    - Parameters will be loaded from ``prefix-epoch.params``.
    """
    symbol = sym.load('%s-symbol.json' % prefix)
    loaded = nd.load('%s-%04d.params' % (prefix, epoch))
    arg_params = {}
    aux_params = {}
    # Keys were written as "arg:<name>" / "aux:<name>" by save_checkpoint;
    # any other prefix is silently ignored, as before.
    destinations = {'arg': arg_params, 'aux': aux_params}
    for key, value in loaded.items():
        kind, name = key.split(':', 1)
        if kind in destinations:
            destinations[kind][name] = value
    return (symbol, arg_params, aux_params)
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
Training parameter, number of training epochs(epochs).
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
The begining training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
             num_epoch=None, epoch_size=None, optimizer='sgd',
             initializer=Uniform(0.01),
             numpy_batch_size=128,
             arg_params=None, aux_params=None,
             allow_extra_params=False,
             begin_epoch=0,
             **kwargs):
    """Set up the deprecated FeedForward model; see the class docstring."""
    warnings.warn(
        '\033[91mmxnet.model.FeedForward has been deprecated. ' + \
        'Please use mxnet.mod.Module instead.\033[0m',
        DeprecationWarning, stacklevel=2)

    # `symbol` may be a concrete Symbol, or a callable generating one
    # (bucketing); in the latter case checking is deferred.
    if isinstance(symbol, sym.Symbol):
        self.symbol = symbol
        self.sym_gen = None
    else:
        assert(callable(symbol))
        self.symbol = None
        self.sym_gen = symbol

    # model parameters
    self.arg_params = arg_params
    self.aux_params = aux_params
    self.allow_extra_params = allow_extra_params

    self.argument_checked = False
    if self.sym_gen is None:
        self._check_arguments()

    # basic configuration
    if ctx is None:
        ctx = [cpu()]
    elif isinstance(ctx, Context):
        ctx = [ctx]
    self.ctx = ctx
    # training parameters
    self.num_epoch = num_epoch
    self.epoch_size = epoch_size
    self.kwargs = kwargs.copy()
    self.optimizer = optimizer
    self.initializer = initializer
    self.numpy_batch_size = numpy_batch_size
    # internal helper state
    self._pred_exec = None
    self.begin_epoch = begin_epoch
def _check_arguments(self):
    """verify the argument of the default symbol and user provided parameters"""
    if self.argument_checked:
        return

    assert self.symbol is not None
    self.argument_checked = True

    # check if symbol contain duplicated names.
    _check_arguments(self.symbol)

    # rematch parameters to delete useless ones
    if self.allow_extra_params:
        if self.arg_params:
            allowed = set(self.symbol.list_arguments())
            self.arg_params = {
                name: value for name, value in self.arg_params.items() if name in allowed
            }
        if self.aux_params:
            allowed = set(self.symbol.list_auxiliary_states())
            self.aux_params = {
                name: value for name, value in self.aux_params.items() if name in allowed
            }
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
    """Initialize weight parameters and auxiliary states.

    Shapes and dtypes are inferred from the symbol given the input
    descriptors; existing values in ``self.arg_params``/``self.aux_params``
    are kept unless ``overwrite`` is True.
    """
    inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
    input_shapes = {item.name: item.shape for item in inputs}
    arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
    assert arg_shapes is not None
    input_dtypes = {item.name: item.dtype for item in inputs}
    arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
    assert arg_dtypes is not None

    arg_names = self.symbol.list_arguments()
    input_names = input_shapes.keys()
    # Trainable parameters are arguments that are not data/label inputs.
    param_names = [key for key in arg_names if key not in input_names]
    aux_names = self.symbol.list_auxiliary_states()

    param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
                        if x[0] in param_names]
    arg_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in param_name_attrs}
    aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
                      if x[0] in aux_names]
    aux_params = {k : nd.zeros(shape=s, dtype=t)
                  for k, s, t in aux_name_attrs}

    # Reuse provided values where available, else run the initializer.
    for k, v in arg_params.items():
        if self.arg_params and k in self.arg_params and (not overwrite):
            arg_params[k][:] = self.arg_params[k][:]
        else:
            self.initializer(k, v)

    for k, v in aux_params.items():
        if self.aux_params and k in self.aux_params and (not overwrite):
            aux_params[k][:] = self.aux_params[k][:]
        else:
            self.initializer(k, v)

    self.arg_params = arg_params
    self.aux_params = aux_params

    return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
    # `_pred_exec` was nulled by __getstate__; it is rebuilt lazily on predict.
    self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
    """Initialize the predictor module for running prediction.

    Reuses the already-bound executor when its argument shapes match;
    otherwise binds a fresh forward-only (grad_req='null') executor on the
    first device and copies the current parameters into it.
    """
    if self._pred_exec is not None:
        arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
        assert arg_shapes is not None, "Incomplete input shapes"
        pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
        if arg_shapes == pred_shapes:
            # Existing executor is compatible; keep it.
            return
    # for now only use the first device
    pred_exec = self.symbol.simple_bind(
        self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
    pred_exec.copy_params_from(self.arg_params, self.aux_params)

    _check_arguments(self.symbol)
    self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
    """Initialize the iterator given input.

    Wraps raw ndarrays into an NDArrayIter (validating labels); passes an
    existing DataIter through unchanged.
    """
    if isinstance(X, (np.ndarray, nd.NDArray)):
        if y is None:
            if is_train:
                raise ValueError('y must be specified when X is numpy.ndarray')
            else:
                # Prediction does not need labels; fabricate zeros.
                y = np.zeros(X.shape[0])
        if not isinstance(y, (np.ndarray, nd.NDArray)):
            raise TypeError('y must be ndarray when X is numpy.ndarray')
        if X.shape[0] != y.shape[0]:
            raise ValueError("The numbers of data points and labels not equal")
        # Accept column-vector labels by flattening to 1-D.
        if y.ndim == 2 and y.shape[1] == 1:
            y = y.flatten()
        if y.ndim != 1:
            raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
        if is_train:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
                                  shuffle=is_train, last_batch_handle='roll_over')
        else:
            return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
    if not isinstance(X, io.DataIter):
        raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
    return X
def _init_eval_iter(self, eval_data):
    """Normalize *eval_data* into a DataIter (or ``None`` when not provided)."""
    if eval_data is None:
        return eval_data
    if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
        data, label = eval_data
        if data is None:
            raise ValueError("Eval data is NONE")
        if label is None and isinstance(data, io.DataIter):
            # Already an iterator: use it directly.
            return data
        if isinstance(data, list):
            data = np.array(data)
        if isinstance(label, list):
            label = np.array(label)
        return self._init_iter(data, label, is_train=True)
    if not isinstance(eval_data, io.DataIter):
        raise TypeError('Eval data must be DataIter, or ' \
                        'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
    return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
    """Run prediction, always using only the first device.

    Parameters
    ----------
    X : mxnet.DataIter or NDArray/numpy.ndarray
        Input data; raw arrays are wrapped into an ``NDArrayIter``.
    num_batch : int or None
        The number of batch to run. Go though all batches if ``None``.
    return_data : bool
        If True, also return the concatenated input data and labels.
    reset : bool
        If True, reset the iterator before iterating.

    Returns
    -------
    y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
        The predicted value of the output.
    """
    X = self._init_iter(X, None, is_train=False)

    if reset:
        X.reset()
    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    # Build the dtype map for binding: parameters keep their trained dtype;
    # input dtypes come from the iterator (falling back to the default real
    # type when the iterator only provides (name, shape) tuples).
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    batch_size = X.batch_size
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
    # One accumulator list per network output.
    output_list = [[] for _ in range(len(self._pred_exec.outputs))]
    if return_data:
        data_list = [[] for _ in X.provide_data]
        label_list = [[] for _ in X.provide_label]

    i = 0
    for batch in X:

        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        # Drop the padded samples the iterator appends to fill the last
        # (partial) batch; only the first `real_size` rows are genuine.
        padded = batch.pad
        real_size = batch_size - padded

        for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
            o_list.append(o_nd[0:real_size].asnumpy())

        if return_data:
            for j, x in enumerate(batch.data):
                data_list[j].append(x[0:real_size].asnumpy())
            for j, x in enumerate(batch.label):
                label_list[j].append(x[0:real_size].asnumpy())
        i += 1
        if num_batch is not None and i == num_batch:
            break

    outputs = [np.concatenate(x) for x in output_list]
    if len(outputs) == 1:
        # Single-output networks return a plain ndarray instead of a list.
        outputs = outputs[0]

    if return_data:
        data = [np.concatenate(x) for x in data_list]
        label = [np.concatenate(x) for x in label_list]

        if len(data) == 1:
            data = data[0]

        if len(label) == 1:
            label = label[0]

        return outputs, data, label
    else:
        return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
    """Run the model given an input and calculate the score
    as assessed by an evaluation metric.

    Parameters
    ----------
    X : mxnet.DataIter or NDArray/numpy.ndarray
        Input data; raw arrays are wrapped into an ``NDArrayIter``.
    eval_metric : metric.EvalMetric or str
        The metric for calculating score.
    num_batch : int or None
        The number of batches to run. Go though all batches if ``None``.
    batch_end_callback : callable, optional
        Invoked after each batch with a ``BatchEndParam``.
    reset : bool
        If True, reset the iterator before iterating.

    Returns
    -------
    s : float
        The final score.
    """
    # setup metric
    if not isinstance(eval_metric, metric.EvalMetric):
        eval_metric = metric.create(eval_metric)

    X = self._init_iter(X, None, is_train=False)
    if reset:
        X.reset()

    data_shapes = X.provide_data
    data_names = [x[0] for x in data_shapes]
    # Build the dtype map for binding: parameters keep their trained dtype;
    # input dtypes come from the iterator (default real type otherwise).
    type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
    for x in X.provide_data:
        if isinstance(x, DataDesc):
            type_dict[x.name] = x.dtype
        else:
            type_dict[x[0]] = mx_real_t

    self._init_predictor(data_shapes, type_dict)
    data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]

    for i, batch in enumerate(X):
        # Stop early once the requested number of batches has been scored.
        if num_batch is not None and i == num_batch:
            break
        _load_data(batch, data_arrays)
        self._pred_exec.forward(is_train=False)
        eval_metric.update(batch.label, self._pred_exec.outputs)

        if batch_end_callback is not None:
            batch_end_params = BatchEndParam(epoch=0,
                                             nbatch=i,
                                             eval_metric=eval_metric,
                                             locals=locals())
            _multiple_callbacks(batch_end_callback, batch_end_params)
    return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
        epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
        work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
        eval_batch_end_callback=None):
    """Fit the model.

    Parameters
    ----------
    X : DataIter, or numpy.ndarray/NDArray
        Training data. If `X` is a `DataIter`, the name or (if name not available)
        the position of its outputs should match the corresponding variable
        names defined in the symbolic graph.
    y : numpy.ndarray/NDArray, optional
        Training set label.
        If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
        While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
        the same as `X`, i.e. the number of data points and labels should be equal.
    eval_data : DataIter or numpy.ndarray/list/NDArray pair
        If eval_data is numpy.ndarray/list/NDArray pair,
        it should be ``(valid_data, valid_label)``.
    eval_metric : metric.EvalMetric or str or callable
        The evaluation metric. This could be the name of evaluation metric
        or a custom evaluation function that returns statistics
        based on a minibatch.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback : callable(epoch)
        A callback that is invoked at end of each batch for purposes of printing.
    kvstore : KVStore or str, optional
        The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
        In default uses 'local', often no need to change for single machine.
    logger : logging logger, optional
        When not specified, default logger will be used.
    work_load_list : float or int, optional
        The list of work load for different devices,
        in the same order as `ctx`.

    Raises
    ------
    TypeError
        If ``self.optimizer`` is neither an optimizer name (str) nor an
        ``opt.Optimizer`` instance.

    Note
    ----
    KVStore behavior
    - 'local', multi-devices on a single machine, will automatically choose best type.
    - 'dist_sync', multiple machines communicating via BSP.
    - 'dist_async', multiple machines with asynchronous communication.
    """
    data = self._init_iter(X, y, is_train=True)
    eval_data = self._init_eval_iter(eval_data)

    if self.sym_gen:
        # Bucketing model: materialize the symbol for the default bucket key.
        self.symbol = self.sym_gen(data.default_bucket_key)  # pylint: disable=no-member
        self._check_arguments()
    self.kwargs["sym"] = self.symbol

    arg_names, param_names, aux_names = \
        self._init_params(data.provide_data + data.provide_label)

    # setup metric
    if not isinstance(eval_metric, metric.EvalMetric):
        eval_metric = metric.create(eval_metric)

    # create kvstore
    (kvstore, update_on_kvstore) = _create_kvstore(
        kvstore, len(self.ctx), self.arg_params)

    param_idx2name = {}
    if update_on_kvstore:
        param_idx2name.update(enumerate(param_names))
    else:
        # Without kvstore updates each device keeps its own parameter copy,
        # so every (parameter, device) pair gets a distinct index.
        for i, n in enumerate(param_names):
            for k in range(len(self.ctx)):
                param_idx2name[i * len(self.ctx) + k] = n
    self.kwargs["param_idx2name"] = param_idx2name

    # init optimizer
    if isinstance(self.optimizer, str):
        batch_size = data.batch_size
        if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
            # Synchronous distributed training averages gradients over the
            # global batch spanning all workers.
            batch_size *= kvstore.num_workers
        optimizer = opt.create(self.optimizer,
                               rescale_grad=(1.0 / batch_size),
                               **(self.kwargs))
    elif isinstance(self.optimizer, opt.Optimizer):
        optimizer = self.optimizer
    else:
        # BUGFIX: previously an unsupported optimizer type fell through and
        # triggered a confusing NameError at the _train_multi_device call.
        raise TypeError('optimizer must be a str naming a registered optimizer '
                        'or an opt.Optimizer instance')

    # do training
    _train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
                        self.arg_params, self.aux_params,
                        begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
                        epoch_size=self.epoch_size,
                        optimizer=optimizer,
                        train_data=data, eval_data=eval_data,
                        eval_metric=eval_metric,
                        epoch_end_callback=epoch_end_callback,
                        batch_end_callback=batch_end_callback,
                        kvstore=kvstore, update_on_kvstore=update_on_kvstore,
                        logger=logger, work_load_list=work_load_list, monitor=monitor,
                        eval_end_callback=eval_end_callback,
                        eval_batch_end_callback=eval_batch_end_callback,
                        sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
    """Checkpoint the model to files.

    You can also use `pickle` if you only work in Python; `load`/`save`
    produce files readable from other MXNet language bindings and can go
    directly to cloud storage (S3, HDFS).

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int, optional
        Epoch number to record in the filename; defaults to ``self.num_epoch``.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    epoch = self.num_epoch if epoch is None else epoch
    assert epoch is not None
    save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
    """Load a model checkpoint from file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        epoch number of model we would like to load.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
    kwargs : dict
        Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.

    Returns
    -------
    model : FeedForward
        The loaded model that can be used for prediction.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
    model = FeedForward(symbol, ctx=ctx,
                        arg_params=arg_params, aux_params=aux_params,
                        begin_epoch=epoch,
                        **kwargs)
    return model
@staticmethod
def create(symbol, X, y=None, ctx=None,
           num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
           eval_data=None, eval_metric='acc',
           epoch_end_callback=None, batch_end_callback=None,
           kvstore='local', logger=None, work_load_list=None,
           eval_end_callback=LogValidationMetricsCallback(),
           eval_batch_end_callback=None, **kwargs):
    """Construct and train a model in one call (functional style).

    More consistent with functional languages such as R, where mutation
    is not allowed.

    Parameters
    ----------
    symbol : Symbol
        The symbol configuration of a computation network.
    X : DataIter
        Training data.
    y : numpy.ndarray, optional
        If `X` is a ``numpy.ndarray``, `y` must be set.
    ctx : Context or list of Context, optional
        The device context of training and prediction.
        To use multi-GPU training, pass in a list of GPU contexts.
    num_epoch : int, optional
        The number of training epochs.
    epoch_size : int, optional
        Number of batches in a epoch. In default, it is set to
        ``ceil(num_train_examples / batch_size)``.
    optimizer : str or Optimizer, optional
        The name of the chosen optimizer, or an optimizer object, used for training.
    initializer : initializer function, optional
        The initialization scheme used.
    eval_data : DataIter or numpy.ndarray pair
        If `eval_data` is a ``numpy.ndarray`` pair, it should
        be (`valid_data`, `valid_label`).
    eval_metric : metric.EvalMetric or str or callable
        The evaluation metric. Can be the name of an evaluation metric
        or a custom evaluation function that returns statistics
        based on a minibatch.
    epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
        A callback that is invoked at end of each epoch.
        This can be used to checkpoint model each epoch.
    batch_end_callback : callable(epoch)
        A callback that is invoked at end of each batch for print purposes.
    kvstore : KVStore or str, optional
        The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
        Defaults to 'local', often no need to change for single machine.
    logger : logging logger, optional
        When not specified, default logger will be used.
    work_load_list : list of float or int, optional
        The list of work load for different devices,
        in the same order as `ctx`.

    Returns
    -------
    model : FeedForward
        The trained model.
    """
    model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
                        epoch_size=epoch_size,
                        optimizer=optimizer, initializer=initializer, **kwargs)
    fit_args = dict(eval_data=eval_data,
                    eval_metric=eval_metric,
                    epoch_end_callback=epoch_end_callback,
                    batch_end_callback=batch_end_callback,
                    kvstore=kvstore,
                    logger=logger,
                    work_load_list=work_load_list,
                    eval_end_callback=eval_end_callback,
                    eval_batch_end_callback=eval_batch_end_callback)
    model.fit(X, y, **fit_args)
    return model
| apache-2.0 |
pylhc/PyLHC | tests/unit/test_forced_da_analysis.py | 1 | 2469 | from pathlib import Path
import matplotlib
import pytest
from pylhc.forced_da_analysis import main as fda_analysis
# Forcing non-interactive Agg backend so rendering is done similarly across platforms during tests
matplotlib.use("Agg")
INPUT = Path(__file__).parent.parent / "inputs"
@pytest.mark.cern_network
class TestOnCernNetwork:
    """Forced-DA analyses over MD3312 kick data; input files require the CERN network."""

    def test_md3312_data(self, tmp_path):
        data_dir = INPUT / "kicks_vertical_md3312"
        analysis_args = dict(
            beam=1,
            kick_directory=data_dir,
            energy=6500.0,
            plane="Y",
            intensity_tfs=data_dir / "intensity.tfs",
            emittance_tfs=data_dir / "emittance_y.tfs",
            show_wirescan_emittance=data_dir / "emittance_bws_y.tfs",
            output_directory=tmp_path,
        )
        fda_analysis(**analysis_args)
        check_output(tmp_path)

    def test_md3312_data_linear(self, tmp_path):
        data_dir = INPUT / "kicks_vertical_md3312"
        analysis_args = dict(
            fit="linear",
            beam=1,
            kick_directory=data_dir,
            energy=6500.0,
            plane="Y",
            intensity_tfs=data_dir / "intensity.tfs",
            emittance_tfs=data_dir / "emittance_y.tfs",
            show_wirescan_emittance=data_dir / "emittance_bws_y.tfs",
            output_directory=tmp_path,
        )
        fda_analysis(**analysis_args)
        check_output(tmp_path)

    def test_md3312_no_data_given(self, tmp_path):
        # Without intensity/emittance files the analysis cannot read its
        # inputs and must fail with an OSError.
        with pytest.raises(OSError):
            fda_analysis(
                beam=1,
                kick_directory=INPUT / "kicks_vertical_md3312",
                energy=6500.0,
                plane="Y",
                output_directory=tmp_path,
            )
def test_md2162_timberdb(tmp_path):
    """Linear-fit horizontal analysis for MD2162 read from a local Timber DB file."""
    data_dir = INPUT / "kicks_horizontal_md2162"
    analysis_args = dict(
        fit="linear",
        beam=1,
        kick_directory=data_dir,
        energy=6500.0,
        plane="X",
        output_directory=tmp_path,
        pagestore_db=data_dir / "MD2162_ACD_TimberDB_Fill6196.db",
        emittance_type="fit_sigma",
        show_wirescan_emittance=True,
    )
    fda_analysis(**analysis_args)
    check_output(tmp_path)
# Helper -----------------------------------------------------------------------
def check_output(output_dir: Path) -> None:
    """Assert that a completed analysis wrote the expected files to *output_dir*."""
    expected_counts = (
        ("*.pdf", 5),     # plots
        ("*.tfs", 4),     # result tables
        ("*.ini", 1),     # saved options
        ("*_[xy]*", 13),  # per-plane output files
    )
    for pattern, count in expected_counts:
        assert len(list(output_dir.glob(pattern))) == count
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.